Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-09 02:22:54 +00:00.

Comparing branches: removedali...nrt (10 commits)
| SHA1 |
|---|
| 9bc6a43917 |
| e5bf41c1f6 |
| 58d40ebf95 |
| f07634517f |
| 2edea107ef |
| a6b5f4f5b5 |
| 7859ee4a39 |
| f06e116aae |
| ec32e0546a |
| 0cd10e1197 |
CHANGELOG.md (20 lines changed)

@@ -1,23 +1,3 @@
-Tantivy 0.13.0
-======================
-- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
-- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mutable, and they are no longer required to be `Sync + Send`.
-- `MMapDirectory::open` does not return a `Result` anymore.
-- Change in the DocSet and Scorer API. (@fulmicoton)
-A freshly created DocSet points directly to its first doc. A sentinel value called TERMINATED marks the end of a DocSet.
-`.advance()` returns the new DocId. `Scorer::skip(target)` has been replaced by `Scorer::seek(target)`, which returns the resulting DocId.
-As a result, iterating through a DocSet now looks as follows:
-```rust
-let mut doc = docset.doc();
-while doc != TERMINATED {
-    // ...
-    doc = docset.advance();
-}
-```
-This change made it possible to greatly simplify a lot of the DocSet code.
-- Misc internal optimizations and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
-- Added an offset option to the Top(.*)Collectors. (@robyoung)
-
 Tantivy 0.12.0
 ======================
 - Removing static dispatch in tokenizers for simplicity. (#762)
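The changelog entry above shows the new `advance` loop but does not illustrate `seek`. Below is a minimal sketch of how the two combine under the new contract, assuming only the methods named in the entry (`doc`, `advance`, `seek`, and the `TERMINATED` sentinel); the helper name is illustrative, not part of tantivy.

```rust
use tantivy::{DocId, DocSet, TERMINATED};

/// Counts the documents in `docset` whose id is >= `target` (illustrative helper).
fn count_from(docset: &mut dyn DocSet, target: DocId) -> usize {
    // `seek` positions the docset on the first doc >= target and returns it.
    let mut doc = docset.seek(target);
    let mut count = 0;
    while doc != TERMINATED {
        count += 1;
        doc = docset.advance();
    }
    count
}
```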
Cargo.toml (12 lines changed)

@@ -18,7 +18,7 @@ byteorder = "1.0"
 crc32fast = "1.2.0"
 once_cell = "1.0"
 regex = {version = "1.3.0", default-features = false, features = ["std"]}
-tantivy-fst = "0.3"
+tantivy-fst = "0.2.1"
 memmap = {version = "0.7", optional=true}
 lz4 = {version="1.20", optional=true}
 snap = "1"

@@ -29,7 +29,7 @@ serde = {version="1.0", features=["derive"]}
 serde_json = "1.0"
 num_cpus = "1.2"
 fs2={version="0.4", optional=true}
-levenshtein_automata = "0.2"
+levenshtein_automata = "0.1"
 notify = {version="4", optional=true}
 uuid = { version = "0.8", features = ["v4", "serde"] }
 crossbeam = "0.7"

@@ -38,14 +38,14 @@ owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
 rust-stemmers = "1.2"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { version="0.13", path="./query-grammar" }
+tantivy-query-grammar = { version="0.12", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.4"
+census = {path="../census"}
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
-fail = "0.4"
+fail = "0.3"
 murmurhash32 = "0.2"
 chrono = "0.4"
 smallvec = "1.0"

@@ -60,7 +60,7 @@ maplit = "1"
 matches = "0.1.8"

 [dev-dependencies.fail]
-version = "0.4"
+version = "0.3"
 features = ["failpoints"]

 [profile.release]
README.md (10 lines changed)

@@ -31,16 +31,12 @@ Tantivy is, in fact, strongly inspired by Lucene's design.

 # Benchmark

+Tantivy is typically faster than Lucene, but the results depend on
+the nature of the queries in your workload.
+
 The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
 performance for different types of queries / collections.

-In general, Tantivy tends to be
-- slower than Lucene on union with a Top-K due to Block-WAND optimization.
-- faster than Lucene on intersection and phrase queries.
-
-Your mileage WILL vary depending on the nature of queries and their load.
-
 # Features

 - Full-text search
TODO.md (new file, +9 lines)

@@ -0,0 +1,9 @@
+- segment writer serializes if already flushed
+- flush if exceeding max doc count
+- unit tests
+- reader from `IndexWriter`
+- configurable flush limit
+- bundle directory
+- add an index worker when exceeding some doc limit
+- flush before prepare commit
+- segment_writer should not receive a segment
@@ -18,5 +18,5 @@ install:
 build: false

 test_script:
-- REM SET RUST_LOG=tantivy,test & cargo test --all --verbose --no-default-features --features mmap
+- REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap
 - REM SET RUST_BACKTRACE=1 & cargo build --examples
@@ -1,98 +0,0 @@
-use std::collections::HashSet;
-use tantivy::collector::TopDocs;
-use tantivy::doc;
-use tantivy::query::BooleanQuery;
-use tantivy::schema::*;
-use tantivy::{DocId, Index, Score, SegmentReader};
-
-fn main() -> tantivy::Result<()> {
-    let mut schema_builder = Schema::builder();
-
-    let title = schema_builder.add_text_field("title", STORED);
-    let ingredient = schema_builder.add_facet_field("ingredient");
-
-    let schema = schema_builder.build();
-    let index = Index::create_in_ram(schema.clone());
-
-    let mut index_writer = index.writer(30_000_000)?;
-
-    index_writer.add_document(doc!(
-        title => "Fried egg",
-        ingredient => Facet::from("/ingredient/egg"),
-        ingredient => Facet::from("/ingredient/oil"),
-    ));
-    index_writer.add_document(doc!(
-        title => "Scrambled egg",
-        ingredient => Facet::from("/ingredient/egg"),
-        ingredient => Facet::from("/ingredient/butter"),
-        ingredient => Facet::from("/ingredient/milk"),
-        ingredient => Facet::from("/ingredient/salt"),
-    ));
-    index_writer.add_document(doc!(
-        title => "Egg rolls",
-        ingredient => Facet::from("/ingredient/egg"),
-        ingredient => Facet::from("/ingredient/garlic"),
-        ingredient => Facet::from("/ingredient/salt"),
-        ingredient => Facet::from("/ingredient/oil"),
-        ingredient => Facet::from("/ingredient/tortilla-wrap"),
-        ingredient => Facet::from("/ingredient/mushroom"),
-    ));
-    index_writer.commit()?;
-
-    let reader = index.reader()?;
-    let searcher = reader.searcher();
-    {
-        let facets = vec![
-            Facet::from("/ingredient/egg"),
-            Facet::from("/ingredient/oil"),
-            Facet::from("/ingredient/garlic"),
-            Facet::from("/ingredient/mushroom"),
-        ];
-        let query = BooleanQuery::new_multiterms_query(
-            facets
-                .iter()
-                .map(|key| Term::from_facet(ingredient, &key))
-                .collect(),
-        );
-        let top_docs_by_custom_score =
-            TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-                let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
-                let facet_dict = ingredient_reader.facet_dict();
-
-                let query_ords: HashSet<u64> = facets
-                    .iter()
-                    .filter_map(|key| facet_dict.term_ord(key.encoded_str()))
-                    .collect();
-
-                let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);
-
-                move |doc: DocId, original_score: Score| {
-                    ingredient_reader.facet_ords(doc, &mut facet_ords_buffer);
-                    let missing_ingredients = facet_ords_buffer
-                        .iter()
-                        .filter(|ord| !query_ords.contains(ord))
-                        .count();
-                    let tweak = 1.0 / 4_f32.powi(missing_ingredients as i32);
-
-                    original_score * tweak
-                }
-            });
-        let top_docs = searcher.search(&query, &top_docs_by_custom_score)?;
-
-        let titles: Vec<String> = top_docs
-            .iter()
-            .map(|(_, doc_id)| {
-                searcher
-                    .doc(*doc_id)
-                    .unwrap()
-                    .get_first(title)
-                    .unwrap()
-                    .text()
-                    .unwrap()
-                    .to_owned()
-            })
-            .collect();
-        assert_eq!(titles, vec!["Fried egg", "Egg rolls"]);
-    }
-    Ok(())
-}
@@ -10,7 +10,7 @@
 // ---
 // Importing tantivy...
 use tantivy::schema::*;
-use tantivy::{doc, DocSet, Index, Postings, TERMINATED};
+use tantivy::{doc, DocId, DocSet, Index, Postings};

 fn main() -> tantivy::Result<()> {
     // We first create a schema for the sake of the

@@ -62,11 +62,12 @@ fn main() -> tantivy::Result<()> {
     {
         // this buffer will be used to request for positions
        let mut positions: Vec<u32> = Vec::with_capacity(100);
-        let mut doc_id = segment_postings.doc();
-        while doc_id != TERMINATED {
+        while segment_postings.advance() {
             // the number of times the term appears in the document.
+            let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.
+
             // This MAY contain deleted documents as well.
             if segment_reader.is_deleted(doc_id) {
-                doc_id = segment_postings.advance();
                 continue;
             }

@@ -85,7 +86,6 @@ fn main() -> tantivy::Result<()> {
             // Doc 2: TermFreq 1: [0]
             // ```
             println!("Doc {}: TermFreq {}: {:?}", doc_id, term_freq, positions);
-            doc_id = segment_postings.advance();
         }
     }
 }
@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.13.0"
+version = "0.12.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -154,11 +154,17 @@ fn negate(expr: UserInputAST) -> UserInputAST {
     expr.unary(Occur::MustNot)
 }

+fn must(expr: UserInputAST) -> UserInputAST {
+    expr.unary(Occur::Must)
+}
+
 fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     parser(|input| {
-        char('(')
-            .with(ast())
-            .skip(char(')'))
+        char('-')
+            .with(leaf())
+            .map(negate)
+            .or(char('+').with(leaf()).map(must))
+            .or(char('(').with(ast()).skip(char(')')))
             .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All)))
             .or(attempt(
                 string("NOT").skip(spaces1()).with(leaf()).map(negate),

@@ -170,16 +176,6 @@ fn leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     })
 }

-fn occur_symbol<'a>() -> impl Parser<&'a str, Output = Occur> {
-    char('-')
-        .map(|_| Occur::MustNot)
-        .or(char('+').map(|_| Occur::Must))
-}
-
-fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
-    (optional(occur_symbol()), boosted_leaf())
-}
-
 fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
     (many1(digit()), optional((char('.'), many1(digit())))).map(
         |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {

@@ -243,29 +239,21 @@ fn aggregate_binary_expressions(
     }
 }

-fn operand_leaf<'a>() -> impl Parser<&'a str, Output = (BinaryOperand, UserInputAST)> {
-    (
+pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
+    let operand_leaf = (
         binary_operand().skip(spaces()),
         boosted_leaf().skip(spaces()),
-    )
-}
-
-pub fn ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
-    let boolean_expr = (boosted_leaf().skip(spaces()), many1(operand_leaf()))
+    );
+    let boolean_expr = (boosted_leaf().skip(spaces().silent()), many1(operand_leaf))
         .map(|(left, right)| aggregate_binary_expressions(left, right));
-    let whitespace_separated_leaves = many1(occur_leaf().skip(spaces().silent())).map(
-        |subqueries: Vec<(Option<Occur>, UserInputAST)>| {
+    let whitespace_separated_leaves =
+        many1(boosted_leaf().skip(spaces().silent())).map(|subqueries: Vec<UserInputAST>| {
             if subqueries.len() == 1 {
-                let (occur_opt, ast) = subqueries.into_iter().next().unwrap();
-                match occur_opt.unwrap_or(Occur::Should) {
-                    Occur::Must | Occur::Should => ast,
-                    Occur::MustNot => UserInputAST::Clause(vec![(Some(Occur::MustNot), ast)]),
-                }
+                subqueries.into_iter().next().unwrap()
             } else {
                 UserInputAST::Clause(subqueries.into_iter().collect())
             }
-        },
-    );
+        });
     let expr = attempt(boolean_expr).or(whitespace_separated_leaves);
     spaces().with(expr).skip(spaces())
 }

@@ -295,12 +283,6 @@ mod test {
         );
     }

-    #[test]
-    fn test_occur_symbol() {
-        assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
-        assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
-    }
-
     #[test]
     fn test_positive_float_number() {
         fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {

@@ -348,7 +330,7 @@ mod test {
            "Err(UnexpectedParse)"
        );
        test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
-       test_parse_query_to_ast_helper("NOT a", "(-\"a\")");
+       test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
    }

@@ -356,16 +338,16 @@ mod test {
        assert!(parse_to_ast().parse("a^2^3").is_err());
        assert!(parse_to_ast().parse("a^2^").is_err());
        test_parse_query_to_ast_helper("a^3", "(\"a\")^3");
-       test_parse_query_to_ast_helper("a^3 b^2", "(*(\"a\")^3 *(\"b\")^2)");
+       test_parse_query_to_ast_helper("a^3 b^2", "((\"a\")^3 (\"b\")^2)");
        test_parse_query_to_ast_helper("a^1", "\"a\"");
    }

    #[test]
    fn test_parse_query_to_ast_binary_op() {
-       test_parse_query_to_ast_helper("a AND b", "(+\"a\" +\"b\")");
-       test_parse_query_to_ast_helper("a OR b", "(?\"a\" ?\"b\")");
-       test_parse_query_to_ast_helper("a OR b AND c", "(?\"a\" ?(+\"b\" +\"c\"))");
-       test_parse_query_to_ast_helper("a AND b AND c", "(+\"a\" +\"b\" +\"c\")");
+       test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
+       test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
+       test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
+       test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
        assert_eq!(
            format!("{:?}", parse_to_ast().parse("a OR b aaa")),
            "Err(UnexpectedParse)"

@@ -403,13 +385,6 @@ mod test {
        test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
    }

-   #[test]
-   fn test_occur_leaf() {
-       let ((occur, ast), _) = super::occur_leaf().parse("+abc").unwrap();
-       assert_eq!(occur, Some(Occur::Must));
-       assert_eq!(format!("{:?}", ast), "\"abc\"");
-   }
-
    #[test]
    fn test_range_parser() {
        // testing the range() parser separately

@@ -438,67 +413,32 @@ mod test {
    fn test_parse_query_to_triming_spaces() {
        test_parse_query_to_ast_helper(" abc", "\"abc\"");
        test_parse_query_to_ast_helper("abc ", "\"abc\"");
-       test_parse_query_to_ast_helper("( a OR abc)", "(?\"a\" ?\"abc\")");
-       test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
-       test_parse_query_to_ast_helper("(a OR abc)", "(?\"a\" ?\"abc\")");
-       test_parse_query_to_ast_helper("a OR abc ", "(?\"a\" ?\"abc\")");
-       test_parse_query_to_ast_helper("(a OR abc )", "(?\"a\" ?\"abc\")");
-       test_parse_query_to_ast_helper("(a OR abc) ", "(?\"a\" ?\"abc\")");
+       test_parse_query_to_ast_helper("( a OR abc)", "(?(\"a\") ?(\"abc\"))");
+       test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
+       test_parse_query_to_ast_helper("(a OR abc)", "(?(\"a\") ?(\"abc\"))");
+       test_parse_query_to_ast_helper("a OR abc ", "(?(\"a\") ?(\"abc\"))");
+       test_parse_query_to_ast_helper("(a OR abc )", "(?(\"a\") ?(\"abc\"))");
+       test_parse_query_to_ast_helper("(a OR abc) ", "(?(\"a\") ?(\"abc\"))");
    }

    #[test]
-   fn test_parse_query_single_term() {
+   fn test_parse_query_to_ast() {
        test_parse_query_to_ast_helper("abc", "\"abc\"");
-   }
-
-   #[test]
-   fn test_parse_query_default_clause() {
-       test_parse_query_to_ast_helper("a b", "(*\"a\" *\"b\")");
-   }
-
-   #[test]
-   fn test_parse_query_must_default_clause() {
-       test_parse_query_to_ast_helper("+(a b)", "(*\"a\" *\"b\")");
-   }
-
-   #[test]
-   fn test_parse_query_must_single_term() {
-       test_parse_query_to_ast_helper("+d", "\"d\"");
-   }
-
-   #[test]
-   fn test_single_term_with_field() {
+       test_parse_query_to_ast_helper("a b", "(\"a\" \"b\")");
+       test_parse_query_to_ast_helper("+(a b)", "+((\"a\" \"b\"))");
+       test_parse_query_to_ast_helper("+d", "+(\"d\")");
+       test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
+       test_parse_query_to_ast_helper("(+a +b) d", "((+(\"a\") +(\"b\")) \"d\")");
+       test_parse_query_to_ast_helper("(+a)", "+(\"a\")");
+       test_parse_query_to_ast_helper("(+a +b)", "(+(\"a\") +(\"b\"))");
        test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
-   }
-
-   #[test]
-   fn test_single_term_with_float() {
        test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
-   }
-
-   #[test]
-   fn test_must_clause() {
-       test_parse_query_to_ast_helper("(+a +b)", "(+\"a\" +\"b\")");
-   }
-
-   #[test]
-   fn test_parse_test_query_plus_a_b_plus_d() {
-       test_parse_query_to_ast_helper("+(a b) +d", "(+(*\"a\" *\"b\") +\"d\")");
-   }
-
-   #[test]
-   fn test_parse_test_query_other() {
-       test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
-       test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
-       test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
-       test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
-       test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
+       test_parse_query_to_ast_helper("+abc:toto", "+(abc:\"toto\")");
+       test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+(abc:\"toto\") -(\"titi\"))");
+       test_parse_query_to_ast_helper("-abc:toto", "-(abc:\"toto\")");
+       test_parse_query_to_ast_helper("abc:a b", "(abc:\"a\" \"b\")");
        test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
        test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
    }

    #[test]
    fn test_parse_query_with_range() {
        test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
        test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
        test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
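To make the structural change concrete: in the rewritten `leaf()` above, `-` and `+` are consumed recursively inside the leaf parser itself and wrapped into unary nodes, rather than being peeled off by a separate `occur_symbol()` pass. A toy hand-rolled sketch of that recursion follows (illustrative only; the real parser is built from combinators, and `parse_leaf` here is a hypothetical stand-in):

```rust
// Toy recursive-descent sketch of the new leaf() shape: a leading '-' or '+'
// recurses into the leaf parser and wraps the result, mirroring the unary
// nodes the rewritten grammar produces.
fn parse_leaf(input: &str) -> (String, &str) {
    match input.chars().next() {
        Some('-') => {
            let (inner, rest) = parse_leaf(&input[1..]);
            (format!("-({})", inner), rest)
        }
        Some('+') => {
            let (inner, rest) = parse_leaf(&input[1..]);
            (format!("+({})", inner), rest)
        }
        _ => {
            // A bare term runs until the next space.
            let end = input.find(' ').unwrap_or(input.len());
            (format!("{:?}", &input[..end]), &input[end..])
        }
    }
}

fn main() {
    assert_eq!(parse_leaf("-abc").0, "-(\"abc\")");
    assert_eq!(parse_leaf("+-a").0, "+(-(\"a\"))"); // occurs nest recursively
}
```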
@@ -85,14 +85,15 @@ impl UserInputBound {
 }

 pub enum UserInputAST {
-    Clause(Vec<(Option<Occur>, UserInputAST)>),
+    Clause(Vec<UserInputAST>),
+    Unary(Occur, Box<UserInputAST>),
     Leaf(Box<UserInputLeaf>),
     Boost(Box<UserInputAST>, f32),
 }

 impl UserInputAST {
     pub fn unary(self, occur: Occur) -> UserInputAST {
-        UserInputAST::Clause(vec![(Some(occur), self)])
+        UserInputAST::Unary(occur, Box::new(self))
     }

     fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {

@@ -103,7 +104,7 @@ impl UserInputAST {
         } else {
             UserInputAST::Clause(
                 asts.into_iter()
-                    .map(|ast: UserInputAST| (Some(occur), ast))
+                    .map(|ast: UserInputAST| ast.unary(occur))
                     .collect::<Vec<_>>(),
             )
         }

@@ -134,36 +135,25 @@ impl From<UserInputLeaf> for UserInputAST {
     }
 }

-fn print_occur_ast(
-    occur_opt: Option<Occur>,
-    ast: &UserInputAST,
-    formatter: &mut fmt::Formatter,
-) -> fmt::Result {
-    if let Some(occur) = occur_opt {
-        write!(formatter, "{}{:?}", occur, ast)?;
-    } else {
-        write!(formatter, "*{:?}", ast)?;
-    }
-    Ok(())
-}
-
 impl fmt::Debug for UserInputAST {
-    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         match *self {
             UserInputAST::Clause(ref subqueries) => {
                 if subqueries.is_empty() {
                     write!(formatter, "<emptyclause>")?;
                 } else {
                     write!(formatter, "(")?;
-                    print_occur_ast(subqueries[0].0, &subqueries[0].1, formatter)?;
+                    write!(formatter, "{:?}", &subqueries[0])?;
                     for subquery in &subqueries[1..] {
-                        write!(formatter, " ")?;
-                        print_occur_ast(subquery.0, &subquery.1, formatter)?;
+                        write!(formatter, " {:?}", subquery)?;
                     }
                     write!(formatter, ")")?;
                 }
                 Ok(())
             }
+            UserInputAST::Unary(ref occur, ref subquery) => {
+                write!(formatter, "{}({:?})", occur, subquery)
+            }
             UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
             UserInputAST::Boost(ref leaf, boost) => write!(formatter, "({:?})^{}", leaf, boost),
         }
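A standalone sketch of the data-shape change above, with simplified types (these are not tantivy's actual definitions): occurs move out of the clause tuples and into an explicit `Unary` node, which is what the parenthesized `+(...)`/`-(...)` forms in the test expectations come from.

```rust
#[derive(Debug, Clone, Copy)]
enum Occur {
    Must,    // rendered as '+'
    MustNot, // rendered as '-'
}

#[derive(Debug)]
enum Ast {
    Clause(Vec<Ast>),       // before: Clause(Vec<(Option<Occur>, Ast)>)
    Unary(Occur, Box<Ast>), // after: "+a" / "-a" become explicit nodes
    Leaf(String),
}

fn main() {
    // "+a -b" under the new representation:
    let ast = Ast::Clause(vec![
        Ast::Unary(Occur::Must, Box::new(Ast::Leaf("a".into()))),
        Ast::Unary(Occur::MustNot, Box::new(Ast::Leaf("b".into()))),
    ]);
    println!("{:?}", ast);
}
```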
@@ -11,13 +11,13 @@ impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
 where
     TScore: Clone + PartialOrd,
 {
-    pub(crate) fn new(
+    pub fn new(
         custom_scorer: TCustomScorer,
-        collector: TopCollector<TScore>,
+        limit: usize,
     ) -> CustomScoreTopCollector<TCustomScorer, TScore> {
         CustomScoreTopCollector {
             custom_scorer,
-            collector,
+            collector: TopCollector::with_limit(limit),
         }
     }
 }

@@ -28,7 +28,7 @@ where
 /// It is the segment local version of the [`CustomScorer`](./trait.CustomScorer.html).
 pub trait CustomSegmentScorer<TScore>: 'static {
     /// Computes the score of a specific `doc`.
-    fn score(&mut self, doc: DocId) -> TScore;
+    fn score(&self, doc: DocId) -> TScore;
 }

 /// `CustomScorer` makes it possible to define any kind of score.

@@ -117,9 +117,9 @@ where
 impl<F, TScore> CustomSegmentScorer<TScore> for F
 where
-    F: 'static + FnMut(DocId) -> TScore,
+    F: 'static + Sync + Send + Fn(DocId) -> TScore,
 {
-    fn score(&mut self, doc: DocId) -> TScore {
+    fn score(&self, doc: DocId) -> TScore {
         (self)(doc)
     }
 }
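The left-hand side of this diff carries the relaxation announced in the 0.13 changelog: the blanket impl accepts `FnMut` and drops `Sync + Send`, so a per-segment scorer may own mutable state such as a reusable buffer. A minimal standalone sketch of what that permits (names are illustrative, not tantivy's API):

```rust
type DocId = u32;

// Returns a stateful per-segment scorer: under an FnMut bound the closure may
// keep and mutate a buffer across calls, which an Fn bound would forbid.
fn make_scorer() -> impl FnMut(DocId) -> u64 {
    let mut ords_buffer: Vec<u64> = Vec::new(); // reused across calls
    move |doc: DocId| {
        ords_buffer.clear();
        ords_buffer.push(u64::from(doc)); // stand-in for reading facet ords
        ords_buffer.iter().sum()
    }
}

fn main() {
    let mut scorer = make_scorer();
    assert_eq!(scorer(3), 3);
    assert_eq!(scorer(7), 7);
}
```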
@@ -1,5 +1,6 @@
 use crate::collector::Collector;
 use crate::collector::SegmentCollector;
+use crate::docset::SkipResult;
 use crate::fastfield::FacetReader;
 use crate::schema::Facet;
 use crate::schema::Field;

@@ -187,11 +188,6 @@ pub struct FacetSegmentCollector {
     collapse_facet_ords: Vec<u64>,
 }

-enum SkipResult {
-    Found,
-    NotFound,
-}
-
 fn skip<'a, I: Iterator<Item = &'a Facet>>(
     target: &[u8],
     collapse_it: &mut Peekable<I>,

@@ -201,14 +197,14 @@ fn skip<'a, I: Iterator<Item = &'a Facet>>(
         Some(facet_bytes) => match facet_bytes.encoded_str().as_bytes().cmp(target) {
             Ordering::Less => {}
             Ordering::Greater => {
-                return SkipResult::NotFound;
+                return SkipResult::OverStep;
             }
             Ordering::Equal => {
-                return SkipResult::Found;
+                return SkipResult::Reached;
             }
         },
         None => {
-            return SkipResult::NotFound;
+            return SkipResult::End;
         }
     }
     collapse_it.next();

@@ -285,7 +281,7 @@ impl Collector for FacetCollector {
                 // is positioned on a term that has not been processed yet.
                 let skip_result = skip(facet_streamer.key(), &mut collapse_facet_it);
                 match skip_result {
-                    SkipResult::Found => {
+                    SkipResult::Reached => {
                         // we reached a facet we decided to collapse.
                         let collapse_depth = facet_depth(facet_streamer.key());
                         let mut collapsed_id = 0;

@@ -305,7 +301,7 @@ impl Collector for FacetCollector {
                     }
                     break;
                 }
-                SkipResult::NotFound => {
+                SkipResult::End | SkipResult::OverStep => {
                     collapse_mapping.push(0);
                     if !facet_streamer.advance() {
                         break;
@@ -109,7 +109,6 @@ pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};

 mod facet_collector;
 pub use self::facet_collector::FacetCollector;
-use crate::query::Weight;

 /// `Fruit` is the type for the result of our collection.
 /// e.g. `usize` for the `Count` collector.

@@ -155,29 +154,6 @@ pub trait Collector: Sync {
     /// Combines the fruits associated with the collection of each segment
     /// into one fruit.
     fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
-
-    /// Creates a segment collector and collects the matching documents over one segment.
-    fn collect_segment(
-        &self,
-        weight: &dyn Weight,
-        segment_ord: u32,
-        reader: &SegmentReader,
-    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
-        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
-
-        if let Some(delete_bitset) = reader.delete_bitset() {
-            weight.for_each(reader, &mut |doc, score| {
-                if delete_bitset.is_alive(doc) {
-                    segment_collector.collect(doc, score);
-                }
-            })?;
-        } else {
-            weight.for_each(reader, &mut |doc, score| {
-                segment_collector.collect(doc, score);
-            })?;
-        }
-        Ok(segment_collector.harvest())
-    }
 }

 /// The `SegmentCollector` is the trait in charge of defining the
@@ -18,9 +18,9 @@ use std::collections::BinaryHeap;
 /// Two elements are equal if their feature is equal, and regardless of whether `doc`
 /// is equal. This should be perfectly fine for this usage, but let's make sure this
 /// struct is never public.
-pub(crate) struct ComparableDoc<T, D> {
-    pub feature: T,
-    pub doc: D,
+struct ComparableDoc<T, D> {
+    feature: T,
+    doc: D,
 }

 impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {

@@ -56,8 +56,7 @@ impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
 impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}

 pub(crate) struct TopCollector<T> {
-    pub limit: usize,
-    pub offset: usize,
+    limit: usize,
     _marker: PhantomData<T>,
 }

@@ -73,20 +72,14 @@ where
         if limit < 1 {
             panic!("Limit must be strictly greater than 0.");
         }
-        Self {
+        TopCollector {
             limit,
-            offset: 0,
             _marker: PhantomData,
         }
     }

-    /// Skip the first "offset" documents when collecting.
-    ///
-    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
-    /// Lucene's TopDocsCollector.
-    pub fn and_offset(mut self, offset: usize) -> TopCollector<T> {
-        self.offset = offset;
-        self
+    pub fn limit(&self) -> usize {
+        self.limit
     }

     pub fn merge_fruits(

@@ -99,7 +92,7 @@ where
         let mut top_collector = BinaryHeap::new();
         for child_fruit in children {
             for (feature, doc) in child_fruit {
-                if top_collector.len() < (self.limit + self.offset) {
+                if top_collector.len() < self.limit {
                     top_collector.push(ComparableDoc { feature, doc });
                 } else if let Some(mut head) = top_collector.peek_mut() {
                     if head.feature < feature {

@@ -111,7 +104,6 @@ where
         Ok(top_collector
             .into_sorted_vec()
             .into_iter()
-            .skip(self.offset)
             .map(|cdoc| (cdoc.feature, cdoc.doc))
             .collect())
     }

@@ -121,23 +113,7 @@ where
         segment_id: SegmentLocalId,
         _: &SegmentReader,
     ) -> crate::Result<TopSegmentCollector<F>> {
-        Ok(TopSegmentCollector::new(
-            segment_id,
-            self.limit + self.offset,
-        ))
-    }
-
-    /// Create a new TopCollector with the same limit and offset.
-    ///
-    /// Ideally we would use Into but the blanket implementation seems to cause the Scorer traits
-    /// to fail.
-    #[doc(hidden)]
-    pub(crate) fn into_tscore<TScore: PartialOrd + Clone>(self) -> TopCollector<TScore> {
-        TopCollector {
-            limit: self.limit,
-            offset: self.offset,
-            _marker: PhantomData,
-        }
+        Ok(TopSegmentCollector::new(segment_id, self.limit))
     }
 }

@@ -211,7 +187,7 @@ impl<T: PartialOrd + Clone> TopSegmentCollector<T> {

 #[cfg(test)]
 mod tests {
-    use super::{TopCollector, TopSegmentCollector};
+    use super::TopSegmentCollector;
     use crate::DocAddress;

     #[test]

@@ -272,48 +248,6 @@ mod tests {
             top_collector_limit_3.harvest()[..2].to_vec(),
         );
     }

-    #[test]
-    fn test_top_collector_with_limit_and_offset() {
-        let collector = TopCollector::with_limit(2).and_offset(1);
-
-        let results = collector
-            .merge_fruits(vec![vec![
-                (0.9, DocAddress(0, 1)),
-                (0.8, DocAddress(0, 2)),
-                (0.7, DocAddress(0, 3)),
-                (0.6, DocAddress(0, 4)),
-                (0.5, DocAddress(0, 5)),
-            ]])
-            .unwrap();
-
-        assert_eq!(
-            results,
-            vec![(0.8, DocAddress(0, 2)), (0.7, DocAddress(0, 3))]
-        );
-    }
-
-    #[test]
-    fn test_top_collector_with_limit_larger_than_set_and_offset() {
-        let collector = TopCollector::with_limit(2).and_offset(1);
-
-        let results = collector
-            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
-            .unwrap();
-
-        assert_eq!(results, vec![(0.8, DocAddress(0, 2))]);
-    }
-
-    #[test]
-    fn test_top_collector_with_limit_and_offset_larger_than_set() {
-        let collector = TopCollector::with_limit(2).and_offset(20);
-
-        let results = collector
-            .merge_fruits(vec![vec![(0.9, DocAddress(0, 1)), (0.8, DocAddress(0, 2))]])
-            .unwrap();
-
-        assert_eq!(results, vec![]);
-    }
 }

 #[cfg(all(test, feature = "unstable"))]
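The offset support on the left-hand side works by collecting `limit + offset` candidates in the heap and skipping the first `offset` after the final descending sort. A simplified standalone sketch of that merge over plain integer scores (illustrative only; the real code merges `(feature, doc)` fruits):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Keep the (limit + offset) best scores in a min-heap, then sort descending
// and skip the first `offset`, as the removed merge_fruits/offset path does.
fn top_k_with_offset(scores: &[u32], limit: usize, offset: usize) -> Vec<u32> {
    let mut heap: BinaryHeap<Reverse<u32>> = BinaryHeap::new();
    for &s in scores {
        if heap.len() < limit + offset {
            heap.push(Reverse(s));
        } else if let Some(mut head) = heap.peek_mut() {
            if head.0 < s {
                *head = Reverse(s); // evict the current weakest candidate
            }
        }
    }
    let mut best: Vec<u32> = heap.into_iter().map(|Reverse(s)| s).collect();
    best.sort_unstable_by(|a, b| b.cmp(a)); // descending
    best.into_iter().skip(offset).collect()
}

fn main() {
    assert_eq!(top_k_with_offset(&[9, 8, 7, 6, 5], 2, 1), vec![8, 7]);
}
```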
@@ -1,20 +1,18 @@
 use super::Collector;
 use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
+use crate::collector::top_collector::TopCollector;
 use crate::collector::top_collector::TopSegmentCollector;
-use crate::collector::top_collector::{ComparableDoc, TopCollector};
 use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
     CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
 };
 use crate::fastfield::FastFieldReader;
-use crate::query::Weight;
 use crate::schema::Field;
 use crate::DocAddress;
 use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use std::collections::BinaryHeap;
 use std::fmt;

 /// The `TopDocs` collector keeps track of the top `K` documents

@@ -59,11 +57,7 @@ pub struct TopDocs(TopCollector<Score>);

 impl fmt::Debug for TopDocs {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "TopDocs(limit={}, offset={})",
-            self.0.limit, self.0.offset
-        )
+        write!(f, "TopDocs({})", self.0.limit())
     }
 }

@@ -72,7 +66,7 @@ struct ScorerByFastFieldReader {
 }

 impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
-    fn score(&mut self, doc: DocId) -> u64 {
+    fn score(&self, doc: DocId) -> u64 {
         self.ff_reader.get_u64(u64::from(doc))
     }
 }

@@ -107,45 +101,6 @@ impl TopDocs {
         TopDocs(TopCollector::with_limit(limit))
     }

-    /// Skip the first "offset" documents when collecting.
-    ///
-    /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
-    /// Lucene's TopDocsCollector.
-    ///
-    /// ```rust
-    /// use tantivy::collector::TopDocs;
-    /// use tantivy::query::QueryParser;
-    /// use tantivy::schema::{Schema, TEXT};
-    /// use tantivy::{doc, DocAddress, Index};
-    ///
-    /// let mut schema_builder = Schema::builder();
-    /// let title = schema_builder.add_text_field("title", TEXT);
-    /// let schema = schema_builder.build();
-    /// let index = Index::create_in_ram(schema);
-    ///
-    /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-    /// index_writer.add_document(doc!(title => "The Name of the Wind"));
-    /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
-    /// index_writer.add_document(doc!(title => "A Dairy Cow"));
-    /// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
-    /// index_writer.add_document(doc!(title => "The Diary of Lena Mukhina"));
-    /// assert!(index_writer.commit().is_ok());
-    ///
-    /// let reader = index.reader().unwrap();
-    /// let searcher = reader.searcher();
-    ///
-    /// let query_parser = QueryParser::for_index(&index, vec![title]);
-    /// let query = query_parser.parse_query("diary").unwrap();
-    /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
-    ///
-    /// assert_eq!(top_docs.len(), 2);
-    /// assert_eq!(&top_docs[0], &(0.5204813, DocAddress(0, 4)));
-    /// assert_eq!(&top_docs[1], &(0.4793185, DocAddress(0, 3)));
-    /// ```
-    pub fn and_offset(self, offset: usize) -> TopDocs {
-        TopDocs(self.0.and_offset(offset))
-    }
-
     /// Set top-K to rank documents by a given fast field.
     ///
     /// ```rust

@@ -326,7 +281,7 @@ impl TopDocs {
         TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
         TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
     {
-        TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
+        TweakedScoreTopCollector::new(score_tweaker, self.0.limit())
     }

     /// Ranks the documents using a custom score.

@@ -440,7 +395,7 @@ impl TopDocs {
         TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
         TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
     {
-        CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
+        CustomScoreTopCollector::new(custom_score, self.0.limit())
     }
 }

@@ -468,64 +423,6 @@ impl Collector for TopDocs {
     ) -> crate::Result<Self::Fruit> {
         self.0.merge_fruits(child_fruits)
     }

-    fn collect_segment(
-        &self,
-        weight: &dyn Weight,
-        segment_ord: u32,
-        reader: &SegmentReader,
-    ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
-        let heap_len = self.0.limit + self.0.offset;
-        let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
-
-        if let Some(delete_bitset) = reader.delete_bitset() {
-            let mut threshold = f32::MIN;
-            weight.for_each_pruning(threshold, reader, &mut |doc, score| {
-                if delete_bitset.is_deleted(doc) {
-                    return threshold;
-                }
-                let heap_item = ComparableDoc {
-                    feature: score,
-                    doc,
-                };
-                if heap.len() < heap_len {
-                    heap.push(heap_item);
-                    if heap.len() == heap_len {
-                        threshold = heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
-                    }
-                    return threshold;
-                }
-                *heap.peek_mut().unwrap() = heap_item;
-                threshold = heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN);
-                threshold
-            })?;
-        } else {
-            weight.for_each_pruning(f32::MIN, reader, &mut |doc, score| {
-                let heap_item = ComparableDoc {
-                    feature: score,
-                    doc,
-                };
-                if heap.len() < heap_len {
-                    heap.push(heap_item);
-                    // TODO the threshold is suboptimal for heap.len == heap_len
-                    if heap.len() == heap_len {
-                        return heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
-                    } else {
-                        return f32::MIN;
-                    }
-                }
-                *heap.peek_mut().unwrap() = heap_item;
-                heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN)
-            })?;
-        }
-
-        let fruit = heap
-            .into_sorted_vec()
-            .into_iter()
-            .map(|cid| (cid.feature, DocAddress(segment_ord, cid.doc)))
-            .collect();
-        Ok(fruit)
-    }
 }

 /// Segment Collector associated to `TopDocs`.

@@ -535,7 +432,7 @@ impl SegmentCollector for TopScoreSegmentCollector {
     type Fruit = Vec<(Score, DocAddress)>;

     fn collect(&mut self, doc: DocId, score: Score) {
-        self.0.collect(doc, score);
+        self.0.collect(doc, score)
     }

     fn harvest(self) -> Vec<(Score, DocAddress)> {

@@ -549,10 +446,10 @@ mod tests {
     use crate::collector::Collector;
     use crate::query::{AllQuery, Query, QueryParser};
     use crate::schema::{Field, Schema, FAST, STORED, TEXT};
+    use crate::DocAddress;
     use crate::Index;
     use crate::IndexWriter;
     use crate::Score;
-    use crate::{DocAddress, DocId, SegmentReader};

     fn make_index() -> Index {
         let mut schema_builder = Schema::builder();

@@ -592,21 +489,6 @@ mod tests {
         );
     }

-    #[test]
-    fn test_top_collector_not_at_capacity_with_offset() {
-        let index = make_index();
-        let field = index.schema().get_field("text").unwrap();
-        let query_parser = QueryParser::for_index(&index, vec![field]);
-        let text_query = query_parser.parse_query("droopy tax").unwrap();
-        let score_docs: Vec<(Score, DocAddress)> = index
-            .reader()
-            .unwrap()
-            .searcher()
-            .search(&text_query, &TopDocs::with_limit(4).and_offset(2))
-            .unwrap();
-        assert_eq!(score_docs, vec![(0.48527452, DocAddress(0, 0))]);
-    }
-
     #[test]
     fn test_top_collector_at_capacity() {
         let index = make_index();

@@ -628,27 +510,6 @@ mod tests {
         );
     }

-    #[test]
-    fn test_top_collector_at_capacity_with_offset() {
-        let index = make_index();
-        let field = index.schema().get_field("text").unwrap();
-        let query_parser = QueryParser::for_index(&index, vec![field]);
-        let text_query = query_parser.parse_query("droopy tax").unwrap();
-        let score_docs: Vec<(Score, DocAddress)> = index
-            .reader()
-            .unwrap()
-            .searcher()
-            .search(&text_query, &TopDocs::with_limit(2).and_offset(1))
-            .unwrap();
-        assert_eq!(
-            score_docs,
-            vec![
-                (0.5376842, DocAddress(0u32, 2)),
-                (0.48527452, DocAddress(0, 0))
-            ]
-        );
-    }
-
     #[test]
     fn test_top_collector_stable_sorting() {
         let index = make_index();

@@ -762,50 +623,6 @@ mod tests {
         }
     }

-    #[test]
-    fn test_tweak_score_top_collector_with_offset() {
-        let index = make_index();
-        let field = index.schema().get_field("text").unwrap();
-        let query_parser = QueryParser::for_index(&index, vec![field]);
-        let text_query = query_parser.parse_query("droopy tax").unwrap();
-        let collector = TopDocs::with_limit(2).and_offset(1).tweak_score(
-            move |_segment_reader: &SegmentReader| move |doc: DocId, _original_score: Score| doc,
-        );
-        let score_docs: Vec<(u32, DocAddress)> = index
-            .reader()
-            .unwrap()
-            .searcher()
-            .search(&text_query, &collector)
-            .unwrap();
-
-        assert_eq!(
-            score_docs,
-            vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0))]
-        );
-    }
-
-    #[test]
-    fn test_custom_score_top_collector_with_offset() {
-        let index = make_index();
-        let field = index.schema().get_field("text").unwrap();
-        let query_parser = QueryParser::for_index(&index, vec![field]);
-        let text_query = query_parser.parse_query("droopy tax").unwrap();
-        let collector = TopDocs::with_limit(2)
-            .and_offset(1)
-            .custom_score(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
-        let score_docs: Vec<(u32, DocAddress)> = index
-            .reader()
-            .unwrap()
-            .searcher()
-            .search(&text_query, &collector)
-            .unwrap();
-
-        assert_eq!(
-            score_docs,
-            vec![(1, DocAddress(0, 1)), (0, DocAddress(0, 0))]
-        );
-    }
-
     fn index(
         query: &str,
         query_field: Field,
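The deleted `collect_segment` above is the consumer side of `Scorer::for_each_pruning` from the 0.13 changelog: the callback returns the current top-K threshold, letting the scorer skip documents (in practice whole blocks, via per-block max scores) that cannot beat it. A simplified standalone sketch of that contract using integer scores (all names here are hypothetical):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// The driver invokes the callback only for docs whose score beats the last
// returned threshold; a real scorer would skip whole blocks the same way.
fn for_each_pruning(postings: &[(u32, u32)], mut callback: impl FnMut(u32, u32) -> u32) {
    let mut threshold = 0u32;
    for &(doc, score) in postings {
        if score > threshold {
            threshold = callback(doc, score);
        }
    }
}

fn main() {
    let k = 2;
    // Min-heap over (score, doc): the root is the weakest of the current top-K.
    let mut heap: BinaryHeap<Reverse<(u32, u32)>> = BinaryHeap::new();
    for_each_pruning(&[(0, 5), (1, 3), (2, 7), (3, 4), (4, 9)], |doc, score| {
        if heap.len() < k {
            heap.push(Reverse((score, doc)));
        } else {
            *heap.peek_mut().unwrap() = Reverse((score, doc)); // evict weakest
        }
        if heap.len() == k {
            heap.peek().map(|&Reverse((s, _))| s).unwrap_or(0) // new threshold
        } else {
            0
        }
    });
    let mut top: Vec<(u32, u32)> = heap.into_iter().map(|Reverse(p)| p).collect();
    top.sort_by(|a, b| b.cmp(a));
    assert_eq!(top, vec![(9, 4), (7, 2)]); // doc 3 (score 4) was pruned
}
```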
@@ -14,11 +14,11 @@ where
 {
     pub fn new(
         score_tweaker: TScoreTweaker,
-        collector: TopCollector<TScore>,
+        limit: usize,
     ) -> TweakedScoreTopCollector<TScoreTweaker, TScore> {
         TweakedScoreTopCollector {
             score_tweaker,
-            collector,
+            collector: TopCollector::with_limit(limit),
         }
     }
 }

@@ -29,7 +29,7 @@ where
 /// It is the segment local version of the [`ScoreTweaker`](./trait.ScoreTweaker.html).
 pub trait ScoreSegmentTweaker<TScore>: 'static {
     /// Tweak the given `score` for the document `doc`.
-    fn score(&mut self, doc: DocId, score: Score) -> TScore;
+    fn score(&self, doc: DocId, score: Score) -> TScore;
 }

 /// `ScoreTweaker` makes it possible to tweak the score

@@ -121,9 +121,9 @@ where
 impl<F, TScore> ScoreSegmentTweaker<TScore> for F
 where
-    F: 'static + FnMut(DocId, Score) -> TScore,
+    F: 'static + Sync + Send + Fn(DocId, Score) -> TScore,
 {
-    fn score(&mut self, doc: DocId, score: Score) -> TScore {
+    fn score(&self, doc: DocId, score: Score) -> TScore {
         (self)(doc, score)
     }
 }
@@ -33,10 +33,6 @@ impl TinySet {
         TinySet(0u64)
     }

-    pub fn clear(&mut self) {
-        self.0 = 0u64;
-    }
-
     /// Returns the complement of the set in `[0, 64[`.
     fn complement(self) -> TinySet {
         TinySet(!self.0)

@@ -47,11 +43,6 @@ impl TinySet {
         !self.intersect(TinySet::singleton(el)).is_empty()
     }

-    /// Returns the number of elements in the TinySet.
-    pub fn len(self) -> u32 {
-        self.0.count_ones()
-    }
-
     /// Returns the intersection of `self` and `other`
     pub fn intersect(self, other: TinySet) -> TinySet {
         TinySet(self.0 & other.0)

@@ -118,12 +109,22 @@ impl TinySet {
     pub fn range_greater_or_equal(from_included: u32) -> TinySet {
         TinySet::range_lower(from_included).complement()
     }
+
+    pub fn clear(&mut self) {
+        self.0 = 0u64;
+    }
+
+    pub fn len(self) -> u32 {
+        self.0.count_ones()
+    }
 }

 #[derive(Clone)]
 pub struct BitSet {
     tinysets: Box<[TinySet]>,
-    len: usize,
+    len: usize, //< Technically it should be u32, but we
+    // count multiple inserts.
+    // `usize` guards us from overflow.
     max_value: u32,
 }

@@ -203,7 +204,7 @@ mod tests {
     use super::BitSet;
     use super::TinySet;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::DocSet;
     use crate::query::BitSetDocSet;
     use crate::tests;
     use crate::tests::generate_nonunique_unsorted;

@@ -277,13 +278,11 @@ mod tests {
         }
         assert_eq!(btreeset.len(), bitset.len());
         let mut bitset_docset = BitSetDocSet::from(bitset);
-        let mut remaining = true;
         for el in btreeset.into_iter() {
-            assert!(remaining);
+            bitset_docset.advance();
             assert_eq!(bitset_docset.doc(), el);
-            remaining = bitset_docset.advance() != TERMINATED;
         }
-        assert!(!remaining);
+        assert!(!bitset_docset.advance());
     }
@@ -190,11 +190,12 @@ mod test {
|
|||||||
use crate::schema::Field;
|
use crate::schema::Field;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_composite_file() {
|
fn test_composite_file() {
|
||||||
let path = Path::new("test_path");
|
let path = Path::new("test_path");
|
||||||
let mut directory = RAMDirectory::create();
|
let mut directory = RAMDirectory::default();
|
||||||
{
|
{
|
||||||
let w = directory.open_write(path).unwrap();
|
let w = directory.open_write(path).unwrap();
|
||||||
let mut composite_write = CompositeWrite::wrap(w);
|
let mut composite_write = CompositeWrite::wrap(w);
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ pub mod bitpacker;
|
|||||||
mod bitset;
|
mod bitset;
|
||||||
mod composite_file;
|
mod composite_file;
|
||||||
mod counting_writer;
|
mod counting_writer;
|
||||||
|
mod mutable_enum;
|
||||||
mod serialize;
|
mod serialize;
|
||||||
mod vint;
|
mod vint;
|
||||||
|
|
||||||
@@ -9,6 +10,7 @@ pub use self::bitset::BitSet;
|
|||||||
pub(crate) use self::bitset::TinySet;
|
pub(crate) use self::bitset::TinySet;
|
||||||
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
|
||||||
pub use self::counting_writer::CountingWriter;
|
pub use self::counting_writer::CountingWriter;
|
||||||
|
pub(crate) use self::mutable_enum::MutableEnum;
|
||||||
pub use self::serialize::{BinarySerializable, FixedSize};
|
pub use self::serialize::{BinarySerializable, FixedSize};
|
||||||
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
|
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
|
||||||
pub use byteorder::LittleEndian as Endianness;
|
pub use byteorder::LittleEndian as Endianness;
|
||||||
|
|||||||
37
src/common/mutable_enum.rs
Normal file
@@ -0,0 +1,37 @@
+use std::ops::{Deref, DerefMut};
+
+pub(crate) struct MutableEnum<T>(Option<T>);
+
+impl<T> MutableEnum<T> {
+    pub fn wrap(val: T) -> Self {
+        MutableEnum(Some(val))
+    }
+
+    pub fn into(self) -> T {
+        self.0.unwrap()
+    }
+}
+
+impl<T> MutableEnum<T> {
+    pub fn map_mutate<E, F>(&mut self, transformation: F) -> Result<(), E>
+    where
+        F: FnOnce(T) -> Result<T, E>,
+    {
+        self.0 = self.0.take().map(transformation).transpose()?;
+        Ok(())
+    }
+}
+
+impl<T> Deref for MutableEnum<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.0.as_ref().unwrap()
+    }
+}
+
+impl<T> DerefMut for MutableEnum<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.0.as_mut().unwrap()
+    }
+}
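`MutableEnum` exists to run a by-value `T -> Result<T, E>` transformation behind an `&mut` reference, which a plain enum field does not allow without a placeholder value. A minimal, self-contained sketch of the same pattern; the `Conn` state machine is invented for illustration and is not part of the crate:

```rust
/// Minimal re-statement of the type above so the sketch stands alone.
struct MutableEnum<T>(Option<T>);

impl<T> MutableEnum<T> {
    fn wrap(val: T) -> Self {
        MutableEnum(Some(val))
    }

    fn map_mutate<E, F: FnOnce(T) -> Result<T, E>>(&mut self, f: F) -> Result<(), E> {
        // Take the value out, run the by-value transformation, put the result back.
        self.0 = self.0.take().map(f).transpose()?;
        Ok(())
    }
}

// A hypothetical state machine whose transition consumes the old state.
enum Conn {
    Closed,
    Open,
}

fn toggle(c: Conn) -> Result<Conn, String> {
    Ok(match c {
        Conn::Closed => Conn::Open,
        Conn::Open => Conn::Closed,
    })
}

fn main() {
    let mut state = MutableEnum::wrap(Conn::Closed);
    // Behind `&mut`, a plain enum cannot be moved out of; the inner Option makes it legal.
    state.map_mutate(toggle).unwrap();
    assert!(matches!(state.0, Some(Conn::Open)));
}
```

One caveat that follows from the definition: if the transformation returns `Err`, the inner `Option` is left empty, so a later `deref` would panic; being `pub(crate)`, the helper presumably expects callers to discard the wrapper on error.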
@@ -12,8 +12,8 @@ use crate::directory::INDEX_WRITER_LOCK;
 use crate::directory::{Directory, RAMDirectory};
 use crate::error::DataCorruption;
 use crate::error::TantivyError;
-use crate::indexer::index_writer::HEAP_SIZE_MIN;
 use crate::indexer::segment_updater::save_new_metas;
+use crate::indexer::IndexWriterConfig;
 use crate::reader::IndexReader;
 use crate::reader::IndexReaderBuilder;
 use crate::schema::Field;
@@ -21,6 +21,7 @@ use crate::schema::FieldType;
 use crate::schema::Schema;
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::IndexWriter;
+use num_cpus;
 use std::borrow::BorrowMut;
 use std::collections::HashSet;
 use std::fmt;
@@ -272,30 +273,14 @@ impl Index {
     pub fn writer_with_num_threads(
         &self,
         num_threads: usize,
-        overall_heap_size_in_bytes: usize,
+        overall_heap_size_in_bytes: u64,
     ) -> crate::Result<IndexWriter> {
-        let directory_lock = self
-            .directory
-            .acquire_lock(&INDEX_WRITER_LOCK)
-            .map_err(|err| {
-                TantivyError::LockFailure(
-                    err,
-                    Some(
-                        "Failed to acquire index lock. If you are using\
-                        a regular directory, this means there is already an \
-                        `IndexWriter` working on this `Directory`, in this process \
-                        or in a different process."
-                            .to_string(),
-                    ),
-                )
-            })?;
-        let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
-        IndexWriter::new(
-            self,
-            num_threads,
-            heap_size_in_bytes_per_thread,
-            directory_lock,
-        )
+        let config = IndexWriterConfig {
+            max_indexing_threads: num_threads,
+            memory_budget: overall_heap_size_in_bytes,
+            ..Default::default()
+        };
+        self.writer_from_config(config)
     }

     /// Creates a multithreaded writer
@@ -308,13 +293,36 @@ impl Index {
     /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
     /// # Panics
     /// If the heap size per thread is too small, panics.
-    pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> {
-        let mut num_threads = num_cpus::get();
-        let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
-        if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
-            num_threads = (overall_heap_size_in_bytes / HEAP_SIZE_MIN).max(1);
-        }
-        self.writer_with_num_threads(num_threads, overall_heap_size_in_bytes)
+    pub fn writer(&self, overall_heap_size_in_bytes: u64) -> crate::Result<IndexWriter> {
+        let config = IndexWriterConfig {
+            max_indexing_threads: num_cpus::get(),
+            memory_budget: overall_heap_size_in_bytes,
+            ..Default::default()
+        };
+        self.writer_from_config(config)
+    }
+
+    /// Creates a new writer with a given configuration.
+    ///
+    /// See [`IndexWriterConfig`](./struct.IndexWriterConfig.html) for more information.
+    pub fn writer_from_config(&self, mut config: IndexWriterConfig) -> crate::Result<IndexWriter> {
+        config.validate()?;
+        let directory_lock = self
+            .directory
+            .acquire_lock(&INDEX_WRITER_LOCK)
+            .map_err(|err| {
+                TantivyError::LockFailure(
+                    err,
+                    Some(
+                        "Failed to acquire index lock. If you are using \
+                         a regular directory, this means there is already an \
+                         `IndexWriter` working on this `Directory`, in this process \
+                         or in a different process."
+                            .to_string(),
+                    ),
+                )
+            })?;
+        IndexWriter::new(self, config, directory_lock)
     }

     /// Accessor to the index schema
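On this branch, the two positional writer constructors are reduced to thin wrappers over a single `IndexWriterConfig`-based entry point. A hedged sketch of how a caller would use it; `IndexWriterConfig` only exists on this branch, the field names are taken from the hunk above, and every other field (such as the `persist_low` threshold used by the persistor further down) keeps its `Default` value:

```rust
use tantivy::{Index, IndexWriter, IndexWriterConfig};

// Hypothetical helper built on the branch-only writer_from_config API.
fn open_writer(index: &Index) -> tantivy::Result<IndexWriter> {
    let config = IndexWriterConfig {
        max_indexing_threads: 4,
        memory_budget: 512 * 1024 * 1024, // overall budget in bytes, now a u64
        ..Default::default()
    };
    index.writer_from_config(config)
}
```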
@@ -468,7 +476,7 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &index, &reader);
+        test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
     }

     #[cfg(feature = "mmap")]
@@ -492,7 +500,7 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &index, &reader);
+        test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
     }

     #[test]
@@ -534,12 +542,16 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
+        test_index_on_commit_reload_policy_aux(field, read_index, &write_index, &reader);
     }
 }

-fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
-    let mut reader_index = reader.index();
+fn test_index_on_commit_reload_policy_aux(
+    field: Field,
+    mut reader_index: Index,
+    index: &Index,
+    reader: &IndexReader,
+) {
     let (sender, receiver) = crossbeam::channel::unbounded();
     let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
         let _ = sender.send(());
@@ -3,7 +3,9 @@ use crate::core::SegmentId;
 use crate::schema::Schema;
 use crate::Opstamp;
 use census::{Inventory, TrackedObject};
+use serde;
 use serde::{Deserialize, Serialize};
+use serde_json;
 use std::collections::HashSet;
 use std::fmt;
 use std::path::PathBuf;
@@ -7,6 +7,7 @@ use crate::schema::FieldType;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::termdict::TermDictionary;
+use owned_read::OwnedRead;

 /// The inverted index reader is in charge of accessing
 /// the inverted index associated to a specific field.
@@ -96,7 +97,8 @@ impl InvertedIndexReader {
         let offset = term_info.postings_offset as usize;
         let end_source = self.postings_source.len();
         let postings_slice = self.postings_source.slice(offset, end_source);
-        block_postings.reset(term_info.doc_freq, postings_slice);
+        let postings_reader = OwnedRead::new(postings_slice);
+        block_postings.reset(term_info.doc_freq, postings_reader);
     }

     /// Returns a block postings given a `Term`.
@@ -125,7 +127,7 @@ impl InvertedIndexReader {
         let postings_data = self.postings_source.slice_from(offset);
         BlockSegmentPostings::from_data(
             term_info.doc_freq,
-            postings_data,
+            OwnedRead::new(postings_data),
             self.record_option,
             requested_option,
         )
@@ -1,8 +1,11 @@
 use crate::collector::Collector;
+use crate::collector::SegmentCollector;
 use crate::core::Executor;
 use crate::core::InvertedIndexReader;
 use crate::core::SegmentReader;
 use crate::query::Query;
+use crate::query::Scorer;
+use crate::query::Weight;
 use crate::schema::Document;
 use crate::schema::Schema;
 use crate::schema::{Field, Term};
@@ -14,6 +17,26 @@ use crate::Index;
 use std::fmt;
 use std::sync::Arc;

+fn collect_segment<C: Collector>(
+    collector: &C,
+    weight: &dyn Weight,
+    segment_ord: u32,
+    segment_reader: &SegmentReader,
+) -> crate::Result<C::Fruit> {
+    let mut scorer = weight.scorer(segment_reader, 1.0f32)?;
+    let mut segment_collector = collector.for_segment(segment_ord as u32, segment_reader)?;
+    if let Some(delete_bitset) = segment_reader.delete_bitset() {
+        scorer.for_each(&mut |doc, score| {
+            if delete_bitset.is_alive(doc) {
+                segment_collector.collect(doc, score);
+            }
+        });
+    } else {
+        scorer.for_each(&mut |doc, score| segment_collector.collect(doc, score));
+    }
+    Ok(segment_collector.harvest())
+}
+
 /// Holds a list of `SegmentReader`s ready for search.
 ///
 /// It guarantees that the `Segment` will not be removed before
@@ -140,7 +163,12 @@ impl Searcher {
         let segment_readers = self.segment_readers();
         let fruits = executor.map(
             |(segment_ord, segment_reader)| {
-                collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
+                collect_segment(
+                    collector,
+                    weight.as_ref(),
+                    segment_ord as u32,
+                    segment_reader,
+                )
             },
             segment_readers.iter().enumerate(),
         )?;
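For context, the `collect_segment` helper above is what every plain search call fans out to, once per segment, before the executor merges the per-segment fruits. A minimal usage sketch with the crate's stock `Count` collector; this is ordinary tantivy API and does not depend on this branch:

```rust
use tantivy::collector::Count;
use tantivy::query::Query;
use tantivy::{Index, Result};

// Each segment goes through collect_segment(..); here the per-segment
// fruits are partial counts that get summed into the final usize.
fn count_matches(index: &Index, query: &dyn Query) -> Result<usize> {
    let searcher = index.reader()?.searcher();
    searcher.search(query, &Count)
}
```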
@@ -3,41 +3,140 @@ use crate::core::Index;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::directory::error::{OpenReadError, OpenWriteError};
-use crate::directory::Directory;
+use crate::directory::{Directory, ManagedDirectory, RAMDirectory};
 use crate::directory::{ReadOnlySource, WritePtr};
 use crate::indexer::segment_serializer::SegmentSerializer;
 use crate::schema::Schema;
 use crate::Opstamp;
 use std::fmt;
+use std::ops::{Deref, DerefMut};
 use std::path::PathBuf;
+use crate::indexer::{ResourceManager};
+
+#[derive(Clone)]
+pub(crate) enum SegmentDirectory {
+    Persisted(ManagedDirectory),
+    Volatile {
+        directory: RAMDirectory,
+    },
+}
+
+impl SegmentDirectory {
+    fn new_volatile(memory_manager: ResourceManager) -> SegmentDirectory {
+        SegmentDirectory::Volatile {
+            directory: RAMDirectory::create_with_memory_manager(memory_manager),
+        }
+    }
+}
+
+impl fmt::Debug for SegmentDirectory {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            SegmentDirectory::Volatile { .. } => write!(f, "volatile")?,
+            SegmentDirectory::Persisted(dir) => write!(f, "Persisted({:?})", dir)?,
+        }
+        Ok(())
+    }
+}
+
+impl From<ManagedDirectory> for SegmentDirectory {
+    fn from(directory: ManagedDirectory) -> Self {
+        SegmentDirectory::Persisted(directory)
+    }
+}
+
+impl Deref for SegmentDirectory {
+    type Target = dyn Directory;
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            SegmentDirectory::Volatile { directory, .. } => directory,
+            SegmentDirectory::Persisted(dir) => dir,
+        }
+    }
+}
+
+impl DerefMut for SegmentDirectory {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        match self {
+            SegmentDirectory::Volatile { directory, .. } => directory,
+            SegmentDirectory::Persisted(dir) => dir,
+        }
+    }
+}

 /// A segment is a piece of the index.
 #[derive(Clone)]
 pub struct Segment {
-    index: Index,
+    schema: Schema,
     meta: SegmentMeta,
+    directory: SegmentDirectory,
 }

 impl fmt::Debug for Segment {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "Segment({:?})", self.id().uuid_string())
+        write!(
+            f,
+            "Segment(id={:?}, directory={:?})",
+            self.id().uuid_string(),
+            self.directory
+        )
     }
 }

 impl Segment {
-    /// Creates a new segment given an `Index` and a `SegmentId`
-    pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
-        Segment { index, meta }
+    pub(crate) fn new_persisted(
+        meta: SegmentMeta,
+        directory: ManagedDirectory,
+        schema: Schema,
+    ) -> Segment {
+        Segment {
+            meta,
+            schema,
+            directory: SegmentDirectory::from(directory),
+        }
     }

-    /// Returns the index the segment belongs to.
-    pub fn index(&self) -> &Index {
-        &self.index
+    /// Creates a new segment that embeds its own `RAMDirectory`.
+    ///
+    /// That segment is entirely dissociated from the index directory.
+    /// It will be persisted by a background thread in charge of IO.
+    pub fn new_volatile(meta: SegmentMeta, schema: Schema, resource_manager: ResourceManager) -> Segment {
+        Segment {
+            schema,
+            meta,
+            directory: SegmentDirectory::new_volatile(resource_manager),
+        }
+    }
+
+    /// Creates a new segment given an `Index` and a `SegmentId`
+    pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
+        let segment_directory = index.directory().clone();
+        Segment::new_persisted(meta, segment_directory, index.schema())
+    }
+
+    /// Persists a given `Segment` to a directory.
+    pub fn persist(&mut self, mut dest_directory: ManagedDirectory) -> crate::Result<()> {
+        if let SegmentDirectory::Persisted(_) = self.directory {
+            // this segment is already persisted.
+            return Ok(());
+        }
+        if let SegmentDirectory::Volatile { directory, .. } = &self.directory {
+            directory.persist(&mut dest_directory)?;
+        }
+        self.directory = SegmentDirectory::Persisted(dest_directory);
+        Ok(())
+    }
+
+    pub fn into_volatile(&self, memory_manager: ResourceManager) -> Segment {
+        Segment::new_volatile(self.meta.clone(), self.schema.clone(), memory_manager)
     }

     /// Returns our index's schema.
     pub fn schema(&self) -> Schema {
-        self.index.schema()
+        self.schema.clone()
     }

     /// Returns the segment meta-information
@@ -51,16 +150,18 @@ impl Segment {
     /// as we finalize a fresh new segment.
     pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
         Segment {
-            index: self.index,
+            schema: self.schema,
             meta: self.meta.with_max_doc(max_doc),
+            directory: self.directory,
         }
     }

     #[doc(hidden)]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
         Segment {
-            index: self.index,
+            schema: self.schema,
             meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
+            directory: self.directory,
         }
     }

@@ -80,15 +181,15 @@ impl Segment {
     /// Open one of the component file for a *regular* read.
     pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
         let path = self.relative_path(component);
-        let source = self.index.directory().open_read(&path)?;
+        let source = self.directory.open_read(&path)?;
         Ok(source)
     }

     /// Open one of the component file for *regular* write.
     pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
         let path = self.relative_path(component);
-        let write = self.index.directory_mut().open_write(&path)?;
-        Ok(write)
+        let wrt = self.directory.open_write(&path)?;
+        Ok(wrt)
     }
 }
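The `Deref`/`DerefMut` impls are what keep call sites such as `open_read` and `open_write` agnostic of whether a segment is volatile or persisted. A standalone sketch of the same trick, with invented `Storage` types standing in for the real directories:

```rust
use std::ops::Deref;

trait Storage {
    fn name(&self) -> &'static str;
}

struct RamStorage;
struct DiskStorage;

impl Storage for RamStorage {
    fn name(&self) -> &'static str { "ram" }
}
impl Storage for DiskStorage {
    fn name(&self) -> &'static str { "disk" }
}

// Same shape as SegmentDirectory: the enum picks the backend, while Deref
// lets every call site use it as a plain `dyn Storage`.
enum Backend {
    Volatile(RamStorage),
    Persisted(DiskStorage),
}

impl Deref for Backend {
    type Target = dyn Storage;

    fn deref(&self) -> &Self::Target {
        match self {
            Backend::Volatile(ram) => ram,
            Backend::Persisted(disk) => disk,
        }
    }
}

fn main() {
    let backend = Backend::Volatile(RamStorage);
    assert_eq!(backend.name(), "ram");
}
```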
@@ -295,8 +295,8 @@ impl SegmentReader {
     }

     /// Returns an iterator that will iterate over the alive document ids
-    pub fn doc_ids_alive<'a>(&'a self) -> impl Iterator<Item = DocId> + 'a {
-        (0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
+    pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
+        SegmentReaderAliveDocsIterator::new(&self)
     }

     /// Summarize total space usage of this segment.
@@ -324,6 +324,52 @@ impl fmt::Debug for SegmentReader {
     }
 }

+/// Implements the iterator trait to allow easy iteration
+/// over non-deleted ("alive") DocIds in a SegmentReader
+pub struct SegmentReaderAliveDocsIterator<'a> {
+    reader: &'a SegmentReader,
+    max_doc: DocId,
+    current: DocId,
+}
+
+impl<'a> SegmentReaderAliveDocsIterator<'a> {
+    pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
+        SegmentReaderAliveDocsIterator {
+            reader,
+            max_doc: reader.max_doc(),
+            current: 0,
+        }
+    }
+}
+
+impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
+    type Item = DocId;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // TODO: Use TinySet (like in BitSetDocSet) to speed this process up
+        if self.current >= self.max_doc {
+            return None;
+        }
+
+        // find the next alive doc id
+        while self.reader.is_deleted(self.current) {
+            self.current += 1;
+
+            if self.current >= self.max_doc {
+                return None;
+            }
+        }
+
+        // capture the current alive DocId
+        let result = Some(self.current);
+
+        // move down the chain
+        self.current += 1;

+        result
+    }
+}
+
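A short usage sketch for the new iterator; `SegmentReader` is re-exported at the crate root, and since `SegmentReaderAliveDocsIterator` implements `Iterator`, the usual adapters apply:

```rust
use tantivy::SegmentReader;

// Count the non-deleted documents of one segment. Before this change the
// method returned an anonymous `impl Iterator`; the call site is unchanged.
fn alive_doc_count(segment_reader: &SegmentReader) -> usize {
    segment_reader.doc_ids_alive().count()
}
```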
 #[cfg(test)]
 mod test {
     use crate::core::Index;
@@ -11,6 +11,7 @@ use crate::error::DataCorruption;
 use crate::Directory;

 use crc32fast::Hasher;
+use serde_json;
 use std::collections::HashSet;
 use std::io;
 use std::io::Write;
@@ -1,3 +1,10 @@
+use fs2;
+use notify;
+
+use self::fs2::FileExt;
+use self::notify::RawEvent;
+use self::notify::RecursiveMode;
+use self::notify::Watcher;
 use crate::core::META_FILEPATH;
 use crate::directory::error::LockError;
 use crate::directory::error::{
@@ -13,11 +20,8 @@ use crate::directory::WatchCallback;
 use crate::directory::WatchCallbackList;
 use crate::directory::WatchHandle;
 use crate::directory::{TerminatingWrite, WritePtr};
-use fs2::FileExt;
+use atomicwrites;
 use memmap::Mmap;
-use notify::RawEvent;
-use notify::RecursiveMode;
-use notify::Watcher;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::convert::From;
@@ -220,13 +224,17 @@ struct MmapDirectoryInner {
 }

 impl MmapDirectoryInner {
-    fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
-        MmapDirectoryInner {
+    fn new(
+        root_path: PathBuf,
+        temp_directory: Option<TempDir>,
+    ) -> Result<MmapDirectoryInner, OpenDirectoryError> {
+        let mmap_directory_inner = MmapDirectoryInner {
             root_path,
             mmap_cache: Default::default(),
             _temp_directory: temp_directory,
             watcher: RwLock::new(None),
-        }
+        };
+        Ok(mmap_directory_inner)
     }

     fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
@@ -260,11 +268,14 @@ impl fmt::Debug for MmapDirectory {
 }

 impl MmapDirectory {
-    fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectory {
-        let inner = MmapDirectoryInner::new(root_path, temp_directory);
-        MmapDirectory {
+    fn new(
+        root_path: PathBuf,
+        temp_directory: Option<TempDir>,
+    ) -> Result<MmapDirectory, OpenDirectoryError> {
+        let inner = MmapDirectoryInner::new(root_path, temp_directory)?;
+        Ok(MmapDirectory {
             inner: Arc::new(inner),
-        }
+        })
     }

     /// Creates a new MmapDirectory in a temporary directory.
@@ -274,7 +285,7 @@ impl MmapDirectory {
     pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
         let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
         let tempdir_path = PathBuf::from(tempdir.path());
-        Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
+        MmapDirectory::new(tempdir_path, Some(tempdir))
     }

     /// Opens a MmapDirectory in a directory.
@@ -292,7 +303,7 @@ impl MmapDirectory {
             directory_path,
         )))
     } else {
-        Ok(MmapDirectory::new(PathBuf::from(directory_path), None))
+        Ok(MmapDirectory::new(PathBuf::from(directory_path), None)?)
     }
 }

@@ -13,7 +13,10 @@ mod footer;
 mod managed_directory;
 mod ram_directory;
 mod read_only_source;
+mod spilling_writer;
 mod watch_event_router;
+mod persistor;
+

 /// Errors specific to the directory module.
 pub mod error;
@@ -23,6 +26,7 @@ pub use self::directory::{Directory, DirectoryClone};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
 pub use self::ram_directory::RAMDirectory;
 pub use self::read_only_source::ReadOnlySource;
+pub(crate) use self::spilling_writer::{SpillingResult, SpillingWriter};
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
 use std::io::{self, BufWriter, Write};
 use std::path::PathBuf;
@@ -79,10 +83,16 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
     }
 }

+impl TerminatingWrite for Vec<u8> {
+    fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
+        Ok(())
+    }
+}
+
 #[cfg(test)]
 impl<'a> TerminatingWrite for &'a mut Vec<u8> {
     fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
-        self.flush()
+        Ok(())
     }
 }
38
src/directory/persistor.rs
Normal file
@@ -0,0 +1,38 @@
+use crate::indexer::{SegmentManager, ResourceManager, MergeOperationInventory};
+use std::thread::JoinHandle;
+use crate::{IndexWriterConfig, SegmentId};
+use std::collections::HashSet;
+
+pub(crate) struct Persistor {
+    memory_manager: ResourceManager,
+    thread_handle: JoinHandle<()>,
+}
+
+impl Persistor {
+    pub(crate) fn create_and_start(
+        segment_manager: SegmentManager,
+        memory_manager: ResourceManager,
+        config: IndexWriterConfig,
+    ) -> crate::Result<Persistor> {
+        let memory_manager_clone = memory_manager.clone();
+        let thread_handle = std::thread::Builder::new()
+            .name("persistor-thread".to_string())
+            .spawn(move || {
+                while let Ok(_) = memory_manager_clone.wait_until_in_range(config.persist_low..) {
+                    segment_manager.largest_segment_not_in_merge();
+                }
+            })
+            .map_err(|_err| {
+                crate::TantivyError::ErrorInThread("Failed to start persistor thread.".to_string())
+            })?;
+        Ok(Persistor {
+            memory_manager,
+            thread_handle,
+        })
+    }
+
+    /// Stop the persisting thread.
+    ///
+    /// The memory manager will be terminated, which will unlock the thread from any waiting
+    /// position.
+    /// This method blocks for a short amount of time until the persistor thread has terminated.
+    pub fn stop(self) {
+        self.memory_manager.terminate();
+        let _ = self.thread_handle.join();
+    }
+}
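`ResourceManager` itself is not part of this diff, so only its contract can be read off the call sites: `allocate`, `total_amount`, `wait_until_in_range`, and `terminate`. The following is an illustrative reconstruction of that contract with a mutex and condvar, not the branch's actual implementation:

```rust
use std::ops::RangeFrom;
use std::sync::{Arc, Condvar, Mutex};

#[derive(Default)]
struct State {
    total: u64,
    terminated: bool,
}

// Sketch only: tracks a byte total and lets the persistor thread block
// until the total crosses a threshold, or until termination.
#[derive(Clone, Default)]
pub struct ResourceManager {
    inner: Arc<(Mutex<State>, Condvar)>,
}

impl ResourceManager {
    pub fn record(&self, bytes: u64) {
        let (lock, condvar) = &*self.inner;
        lock.lock().unwrap().total += bytes;
        condvar.notify_all();
    }

    pub fn total_amount(&self) -> u64 {
        self.inner.0.lock().unwrap().total
    }

    /// Blocks until the tracked total enters `range`; `Err(())` once terminated.
    pub fn wait_until_in_range(&self, range: RangeFrom<u64>) -> Result<u64, ()> {
        let (lock, condvar) = &*self.inner;
        let mut state = lock.lock().unwrap();
        loop {
            if state.terminated {
                return Err(());
            }
            if range.contains(&state.total) {
                return Ok(state.total);
            }
            state = condvar.wait(state).unwrap();
        }
    }

    pub fn terminate(&self) {
        let (lock, condvar) = &*self.inner;
        lock.lock().unwrap().terminated = true;
        condvar.notify_all();
    }
}
```

This also explains why `stop()` terminates the manager before joining: termination is what wakes the persistor thread out of `wait_until_in_range` so the join can complete.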
@@ -11,6 +11,7 @@ use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
 use std::path::{Path, PathBuf};
 use std::result;
 use std::sync::{Arc, RwLock};
+use crate::indexer::ResourceManager;

 /// Writer associated with the `RAMDirectory`
 ///
@@ -82,11 +83,12 @@ impl TerminatingWrite for VecWriter {
 struct InnerDirectory {
     fs: HashMap<PathBuf, ReadOnlySource>,
     watch_router: WatchCallbackList,
+    memory_manager: ResourceManager,
 }

 impl InnerDirectory {
     fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
-        let data = ReadOnlySource::new(Vec::from(data));
+        let data = ReadOnlySource::new_with_allocation(Vec::from(data), &self.memory_manager);
         self.fs.insert(path, data).is_some()
     }

@@ -112,8 +114,8 @@ impl InnerDirectory {
         self.watch_router.subscribe(watch_handle)
     }

-    fn total_mem_usage(&self) -> usize {
-        self.fs.values().map(|f| f.len()).sum()
+    fn total_mem_usage(&self) -> u64 {
+        self.fs.values().map(|source| source.len() as u64).sum()
     }
 }

@@ -134,14 +136,30 @@ pub struct RAMDirectory {
 }

 impl RAMDirectory {
-    /// Constructor
+    /// Creates a new RAMDirectory.
+    ///
+    /// Check `.create_with_memory_manager(..)` if you want to associate an external memory
+    /// manager to your RAMDirectory.
     pub fn create() -> RAMDirectory {
-        Self::default()
+        RAMDirectory::default()
+    }
+
+    /// Constructor
+    pub fn create_with_memory_manager(memory_manager: ResourceManager) -> RAMDirectory {
+        let inner_directory = InnerDirectory {
+            fs: Default::default(),
+            watch_router: Default::default(),
+            memory_manager
+        };
+        RAMDirectory {
+            fs: Arc::new(RwLock::new(inner_directory))
+        }
     }

     /// Returns the sum of the size of the different files
     /// in the RAMDirectory.
-    pub fn total_mem_usage(&self) -> usize {
+    pub fn total_mem_usage(&self) -> u64 {
         self.fs.read().unwrap().total_mem_usage()
     }
@@ -227,6 +245,9 @@ mod tests {
     use crate::Directory;
     use std::io::Write;
     use std::path::Path;
+    use crate::indexer::ResourceManager;
+    use crate::directory::TerminatingWrite;
+    use std::mem;

     #[test]
     fn test_persist() {
@@ -244,4 +265,59 @@ mod tests {
         assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
         assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
     }
+
+    #[test]
+    fn test_memory_manager_several_path() {
+        let memory_manager = ResourceManager::default();
+        let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+        assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcd").is_ok());
+        assert_eq!(memory_manager.total_amount(), 4u64);
+        assert!(ram_directory.atomic_write(Path::new("/toto"), b"abcde").is_ok());
+        assert_eq!(memory_manager.total_amount(), 9u64);
+    }
+
+    #[test]
+    fn test_memory_manager_override() {
+        let memory_manager = ResourceManager::default();
+        let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+        assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcde").is_ok());
+        assert_eq!(memory_manager.total_amount(), 5u64);
+        assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcdef").is_ok());
+        assert_eq!(memory_manager.total_amount(), 6u64);
+    }
+
+    #[test]
+    fn test_memory_manager_seq_wrt() {
+        let memory_manager = ResourceManager::default();
+        let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+        let mut wrt = ram_directory.open_write(Path::new("/titi")).unwrap();
+        assert!(wrt.write_all(b"abcde").is_ok());
+        assert!(wrt.terminate().is_ok());
+        assert_eq!(memory_manager.total_amount(), 5u64);
+        assert!(ram_directory.atomic_write(Path::new("/titi"), b"abcdef").is_ok());
+        assert_eq!(memory_manager.total_amount(), 6u64);
+    }
+
+    #[test]
+    fn test_release_on_drop() {
+        let memory_manager = ResourceManager::default();
+        let mut ram_directory = RAMDirectory::create_with_memory_manager(memory_manager.clone());
+        let mut wrt = ram_directory.open_write(Path::new("/titi")).unwrap();
+        assert!(wrt.write_all(b"abcde").is_ok());
+        assert!(wrt.terminate().is_ok());
+        assert_eq!(memory_manager.total_amount(), 5u64);
+        let mut wrt2 = ram_directory.open_write(Path::new("/toto")).unwrap();
+        assert!(wrt2.write_all(b"abcdefghijkl").is_ok());
+        assert!(wrt2.terminate().is_ok());
+        assert_eq!(memory_manager.total_amount(), 17u64);
+        let source = ram_directory.open_read(Path::new("/titi")).unwrap();
+        let source_clone = source.clone();
+        assert_eq!(memory_manager.total_amount(), 17u64);
+        mem::drop(ram_directory);
+        assert_eq!(memory_manager.total_amount(), 5u64);
+        mem::drop(source);
+        assert_eq!(memory_manager.total_amount(), 5u64);
+        mem::drop(source_clone);
+        assert_eq!(memory_manager.total_amount(), 0u64);
+    }
 }
@@ -2,6 +2,7 @@ use crate::common::HasLen;
 use stable_deref_trait::{CloneStableDeref, StableDeref};
 use std::ops::Deref;
 use std::sync::Arc;
+use crate::indexer::{Allocation, ResourceManager};

 pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;

@@ -15,6 +16,7 @@ pub struct ReadOnlySource {
     data: Arc<BoxedData>,
     start: usize,
     stop: usize,
+    allocation: Option<Arc<Allocation>>
 }

 unsafe impl StableDeref for ReadOnlySource {}
@@ -35,11 +37,13 @@ impl From<Arc<BoxedData>> for ReadOnlySource {
             data,
             start: 0,
             stop: len,
+            allocation: None
         }
     }
 }

 impl ReadOnlySource {

     pub(crate) fn new<D>(data: D) -> ReadOnlySource
     where
         D: Deref<Target = [u8]> + Send + Sync + 'static,
@@ -49,9 +53,24 @@ impl ReadOnlySource {
             data: Arc::new(Box::new(data)),
             start: 0,
             stop: len,
+            allocation: None
         }
     }

+    pub(crate) fn new_with_allocation<D>(data: D, memory_manager: &ResourceManager) -> ReadOnlySource
+    where
+        D: Deref<Target = [u8]> + Send + Sync + 'static,
+    {
+        let len = data.len();
+        ReadOnlySource {
+            data: Arc::new(Box::new(data)),
+            start: 0,
+            stop: len,
+            allocation: Some(Arc::new(memory_manager.allocate(len as u64)))
+        }
+    }
+
     /// Creates an empty ReadOnlySource
     pub fn empty() -> ReadOnlySource {
         ReadOnlySource::new(&[][..])
@@ -98,6 +117,7 @@ impl ReadOnlySource {
             data: self.data.clone(),
             start: self.start + start,
             stop: self.start + stop,
+            allocation: self.allocation.clone()
         }
     }
 }
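The `allocation: Option<Arc<Allocation>>` field implies RAII accounting: every slice of a source shares one allocation, and the bytes are subtracted from the manager only when the last clone drops, which is exactly what the `test_release_on_drop` test earlier in this diff checks. A self-contained sketch of that accounting; `Counter` and the free `allocate` function are invented stand-ins, not the crate's types:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

#[derive(Default)]
struct Counter(AtomicU64);

// Releases its bytes exactly once, when dropped.
struct Allocation {
    bytes: u64,
    counter: Arc<Counter>,
}

impl Drop for Allocation {
    fn drop(&mut self) {
        self.counter.0.fetch_sub(self.bytes, Ordering::SeqCst);
    }
}

fn allocate(counter: &Arc<Counter>, bytes: u64) -> Allocation {
    counter.0.fetch_add(bytes, Ordering::SeqCst);
    Allocation { bytes, counter: Arc::clone(counter) }
}

fn main() {
    let counter = Arc::new(Counter::default());
    let alloc = Arc::new(allocate(&counter, 5));
    let slice_view = Arc::clone(&alloc); // a slice of a source shares the allocation
    drop(alloc);
    assert_eq!(counter.0.load(Ordering::SeqCst), 5); // still held by slice_view
    drop(slice_view);
    assert_eq!(counter.0.load(Ordering::SeqCst), 0); // last clone gone: released
}
```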
186
src/directory/spilling_writer.rs
Normal file
@@ -0,0 +1,186 @@
+use crate::common::MutableEnum;
+use crate::directory::{TerminatingWrite, WritePtr};
+use std::io::{self, Write};
+
+/// Represents the state of the `SpillingWriter`.
+enum SpillingState {
+    Buffer {
+        buffer: Vec<u8>,
+        capacity: usize,
+        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
+    },
+    Spilled(WritePtr),
+}
+
+impl SpillingState {
+    fn new(
+        limit: usize,
+        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
+    ) -> SpillingState {
+        SpillingState::Buffer {
+            buffer: Vec::with_capacity(limit),
+            capacity: limit,
+            write_factory,
+        }
+    }
+
+    // Change the state in such a way that it is ready to accept
+    // `extra_capacity` bytes.
+    fn reserve(self, extra_capacity: usize) -> io::Result<SpillingState> {
+        match self {
+            SpillingState::Buffer {
+                buffer,
+                capacity,
+                write_factory,
+            } => {
+                if capacity >= extra_capacity {
+                    Ok(SpillingState::Buffer {
+                        buffer,
+                        capacity: capacity - extra_capacity,
+                        write_factory,
+                    })
+                } else {
+                    let mut wrt = write_factory()?;
+                    wrt.write_all(&buffer[..])?;
+                    Ok(SpillingState::Spilled(wrt))
+                }
+            }
+            SpillingState::Spilled(wrt) => Ok(SpillingState::Spilled(wrt)),
+        }
+    }
+}
+
+/// The `SpillingWriter` is a writer that starts by writing in a
+/// buffer.
+///
+/// Once a memory limit is reached, the spilling writer will
+/// call a given `WritePtr` factory and start spilling into it.
+///
+/// Spilling here includes:
+/// - writing all of the data that were written in the in-memory buffer so far
+/// - writing subsequent data as well.
+///
+/// Once entering "spilling" mode, the `SpillingWriter` stays in this mode.
+pub struct SpillingWriter {
+    state: MutableEnum<SpillingState>,
+}
+
+impl SpillingWriter {
+    /// Creates a new `SpillingWriter`.
+    pub fn new(
+        limit: usize,
+        write_factory: Box<dyn FnOnce() -> io::Result<WritePtr>>,
+    ) -> SpillingWriter {
+        let state = SpillingState::new(limit, write_factory);
+        SpillingWriter {
+            state: MutableEnum::wrap(state),
+        }
+    }
+
+    /// Finalizes the `SpillingWriter`.
+    ///
+    /// The `SpillingResult` object expresses whether the `SpillingWriter`
+    /// stayed under the spilling limit. In that case, the buffer is returned.
+    ///
+    /// If the writer reached the spilling mode, the underlying `WritePtr`
+    /// is terminated and SpillingResult::Spilled is returned.
+    pub fn finalize(self) -> io::Result<SpillingResult> {
+        match self.state.into() {
+            SpillingState::Spilled(wrt) => {
+                wrt.terminate()?;
+                Ok(SpillingResult::Spilled)
+            }
+            SpillingState::Buffer { buffer, .. } => Ok(SpillingResult::Buffer(buffer)),
+        }
+    }
+}
+
+/// enum used as the result of `.finalize()`.
+pub enum SpillingResult {
+    Spilled,
+    Buffer(Vec<u8>),
+}
+
+impl io::Write for SpillingWriter {
+    fn write(&mut self, payload: &[u8]) -> io::Result<usize> {
+        self.write_all(payload)?;
+        Ok(payload.len())
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        if let SpillingState::Spilled(wrt) = &mut *self.state {
+            wrt.flush()?;
+        }
+        Ok(())
+    }
+
+    fn write_all(&mut self, payload: &[u8]) -> io::Result<()> {
+        self.state.map_mutate(|mut state| {
+            state = state.reserve(payload.len())?;
+            match &mut state {
+                SpillingState::Buffer { buffer, .. } => {
+                    buffer.extend_from_slice(payload);
+                }
+                SpillingState::Spilled(wrt) => {
+                    wrt.write_all(payload)?;
+                }
+            }
+            Ok(state)
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::SpillingWriter;
+    use crate::directory::spilling_writer::SpillingResult;
+    use crate::directory::RAMDirectory;
+    use crate::Directory;
+    use std::io::{self, Write};
+    use std::path::Path;
+
+    #[test]
+    fn test_no_spilling() {
+        let ram_directory = RAMDirectory::create();
+        let mut ram_directory_clone = ram_directory.clone();
+        let path = Path::new("test");
+        let write_factory = Box::new(move || {
+            ram_directory_clone
+                .open_write(path)
+                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
+        });
+        let mut spilling_wrt = SpillingWriter::new(10, write_factory);
+        assert!(spilling_wrt.write_all(b"abcd").is_ok());
+        if let SpillingResult::Buffer(buf) = spilling_wrt.finalize().unwrap() {
+            assert_eq!(buf, b"abcd")
+        } else {
+            panic!("spill writer should not have spilled");
+        }
+        assert!(!ram_directory.exists(path));
+    }
+
+    #[test]
+    fn test_spilling() {
+        let ram_directory = RAMDirectory::create();
+        let mut ram_directory_clone = ram_directory.clone();
+        let path = Path::new("test");
+        let write_factory = Box::new(move || {
+            ram_directory_clone
+                .open_write(path)
+                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
+        });
+        let mut spilling_wrt = SpillingWriter::new(10, write_factory);
+        assert!(spilling_wrt.write_all(b"abcd").is_ok());
+        assert!(spilling_wrt.write_all(b"efghijklmnop").is_ok());
+        if let SpillingResult::Spilled = spilling_wrt.finalize().unwrap() {
+        } else {
+            panic!("spill writer should have spilled");
+        }
+        assert_eq!(
+            ram_directory.atomic_read(path).unwrap(),
+            b"abcdefghijklmnop"
+        );
+    }
+}
130
src/docset.rs
@@ -1,47 +1,58 @@
+use crate::common::BitSet;
 use crate::fastfield::DeleteBitSet;
 use crate::DocId;
 use std::borrow::Borrow;
 use std::borrow::BorrowMut;
+use std::cmp::Ordering;

-/// Sentinel value returned when a DocSet has been entirely consumed.
-///
-/// This is not u32::MAX as one would have expected, due to the lack of SSE2 instructions
-/// to compare [u32; 4].
-pub const TERMINATED: DocId = std::i32::MAX as u32;
+/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
+#[derive(PartialEq, Eq, Debug)]
+pub enum SkipResult {
+    /// target was in the docset
+    Reached,
+    /// target was not in the docset, skipping stopped as a greater element was found
+    OverStep,
+    /// the docset was entirely consumed without finding the target, nor any
+    /// element greater than the target.
+    End,
+}
+
 /// Represents an iterable set of sorted doc ids.
 pub trait DocSet {
     /// Goes to the next element.
-    ///
-    /// The DocId of the next element is returned.
-    /// In other words we should always have:
-    /// ```ignore
-    /// let doc = docset.advance();
-    /// assert_eq!(doc, docset.doc());
-    /// ```
-    ///
-    /// If we reached the end of the DocSet, TERMINATED should be returned.
-    ///
-    /// Calling `.advance()` on a terminated DocSet should be supported, and TERMINATED should
-    /// be returned.
-    /// TODO Test existing docsets.
-    fn advance(&mut self) -> DocId;
+    /// `.advance(...)` needs to be called a first time to point to the correct
+    /// element.
+    fn advance(&mut self) -> bool;

-    /// Advances the DocSet forward until reaching the target, or going to the
-    /// lowest DocId greater than the target.
+    /// After skipping, position the iterator in such a way that `.doc()`
+    /// will return a value greater than or equal to target.
     ///
-    /// If the end of the DocSet is reached, TERMINATED is returned.
+    /// SkipResult expresses whether the `target` value was reached, overstepped,
+    /// or if the `DocSet` was entirely consumed without finding any value
+    /// greater or equal to the `target`.
     ///
-    /// Calling `.seek(target)` on a terminated DocSet is legal. Implementations
-    /// of DocSet should support it.
+    /// WARNING: Calling skip always advances the docset.
+    /// More specifically, if the docset is already positioned on the target,
+    /// skipping will advance to the next position and return SkipResult::OverStep.
     ///
-    /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
-    fn seek(&mut self, target: DocId) -> DocId {
-        let mut doc = self.doc();
-        while doc < target {
-            doc = self.advance();
+    /// If `.skip_next()` oversteps, then the docset must be positioned correctly
+    /// on an existing document. In other words, `.doc()` should return the first document
+    /// greater than the `target`.
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        if !self.advance() {
+            return SkipResult::End;
+        }
+        loop {
+            match self.doc().cmp(&target) {
+                Ordering::Less => {
+                    if !self.advance() {
+                        return SkipResult::End;
+                    }
+                }
+                Ordering::Equal => return SkipResult::Reached,
+                Ordering::Greater => return SkipResult::OverStep,
+            }
         }
-        doc
     }

     /// Fills a given mutable buffer with the next doc ids from the
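A sketch of how the nrt side of this API is driven from the outside; it assumes `DocSet` and `SkipResult` are re-exported at the crate root, as they were in released tantivy versions carrying this API:

```rust
use tantivy::{DocId, DocSet, SkipResult};

/// Collects every matching DocId greater than or equal to `target`.
/// Assumes `docset` is freshly created, i.e. `.advance()` was never called.
fn docs_from<D: DocSet>(mut docset: D, target: DocId) -> Vec<DocId> {
    let mut docs = Vec::new();
    match docset.skip_next(target) {
        // The docset ran out before reaching `target`.
        SkipResult::End => return docs,
        // Either exactly on `target`, or on the first doc past it.
        SkipResult::Reached | SkipResult::OverStep => docs.push(docset.doc()),
    }
    while docset.advance() {
        docs.push(docset.doc());
    }
    docs
}
```

Note the contrast with the 0.13-style API this branch rolls back: there, a fresh docset already points at its first document and iteration compares against the `TERMINATED` sentinel instead of checking a boolean.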
@@ -60,38 +71,38 @@ pub trait DocSet {
     /// use case where batching. The normal way to
     /// go through the `DocId`'s is to call `.advance()`.
     fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
-        if self.doc() == TERMINATED {
-            return 0;
-        }
         for (i, buffer_val) in buffer.iter_mut().enumerate() {
-            *buffer_val = self.doc();
-            if self.advance() == TERMINATED {
-                return i + 1;
+            if self.advance() {
+                *buffer_val = self.doc();
+            } else {
+                return i;
             }
         }
         buffer.len()
     }

     /// Returns the current document
-    /// Right after creating a new DocSet, the docset points to the first document.
-    ///
-    /// If the DocSet is empty, .doc() should return `TERMINATED`.
     fn doc(&self) -> DocId;

     /// Returns a best-effort hint of the
     /// length of the docset.
     fn size_hint(&self) -> u32;

+    /// Appends all docs to a `bitset`.
+    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
+        while self.advance() {
+            bitset.insert(self.doc());
+        }
+    }
+
     /// Returns the number documents matching.
     /// Calling this method consumes the `DocSet`.
     fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
         let mut count = 0u32;
-        let mut doc = self.doc();
-        while doc != TERMINATED {
-            if !delete_bitset.is_deleted(doc) {
+        while self.advance() {
+            if !delete_bitset.is_deleted(self.doc()) {
                 count += 1u32;
             }
-            doc = self.advance();
         }
         count
     }
@@ -103,42 +114,22 @@ pub trait DocSet {
     /// given by `count()`.
     fn count_including_deleted(&mut self) -> u32 {
         let mut count = 0u32;
-        let mut doc = self.doc();
-        while doc != TERMINATED {
+        while self.advance() {
             count += 1u32;
-            doc = self.advance();
         }
         count
     }
 }

-impl<'a> DocSet for &'a mut dyn DocSet {
-    fn advance(&mut self) -> u32 {
-        (**self).advance()
-    }
-
-    fn seek(&mut self, target: DocId) -> DocId {
-        (**self).seek(target)
-    }
-
-    fn doc(&self) -> u32 {
-        (**self).doc()
-    }
-
-    fn size_hint(&self) -> u32 {
-        (**self).size_hint()
-    }
-}
-
 impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
        let unboxed: &mut TDocSet = self.borrow_mut();
        unboxed.advance()
    }

-    fn seek(&mut self, target: DocId) -> DocId {
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
        let unboxed: &mut TDocSet = self.borrow_mut();
-        unboxed.seek(target)
+        unboxed.skip_next(target)
    }

    fn doc(&self) -> DocId {
@@ -160,4 +151,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
    let unboxed: &mut TDocSet = self.borrow_mut();
|
||||||
unboxed.count_including_deleted()
|
unboxed.count_including_deleted()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn append_to_bitset(&mut self, bitset: &mut BitSet) {
|
||||||
|
let unboxed: &mut TDocSet = self.borrow_mut();
|
||||||
|
unboxed.append_to_bitset(bitset);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
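The two sides of this hunk follow different cursor conventions: the left-hand side is the 0.13-style cursor (a fresh DocSet already points at its first document, `advance()` returns the next `DocId`, and `TERMINATED` marks exhaustion), while the right-hand side reverts to the older convention (the cursor starts *before* the first element, `advance()` returns `bool`, and `skip_next` reports a `SkipResult`). A minimal sketch of the right-hand convention; `VecDocSet` is a hypothetical toy type for illustration, not one from tantivy:

```rust
use std::cmp::Ordering;

type DocId = u32;

#[derive(Debug, PartialEq)]
enum SkipResult {
    Reached,
    OverStep,
    End,
}

// A toy DocSet over a sorted Vec<DocId>: the cursor starts before the
// first element and `advance` returns whether a new element was reached.
struct VecDocSet {
    docs: Vec<DocId>,
    cursor: usize,
}

impl VecDocSet {
    fn new(docs: Vec<DocId>) -> VecDocSet {
        VecDocSet { docs, cursor: usize::max_value() }
    }

    fn advance(&mut self) -> bool {
        self.cursor = self.cursor.wrapping_add(1);
        self.cursor < self.docs.len()
    }

    fn doc(&self) -> DocId {
        self.docs[self.cursor]
    }

    // Mirrors the default `skip_next` implementation in the hunk above.
    fn skip_next(&mut self, target: DocId) -> SkipResult {
        if !self.advance() {
            return SkipResult::End;
        }
        loop {
            match self.doc().cmp(&target) {
                Ordering::Less => {
                    if !self.advance() {
                        return SkipResult::End;
                    }
                }
                Ordering::Equal => return SkipResult::Reached,
                Ordering::Greater => return SkipResult::OverStep,
            }
        }
    }
}

fn main() {
    // Exhaustive iteration: call `advance` first, then read `doc()`.
    let mut docset = VecDocSet::new(vec![1, 3, 7]);
    while docset.advance() {
        println!("doc={}", docset.doc());
    }

    // Skipping always consumes at least one document.
    let mut docset = VecDocSet::new(vec![1, 3, 7]);
    assert_eq!(docset.skip_next(3), SkipResult::Reached);
    assert_eq!(docset.skip_next(4), SkipResult::OverStep); // lands on 7
    assert_eq!(docset.doc(), 7);
    assert_eq!(docset.skip_next(8), SkipResult::End);
}
```

Note that, per the doc comment above, `skip_next` always advances: seeking to the document the cursor is already on moves past it rather than returning `Reached` again.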
@@ -7,6 +7,7 @@ use crate::directory::error::{Incompatibility, LockError};
 use crate::fastfield::FastFieldNotAvailableError;
 use crate::query;
 use crate::schema;
+use serde_json;
 use std::fmt;
 use std::path::PathBuf;
 use std::sync::PoisonError;
@@ -89,7 +89,7 @@ mod tests {
 
     fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
         let test_path = PathBuf::from("test");
-        let mut directory = RAMDirectory::create();
+        let mut directory = RAMDirectory::default();
         {
             let mut writer = directory.open_write(&*test_path).unwrap();
             write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
@@ -8,22 +8,26 @@ use crate::core::SegmentComponent;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::core::SegmentReader;
-use crate::directory::TerminatingWrite;
 use crate::directory::{DirectoryLock, GarbageCollectionResult};
-use crate::docset::{DocSet, TERMINATED};
+use crate::directory::{TerminatingWrite, WatchCallbackList};
+use crate::docset::DocSet;
 use crate::error::TantivyError;
 use crate::fastfield::write_delete_bitset;
 use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
 use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
 use crate::indexer::operation::DeleteOperation;
+use crate::indexer::segment_manager::SegmentRegisters;
+use crate::indexer::segment_register::SegmentRegister;
 use crate::indexer::stamper::Stamper;
-use crate::indexer::MergePolicy;
-use crate::indexer::SegmentEntry;
+use crate::indexer::{SegmentEntry, ResourceManager};
 use crate::indexer::SegmentWriter;
+use crate::indexer::{IndexWriterConfig, MergePolicy};
+use crate::reader::NRTReader;
 use crate::schema::Document;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
-use crate::Opstamp;
+use crate::tokenizer::TokenizerManager;
+use crate::{IndexReader, Opstamp};
 use crossbeam::channel;
 use futures::executor::block_on;
 use futures::future::Future;
@@ -31,18 +35,10 @@ use smallvec::smallvec;
 use smallvec::SmallVec;
 use std::mem;
 use std::ops::Range;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;
 use std::thread::JoinHandle;
 
-// Size of the margin for the heap. A segment is closed when the remaining memory
-// in the heap goes below MARGIN_IN_BYTES.
-pub const MARGIN_IN_BYTES: usize = 1_000_000;
-
-// We impose the memory per thread to be at least 3 MB.
-pub const HEAP_SIZE_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
-pub const HEAP_SIZE_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
-
 // Add document will block if the number of docs waiting in the queue to be indexed
 // reaches `PIPELINE_MAX_SIZE_IN_DOCS`
 const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
@@ -69,8 +65,9 @@ pub struct IndexWriter {
     _directory_lock: Option<DirectoryLock>,
 
     index: Index,
+    config: IndexWriterConfig,
 
-    heap_size_in_bytes_per_thread: usize,
+    segment_registers: Arc<RwLock<SegmentRegisters>>,
 
     workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
 
@@ -80,13 +77,14 @@ pub struct IndexWriter {
     segment_updater: SegmentUpdater,
 
     worker_id: usize,
 
-    num_threads: usize,
-
     delete_queue: DeleteQueue,
 
     stamper: Stamper,
     committed_opstamp: Opstamp,
+
+    on_commit: WatchCallbackList,
+
+    memory_manager: ResourceManager,
 }
 
 fn compute_deleted_bitset(
@@ -112,15 +110,15 @@ fn compute_deleted_bitset(
         if let Some(mut docset) =
             inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
         {
-            let mut deleted_doc = docset.doc();
-            while deleted_doc != TERMINATED {
+            while docset.advance() {
+                let deleted_doc = docset.doc();
                 if deleted_doc < limit_doc {
                     delete_bitset.insert(deleted_doc);
                     might_have_changed = true;
                 }
-                deleted_doc = docset.advance();
             }
         }
 
         delete_cursor.advance();
     }
     Ok(might_have_changed)
@@ -133,7 +131,6 @@ fn compute_deleted_bitset(
 /// For instance, there was no delete operation between the state of the `segment_entry` and
 /// the `target_opstamp`, `segment_entry` is not updated.
 pub(crate) fn advance_deletes(
-    mut segment: Segment,
     segment_entry: &mut SegmentEntry,
     target_opstamp: Opstamp,
 ) -> crate::Result<()> {
@@ -142,25 +139,33 @@ pub(crate) fn advance_deletes(
         return Ok(());
     }
 
-    if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
+    let delete_bitset_opt = segment_entry.take_delete_bitset();
+
+    // We avoid directly advancing the `SegmentEntry` delete cursor, because
+    // we do not want to end up in an invalid state if the delete bitset
+    // serialization fails.
+    let mut delete_cursor = segment_entry.delete_cursor();
+
+    if delete_bitset_opt.is_none() && segment_entry.delete_cursor().get().is_none() {
         // There has been no `DeleteOperation` between the segment status and `target_opstamp`.
         return Ok(());
     }
 
+    // We open our current serialized segment to compute the new deleted bitset.
+    let segment = segment_entry.segment().clone();
     let segment_reader = SegmentReader::open(&segment)?;
 
     let max_doc = segment_reader.max_doc();
-    let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
-        Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
-        None => BitSet::with_max_value(max_doc),
-    };
+    let mut delete_bitset: BitSet =
+        delete_bitset_opt.unwrap_or_else(|| BitSet::with_max_value(max_doc));
 
     let num_deleted_docs_before = segment.meta().num_deleted_docs();
 
     compute_deleted_bitset(
         &mut delete_bitset,
         &segment_reader,
-        segment_entry.delete_cursor(),
+        &mut delete_cursor,
         &DocToOpstampMapping::None,
         target_opstamp,
     )?;
@@ -179,32 +184,40 @@ pub(crate) fn advance_deletes(
     let num_deleted_docs: u32 = delete_bitset.len() as u32;
     if num_deleted_docs > num_deleted_docs_before {
         // There are new deletes. We need to write a new delete file.
-        segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
-        let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
+        let mut delete_file = segment
+            .with_delete_meta(num_deleted_docs as u32, target_opstamp)
+            .open_write(SegmentComponent::DELETE)?;
         write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
         delete_file.terminate()?;
+        segment_entry.reset_delete_meta(num_deleted_docs as u32, target_opstamp);
     }
 
-    segment_entry.set_meta(segment.meta().clone());
+    // Regardless of whether we did end up having to write a new file or not
+    // we advance the `delete_cursor`. This is an optimisation. We want to ensure we do not
+    // check that a given deleted term does not match any of our docs more than once.
+    segment_entry.set_delete_cursor(delete_cursor);
     Ok(())
 }
 
 fn index_documents(
-    memory_budget: usize,
+    config: IndexWriterConfig,
     segment: Segment,
     grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
     segment_updater: &mut SegmentUpdater,
+    tokenizers: &TokenizerManager,
     mut delete_cursor: DeleteCursor,
+    memory_manager: ResourceManager
 ) -> crate::Result<bool> {
     let schema = segment.schema();
 
-    let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
+    let mut segment_writer = SegmentWriter::for_segment(
+        &config, segment, &schema, tokenizers, memory_manager)?;
     for document_group in grouped_document_iterator {
         for doc in document_group {
             segment_writer.add_document(doc, &schema)?;
         }
         let mem_usage = segment_writer.mem_usage();
-        if mem_usage >= memory_budget - MARGIN_IN_BYTES {
+        if mem_usage >= config.heap_size_before_flushing() {
             info!(
                 "Buffer limit reached, flushing segment with maxdoc={}.",
                 segment_writer.max_doc()
@@ -223,24 +236,14 @@ fn index_documents(
     // the worker thread.
     assert!(max_doc > 0);
 
-    let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
-
-    let segment_with_max_doc = segment.with_max_doc(max_doc);
+    let (segment, doc_opstamps): (Segment, Vec<Opstamp>) = segment_writer.finalize()?;
 
     let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
 
-    let delete_bitset_opt = apply_deletes(
-        &segment_with_max_doc,
-        &mut delete_cursor,
-        &doc_opstamps,
-        last_docstamp,
-    )?;
+    let delete_bitset_opt =
+        apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
 
-    let segment_entry = SegmentEntry::new(
-        segment_with_max_doc.meta().clone(),
-        delete_cursor,
-        delete_bitset_opt,
-    );
+    let segment_entry = SegmentEntry::new(segment, delete_cursor, delete_bitset_opt);
     block_on(segment_updater.schedule_add_segment(segment_entry))?;
     Ok(true)
 }
@@ -292,21 +295,10 @@ impl IndexWriter {
     /// If the heap size per thread is too small, panics.
     pub(crate) fn new(
         index: &Index,
-        num_threads: usize,
-        heap_size_in_bytes_per_thread: usize,
+        mut config: IndexWriterConfig,
         directory_lock: DirectoryLock,
     ) -> crate::Result<IndexWriter> {
-        if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
-            let err_msg = format!(
-                "The heap size per thread needs to be at least {}.",
-                HEAP_SIZE_MIN
-            );
-            return Err(TantivyError::InvalidArgument(err_msg));
-        }
-        if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
-            let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
-            return Err(TantivyError::InvalidArgument(err_msg));
-        }
+        config.validate()?;
         let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
             channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
 
@@ -314,16 +306,26 @@ impl IndexWriter {
 
         let current_opstamp = index.load_metas()?.opstamp;
 
+        let meta = index.load_metas()?;
+
         let stamper = Stamper::new(current_opstamp);
 
+        let commited_segments = SegmentRegister::new(
+            index.directory(),
+            &index.schema(),
+            meta.segments,
+            &delete_queue.cursor(),
+        );
+        let segment_registers = Arc::new(RwLock::new(SegmentRegisters::new(commited_segments)));
+
         let segment_updater =
-            SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
+            SegmentUpdater::create(segment_registers.clone(), index.clone(), stamper.clone())?;
 
         let mut index_writer = IndexWriter {
             _directory_lock: Some(directory_lock),
 
-            heap_size_in_bytes_per_thread,
             index: index.clone(),
+            config,
 
             operation_receiver: document_receiver,
             operation_sender: document_sender,
@@ -331,7 +333,6 @@ impl IndexWriter {
             segment_updater,
 
             workers_join_handle: vec![],
-            num_threads,
 
             delete_queue,
 
@@ -339,6 +340,10 @@ impl IndexWriter {
             stamper,
 
             worker_id: 0,
+            segment_registers,
+            on_commit: Default::default(),
+
+            memory_manager: Default::default()
         };
         index_writer.start_workers()?;
         Ok(index_writer)
@@ -346,7 +351,7 @@ impl IndexWriter {
 
     fn drop_sender(&mut self) {
         let (sender, _receiver) = channel::bounded(1);
-        self.operation_sender = sender;
+        mem::replace(&mut self.operation_sender, sender);
     }
 
     /// If there are some merging threads, blocks until they all finish their work and
@@ -366,23 +371,11 @@ impl IndexWriter {
             })?;
         }
 
-        let result = self
+        self
             .segment_updater
-            .wait_merging_thread()
-            .map_err(|_| TantivyError::ErrorInThread("Failed to join merging thread.".into()));
+            .wait_merging_thread();
 
-        if let Err(ref e) = result {
-            error!("Some merging thread failed {:?}", e);
-        }
-
-        result
-    }
-
-    #[doc(hidden)]
-    pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
-        let delete_cursor = self.delete_queue.cursor();
-        let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
-        block_on(self.segment_updater.schedule_add_segment(segment_entry))
+        Ok(())
     }
 
     /// Creates a new segment.
@@ -405,8 +398,9 @@ impl IndexWriter {
 
         let mut delete_cursor = self.delete_queue.cursor();
 
-        let mem_budget = self.heap_size_in_bytes_per_thread;
        let index = self.index.clone();
+        let config = self.config.clone();
+        let memory_manager = self.memory_manager.clone();
         let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
             .name(format!("thrd-tantivy-index{}", self.worker_id))
             .spawn(move || {
@@ -435,11 +429,13 @@ impl IndexWriter {
                 }
                 let segment = index.new_segment();
                 index_documents(
-                    mem_budget,
+                    config.clone(),
                     segment,
                     &mut document_iterator,
                     &mut segment_updater,
+                    index.tokenizers(),
                     delete_cursor.clone(),
+                    memory_manager.clone()
                 )?;
             }
         })?;
@@ -459,7 +455,7 @@ impl IndexWriter {
     }
 
     fn start_workers(&mut self) -> crate::Result<()> {
-        for _ in 0..self.num_threads {
+        for _ in 0..self.config.max_indexing_threads {
             self.add_indexing_worker()?;
         }
         Ok(())
@@ -564,12 +560,8 @@ impl IndexWriter {
             .take()
            .expect("The IndexWriter does not have any lock. This is a bug, please report.");
 
-        let new_index_writer: IndexWriter = IndexWriter::new(
-            &self.index,
-            self.num_threads,
-            self.heap_size_in_bytes_per_thread,
-            directory_lock,
-        )?;
+        let new_index_writer: IndexWriter =
+            IndexWriter::new(&self.index, self.config.clone(), directory_lock)?;
 
         // the current `self` is dropped right away because of this call.
         //
@@ -608,7 +600,7 @@ impl IndexWriter {
    /// It is also possible to add a payload to the `commit`
     /// using this API.
     /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
-    pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
+    pub fn prepare_commit(&mut self, soft_commit: bool) -> crate::Result<PreparedCommit> {
         // Here, because we join all of the worker threads,
         // all of the segment update for this commit have been
         // sent.
@@ -636,7 +628,7 @@ impl IndexWriter {
         }
 
         let commit_opstamp = self.stamper.stamp();
-        let prepared_commit = PreparedCommit::new(self, commit_opstamp);
+        let prepared_commit = PreparedCommit::new(self, commit_opstamp, soft_commit);
         info!("Prepared commit {}", commit_opstamp);
         Ok(prepared_commit)
     }
@@ -656,7 +648,25 @@ impl IndexWriter {
     /// that made it in the commit.
     ///
     pub fn commit(&mut self) -> crate::Result<Opstamp> {
-        self.prepare_commit()?.commit()
+        self.prepare_commit(false)?.commit()
+    }
+
+    pub fn soft_commit(&mut self) -> crate::Result<Opstamp> {
+        self.prepare_commit(true)?.commit()
+    }
+
+    pub(crate) fn trigger_commit(&self) -> impl Future<Output = ()> {
+        self.on_commit.broadcast()
+    }
+
+    pub fn reader(&self, num_searchers: usize) -> crate::Result<IndexReader> {
+        let nrt_reader = NRTReader::create(
+            num_searchers,
+            self.index.clone(),
+            self.segment_registers.clone(),
+            &self.on_commit,
+        )?;
+        Ok(IndexReader::NRT(nrt_reader))
     }
 
     pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
@@ -1054,7 +1064,8 @@ mod tests {
             index_writer.add_document(doc!(text_field => "a"));
         }
         {
-            let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
+            let mut prepared_commit =
+                index_writer.prepare_commit(false).expect("commit failed");
             prepared_commit.set_payload("first commit");
             prepared_commit.commit().expect("commit failed");
         }
@@ -1087,7 +1098,8 @@ mod tests {
             index_writer.add_document(doc!(text_field => "a"));
         }
         {
-            let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
+            let mut prepared_commit =
+                index_writer.prepare_commit(false).expect("commit failed");
             prepared_commit.set_payload("first commit");
             prepared_commit.abort().expect("commit failed");
         }
@@ -1265,4 +1277,41 @@ mod tests {
         let commit = index_writer.commit();
         assert!(commit.is_ok());
     }
+
+    #[test]
+    fn test_index_writer_reader() {
+        let mut schema_builder = schema::Schema::builder();
+        let idfield = schema_builder.add_text_field("id", STRING);
+        schema_builder.add_text_field("optfield", STRING);
+        let index = Index::create_in_ram(schema_builder.build());
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        index_writer.add_document(doc!(idfield=>"myid"));
+        assert!(index_writer.commit().is_ok());
+        let reader = index_writer.reader(2).unwrap();
+        let searcher = reader.searcher();
+        assert_eq!(searcher.num_docs(), 1u64);
+        index_writer.add_document(doc!(idfield=>"myid"));
+        assert!(index_writer.commit().is_ok());
+        assert_eq!(reader.searcher().num_docs(), 2u64);
+        assert_eq!(searcher.num_docs(), 1u64);
+    }
+
+    #[test]
+    fn test_index_writer_reader_soft_commit() {
+        let mut schema_builder = schema::Schema::builder();
+        let idfield = schema_builder.add_text_field("id", STRING);
+        schema_builder.add_text_field("optfield", STRING);
+        let index = Index::create_in_ram(schema_builder.build());
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        index_writer.add_document(doc!(idfield=>"myid"));
+        assert!(index_writer.soft_commit().is_ok());
+        let nrt_reader = index_writer.reader(2).unwrap();
+        let normal_reader = index.reader_builder().try_into().unwrap();
+        assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
+        assert_eq!(normal_reader.searcher().num_docs(), 0u64);
+        assert!(index_writer.commit().is_ok());
+        assert!(normal_reader.reload().is_ok());
+        assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
+        assert_eq!(normal_reader.searcher().num_docs(), 1u64);
+    }
 }
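The two new tests above pin down the intended visibility semantics: a soft commit is visible to the writer-attached NRT reader but not to a regular directory-backed reader until a durable commit lands. As a compact end-to-end sketch of the same flow against this branch's API (method names are taken from the hunks above; this is an illustration, not code from the diff):

```rust
use tantivy::schema::{Schema, STRING};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let id_field = schema_builder.add_text_field("id", STRING);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

    writer.add_document(doc!(id_field => "doc-1"));
    // A soft commit makes documents visible to the writer's NRT reader
    // without serializing a durable commit point.
    writer.soft_commit()?;

    let nrt_reader = writer.reader(2)?; // 2 pooled searchers
    assert_eq!(nrt_reader.searcher().num_docs(), 1u64);

    // A regular reader only sees durably committed documents.
    let directory_reader = index.reader_builder().try_into()?;
    assert_eq!(directory_reader.searcher().num_docs(), 0u64);

    writer.commit()?;
    directory_reader.reload()?;
    assert_eq!(directory_reader.searcher().num_docs(), 1u64);
    Ok(())
}
```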
src/indexer/index_writer_config.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
+use serde::{Deserialize, Serialize};
+
+// Size of the margin for the heap. A segment is closed when the remaining memory
+// in the heap goes below MARGIN_IN_BYTES.
+const MARGIN_IN_BYTES: u64 = 1_000_000;
+
+// We impose the memory per thread to be at least 3 MB.
+const HEAP_SIZE_MIN: u64 = MARGIN_IN_BYTES * 3u64;
+const HEAP_SIZE_MAX: u64 = u32::max_value() as u64 - MARGIN_IN_BYTES;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct IndexWriterConfig {
+    pub max_indexing_threads: usize,
+    pub max_merging_threads: usize,
+    pub memory_budget: u64,
+    pub store_flush_num_bytes: u64,
+    pub persist_low: u64,
+    pub persist_high: u64,
+}
+
+impl Default for IndexWriterConfig {
+    fn default() -> Self {
+        IndexWriterConfig {
+            max_indexing_threads: 1,
+            max_merging_threads: 3,
+            memory_budget: 50_000_000u64,
+            store_flush_num_bytes: 10_000_000u64,
+            persist_low: 10_000_000u64,
+            persist_high: 50_000_000u64
+        }
+    }
+}
+
+impl IndexWriterConfig {
+    #[cfg(test)]
+    pub fn for_test() -> IndexWriterConfig {
+        IndexWriterConfig {
+            max_indexing_threads: 1,
+            max_merging_threads: 5,
+            memory_budget: 4_000_000u64,
+            store_flush_num_bytes: 500_000u64,
+            persist_low: 2_000_000u64,
+            persist_high: 3_000_000u64,
+        }
+    }
+
+    // Ensures the `IndexWriterConfig` is correct.
+    //
+    // This method checks that the values in the `IndexWriterConfig`
+    // are valid. If it is not, it may mutate some of the values (like `max_num_threads`) to
+    // fit the contracts or return an error with an explicit error message.
+    //
+    // If called twice, the config is guaranteed to not be updated the second time.
+    pub fn validate(&mut self) -> crate::Result<()> {
+        if self.memory_budget < HEAP_SIZE_MIN {
+            let err_msg = format!(
+                "The heap size per thread needs to be at least {}.",
+                HEAP_SIZE_MIN
+            );
+            return Err(crate::TantivyError::InvalidArgument(err_msg));
+        }
+        let heap_size_in_bytes_per_thread = self.heap_size_in_byte_per_thread();
+        if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
+            let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
+            return Err(crate::TantivyError::InvalidArgument(err_msg));
+        }
+        if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
+            self.max_indexing_threads = (self.memory_budget / HEAP_SIZE_MIN) as usize;
+        }
+        Ok(())
+    }
+
+    pub fn heap_size_in_byte_per_thread(&self) -> u64 {
+        self.memory_budget / self.max_indexing_threads as u64
+    }
+
+    pub fn heap_size_before_flushing(&self) -> u64 {
+        self.heap_size_in_byte_per_thread() - MARGIN_IN_BYTES
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::IndexWriterConfig;
+
+    #[test]
+    fn test_index_writer_config_simple() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 3,
+            memory_budget: super::HEAP_SIZE_MIN * 3,
+            ..Default::default()
+        };
+        assert!(index.validate().is_ok());
+        assert_eq!(index.max_indexing_threads, 3);
+        assert_eq!(index.heap_size_in_byte_per_thread(), super::HEAP_SIZE_MIN);
+    }
+
+    #[test]
+    fn test_index_writer_config_reduce_num_threads() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 3,
+            memory_budget: super::HEAP_SIZE_MIN,
+            ..Default::default()
+        };
+        assert!(index.validate().is_ok());
+        assert_eq!(index.max_indexing_threads, 1);
+        assert_eq!(index.heap_size_in_byte_per_thread(), super::HEAP_SIZE_MIN);
+    }
+
+    #[test]
+    fn test_index_writer_config_not_enough_memory() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 1,
+            memory_budget: super::HEAP_SIZE_MIN - 1,
+            ..Default::default()
+        };
+        assert!(
+            matches!(index.validate(), Err(crate::TantivyError::InvalidArgument(msg)) if msg.contains("The heap size per thread needs to be at least"))
+        );
+    }
+
+    #[test]
+    fn test_index_writer_config_too_much_memory() {
+        let mut index = IndexWriterConfig {
+            max_indexing_threads: 1,
+            memory_budget: (u32::max_value() as u64) + 1,
+            ..Default::default()
+        };
+        assert!(
+            matches!(index.validate(), Err(crate::TantivyError::InvalidArgument(msg)) if msg.contains("The heap size per thread cannot exceed"))
+        );
+    }
+}
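A short worked example of the `validate()` clamping rule above. The numbers are illustrative, and `IndexWriterConfig` is assumed to be reachable at the crate root, as the `use crate::IndexWriterConfig;` in the tests suggests:

```rust
use tantivy::IndexWriterConfig; // re-exported at the crate root on this branch

fn main() -> tantivy::Result<()> {
    let mut config = IndexWriterConfig {
        max_indexing_threads: 4,
        memory_budget: 6_000_000, // 6 MB total => 1.5 MB per thread
        ..Default::default()
    };
    // 1.5 MB per thread is below the 3 MB minimum, so `validate()` clamps
    // the thread count instead of erroring: 6 MB / 3 MB => 2 threads.
    config.validate()?;
    assert_eq!(config.max_indexing_threads, 2);
    assert_eq!(config.heap_size_in_byte_per_thread(), 3_000_000);
    Ok(())
}
```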
@@ -54,6 +54,10 @@ impl LogMergePolicy {
 
 impl MergePolicy for LogMergePolicy {
     fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
+        if segments.is_empty() {
+            return Vec::new();
+        }
+
         let mut size_sorted_tuples = segments
             .iter()
             .map(SegmentMeta::num_docs)
@@ -63,35 +67,27 @@ impl MergePolicy for LogMergePolicy {
 
         size_sorted_tuples.sort_by(|x, y| y.1.cmp(&(x.1)));
 
-        if size_sorted_tuples.len() <= 1 {
-            return Vec::new();
-        }
-
         let size_sorted_log_tuples: Vec<_> = size_sorted_tuples
             .into_iter()
             .map(|(ind, num_docs)| (ind, f64::from(self.clip_min_size(num_docs)).log2()))
             .collect();
 
-        if let Some(&(first_ind, first_score)) = size_sorted_log_tuples.first() {
-            let mut current_max_log_size = first_score;
-            let mut levels = vec![vec![first_ind]];
-            for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
-                if score < (current_max_log_size - self.level_log_size) {
-                    current_max_log_size = score;
-                    levels.push(Vec::new());
-                }
-                levels.last_mut().unwrap().push(ind);
-            }
-            levels
-                .iter()
-                .filter(|level| level.len() >= self.min_merge_size)
-                .map(|ind_vec| {
-                    MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect())
-                })
-                .collect()
-        } else {
-            return vec![];
-        }
+        let (first_ind, first_score) = size_sorted_log_tuples[0];
+        let mut current_max_log_size = first_score;
+        let mut levels = vec![vec![first_ind]];
+        for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) {
+            if score < (current_max_log_size - self.level_log_size) {
+                current_max_log_size = score;
+                levels.push(Vec::new());
+            }
+            levels.last_mut().unwrap().push(ind);
+        }
+
+        levels
+            .iter()
+            .filter(|level| level.len() >= self.min_merge_size)
+            .map(|ind_vec| MergeCandidate(ind_vec.iter().map(|&ind| segments[ind].id()).collect()))
+            .collect()
     }
 }
@@ -183,7 +179,6 @@ mod tests {
         let result_list = test_merge_policy().compute_merge_candidates(&test_input);
         assert_eq!(result_list.len(), 2);
     }
 
     #[test]
     fn test_log_merge_policy_small_segments() {
         // segments under min_layer_size are merged together
@@ -199,17 +194,6 @@ mod tests {
         assert_eq!(result_list.len(), 1);
     }
 
-    #[test]
-    fn test_log_merge_policy_all_segments_too_large_to_merge() {
-        let eight_large_segments: Vec<SegmentMeta> =
-            std::iter::repeat_with(|| create_random_segment_meta(100_001))
-                .take(8)
-                .collect();
-        assert!(test_merge_policy()
-            .compute_merge_candidates(&eight_large_segments)
-            .is_empty());
-    }
-
     #[test]
     fn test_large_merge_segments() {
         let test_input = vec![
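For intuition about the level computation in `compute_merge_candidates`: segments whose log2 sizes sit within `level_log_size` of the level head share a level, and only levels with at least `min_merge_size` members yield a `MergeCandidate`. A standalone re-implementation of that bucketing heuristic with made-up parameter values (a sketch of the idea, not tantivy's code):

```rust
/// Standalone sketch of the level-bucketing heuristic used by
/// `compute_merge_candidates` above. Parameter values are illustrative.
fn bucket_by_log_size(
    num_docs: Vec<u32>,
    level_log_size: f64,
    min_merge_size: usize,
) -> Vec<Vec<usize>> {
    if num_docs.is_empty() {
        return Vec::new();
    }
    // (original index, num_docs), sorted by decreasing size.
    let mut sorted: Vec<(usize, u32)> = num_docs.into_iter().enumerate().collect();
    sorted.sort_by(|x, y| y.1.cmp(&x.1));

    let log_tuples: Vec<(usize, f64)> = sorted
        .into_iter()
        .map(|(ind, n)| (ind, f64::from(n.max(1)).log2()))
        .collect();

    // A new level starts whenever a segment is more than
    // `level_log_size` powers of two smaller than the level head.
    let (first_ind, first_score) = log_tuples[0];
    let mut current_max_log_size = first_score;
    let mut levels = vec![vec![first_ind]];
    for &(ind, score) in log_tuples.iter().skip(1) {
        if score < current_max_log_size - level_log_size {
            current_max_log_size = score;
            levels.push(Vec::new());
        }
        levels.last_mut().unwrap().push(ind);
    }
    // Only levels with enough members become merge candidates.
    levels.into_iter().filter(|l| l.len() >= min_merge_size).collect()
}

fn main() {
    // The 100_000-doc segments land in one level, the 1_000-doc ones in
    // another; the lone 10-doc segment forms a level too small to merge.
    let levels = bucket_by_log_size(vec![100_000, 1_000, 100_000, 1_000, 10], 2.0, 2);
    assert_eq!(levels, vec![vec![0, 2], vec![1, 3]]);
}
```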
@@ -1,17 +1,22 @@
+use crate::indexer::resource_manager::{Allocation, ResourceManager};
 use crate::Opstamp;
 use crate::SegmentId;
 use census::{Inventory, TrackedObject};
 use std::collections::HashSet;
+use std::fmt;
 use std::ops::Deref;
 
-#[derive(Default)]
-pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+#[derive(Default, Clone)]
+pub(crate) struct MergeOperationInventory {
+    inventory: Inventory<InnerMergeOperation>,
+    num_merge_watcher: ResourceManager,
+}
 
 impl Deref for MergeOperationInventory {
     type Target = Inventory<InnerMergeOperation>;
 
     fn deref(&self) -> &Self::Target {
-        &self.0
+        &self.inventory
     }
 }
 
@@ -25,6 +30,10 @@ impl MergeOperationInventory {
         }
         segment_in_merge
     }
+
+    pub fn wait_until_empty(&self) {
+        let _ = self.num_merge_watcher.wait_until_in_range(0..1);
+    }
 }
 
 /// A `MergeOperation` has two roles.
@@ -47,6 +56,17 @@ pub struct MergeOperation {
 pub(crate) struct InnerMergeOperation {
     target_opstamp: Opstamp,
     segment_ids: Vec<SegmentId>,
+    _allocation: Allocation,
+}
+
+impl fmt::Debug for InnerMergeOperation {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "MergeOp(target_opstamp={:?}, segment_ids={:?})",
+            self.target_opstamp, self.segment_ids
+        )
+    }
 }
 
 impl MergeOperation {
@@ -55,9 +75,11 @@ impl MergeOperation {
         target_opstamp: Opstamp,
         segment_ids: Vec<SegmentId>,
     ) -> MergeOperation {
+        let allocation = inventory.num_merge_watcher.allocate(1);
         let inner_merge_operation = InnerMergeOperation {
             target_opstamp,
             segment_ids,
+            _allocation: allocation,
         };
         MergeOperation {
             inner: inventory.track(inner_merge_operation),
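The `wait_until_empty` addition works because every live merge holds a count-of-one RAII allocation (the `_allocation` field above), so the watcher's level is exactly the number of in-flight merges. A toy illustration, assuming the `ResourceManager` introduced later in this changeset were directly usable (it is crate-private in the diff):

```rust
fn main() {
    let in_flight = ResourceManager::default(); // see resource_manager.rs below
    let merge_a = in_flight.allocate(1); // one in-flight merge
    let merge_b = in_flight.allocate(1); // a second one
    assert_eq!(in_flight.total_amount(), 2);

    drop(merge_a);
    drop(merge_b);
    // With all allocations dropped the level is 0, so a waiter on `0..1`
    // (which is what `wait_until_empty` does) returns immediately.
    assert_eq!(in_flight.wait_until_in_range(0..1), Ok(0));
}
```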
@@ -2,7 +2,9 @@ use crate::common::MAX_DOC_LIMIT;
 use crate::core::Segment;
 use crate::core::SegmentReader;
 use crate::core::SerializableSegment;
-use crate::docset::{DocSet, TERMINATED};
+use crate::directory::TerminatingWrite;
+use crate::directory::WritePtr;
+use crate::docset::DocSet;
 use crate::fastfield::BytesFastFieldReader;
 use crate::fastfield::DeleteBitSet;
 use crate::fastfield::FastFieldReader;
@@ -574,12 +576,10 @@ impl IndexMerger {
                 let inverted_index = segment_reader.inverted_index(indexed_field);
                 let mut segment_postings = inverted_index
                     .read_postings_from_terminfo(term_info, segment_postings_option);
-                let mut doc = segment_postings.doc();
-                while doc != TERMINATED {
-                    if !segment_reader.is_deleted(doc) {
+                while segment_postings.advance() {
+                    if !segment_reader.is_deleted(segment_postings.doc()) {
                         return Some((segment_ord, segment_postings));
                     }
-                    doc = segment_postings.advance();
                 }
                 None
             })
@@ -606,9 +606,17 @@ impl IndexMerger {
             // postings serializer.
             for (segment_ord, mut segment_postings) in segment_postings {
                 let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
-                let mut doc = segment_postings.doc();
-                while doc != TERMINATED {
+                loop {
+                    let doc = segment_postings.doc();
+
+                    // `.advance()` has been called once before the loop.
+                    //
+                    // It was required to make sure we only consider segments
+                    // that effectively contain at least one non-deleted document
+                    // and remove terms that do not have documents associated.
+                    //
+                    // For this reason, we cannot use a `while segment_postings.advance()` loop.
+
                     // deleted doc are skipped as they do not have a `remapped_doc_id`.
                     if let Some(remapped_doc_id) = old_to_new_doc_id[doc as usize] {
                         // we make sure to only write the term iff
@@ -623,8 +631,9 @@ impl IndexMerger {
                             delta_positions,
                         )?;
                     }
-                    doc = segment_postings.advance();
+                    if !segment_postings.advance() {
+                        break;
+                    }
                 }
             }
 
@@ -653,7 +662,8 @@ impl IndexMerger {
         Ok(term_ordinal_mappings)
     }
 
-    fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> {
+    pub fn write_storable_fields(&self, store_wrt: WritePtr) -> crate::Result<()> {
+        let mut store_writer = StoreWriter::new(store_wrt);
         for reader in &self.readers {
             let store_reader = reader.get_store_reader();
             if reader.num_deleted_docs() > 0 {
@@ -665,6 +675,8 @@ impl IndexMerger {
                 store_writer.stack(&store_reader)?;
             }
         }
+        let store_wrt = store_writer.close()?;
+        store_wrt.terminate()?;
         Ok(())
     }
 }
@@ -674,7 +686,6 @@ impl SerializableSegment for IndexMerger {
         let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
         self.write_fieldnorms(serializer.get_fieldnorms_serializer())?;
         self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
-        self.write_storable_fields(serializer.get_store_writer())?;
         serializer.close()?;
         Ok(self.max_doc)
     }
@@ -2,12 +2,14 @@ pub mod delete_queue;
 
 mod doc_opstamp_mapping;
 pub mod index_writer;
+mod index_writer_config;
 mod log_merge_policy;
 mod merge_operation;
 pub mod merge_policy;
 pub mod merger;
 pub mod operation;
 mod prepared_commit;
+mod resource_manager;
 mod segment_entry;
 mod segment_manager;
 mod segment_register;
@@ -16,13 +18,17 @@ pub mod segment_updater;
 mod segment_writer;
 mod stamper;
 
+pub(crate) use self::resource_manager::{Allocation, ResourceManager};
+pub(crate) use self::merge_operation::MergeOperationInventory;
 pub use self::index_writer::IndexWriter;
+pub use self::index_writer_config::IndexWriterConfig;
 pub use self::log_merge_policy::LogMergePolicy;
 pub use self::merge_operation::MergeOperation;
 pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
 pub use self::prepared_commit::PreparedCommit;
 pub use self::segment_entry::SegmentEntry;
 pub use self::segment_manager::SegmentManager;
+pub(crate) use self::segment_manager::SegmentRegisters;
 pub use self::segment_serializer::SegmentSerializer;
 pub use self::segment_writer::SegmentWriter;
 
@@ -7,14 +7,20 @@ pub struct PreparedCommit<'a> {
     index_writer: &'a mut IndexWriter,
     payload: Option<String>,
     opstamp: Opstamp,
+    soft_commit: bool,
 }
 
 impl<'a> PreparedCommit<'a> {
-    pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
+    pub(crate) fn new(
+        index_writer: &'a mut IndexWriter,
+        opstamp: Opstamp,
+        soft_commit: bool,
+    ) -> PreparedCommit<'_> {
         PreparedCommit {
             index_writer,
             payload: None,
             opstamp,
+            soft_commit,
         }
     }
 
@@ -32,11 +38,12 @@ impl<'a> PreparedCommit<'a> {
 
     pub fn commit(self) -> crate::Result<Opstamp> {
         info!("committing {}", self.opstamp);
-        let _ = block_on(
-            self.index_writer
-                .segment_updater()
-                .schedule_commit(self.opstamp, self.payload),
-        );
+        block_on(self.index_writer.segment_updater().schedule_commit(
+            self.opstamp,
+            self.payload,
+            self.soft_commit,
+        ))?;
+        block_on(self.index_writer.trigger_commit());
         Ok(self.opstamp)
     }
 }
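For reference, the calling side of this constructor change is a two-phase commit with the new `soft_commit` flag threaded through. A minimal sketch against this branch's API (it mirrors the updated tests in index_writer; illustration only):

```rust
use tantivy::schema::{Schema, STRING};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let id_field = schema_builder.add_text_field("id", STRING);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;

    index_writer.add_document(doc!(id_field => "myid"));
    // `false` requests a regular durable commit; `true` would request
    // a soft commit instead.
    let mut prepared = index_writer.prepare_commit(false)?;
    prepared.set_payload("first commit");
    let opstamp = prepared.commit()?;
    println!("committed at opstamp {}", opstamp);
    Ok(())
}
```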
213
src/indexer/resource_manager.rs
Normal file
213
src/indexer/resource_manager.rs
Normal file
@@ -0,0 +1,213 @@
|
|||||||
|
use std::ops::RangeBounds;
|
||||||
|
use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock};
|
||||||
|
|
||||||
|
struct LockedData {
|
||||||
|
count: u64,
|
||||||
|
enabled: bool
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for LockedData {
|
||||||
|
fn default() -> Self {
|
||||||
|
LockedData {
|
||||||
|
count: 0u64,
|
||||||
|
enabled: true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
struct Inner {
|
||||||
|
resource_level: Mutex<LockedData>,
|
||||||
|
convdvar: Condvar,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// The resource manager makes it possible to track the amount of level of a given resource.
|
||||||
|
/// There is no magic here : it is to the description of the user to declare how much
|
||||||
|
/// of the resource is being held.
|
||||||
|
///
|
||||||
|
/// Allocation of a resource is bound to the lifetime of a `Allocation` instance.
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// let resource_manager = ResourceManager::default();
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// In tantivy, this is used to check the number of merging thread and the number of memory
|
||||||
|
/// used by the volatile segments.
|
||||||
|
///
|
||||||
|
#[derive(Clone, Default)]
|
||||||
|
pub struct ResourceManager {
|
||||||
|
inner: Arc<Inner>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ResourceManager {
|
||||||
|
    /// Return the total amount of resource allocated
    pub fn total_amount(&self) -> u64 {
        self.lock().count
    }

    fn lock(&self) -> MutexGuard<LockedData> {
        self.inner
            .resource_level
            .lock()
            .expect("Failed to obtain lock for ReservedMemory. This should never happen.")
    }

    fn record_delta(&self, delta: i64) {
        if delta == 0i64 {
            return;
        }
        let mut lock = self.lock();
        let new_val = lock.count as i64 + delta;
        lock.count = new_val as u64;
        self.inner.convdvar.notify_all();
    }

    /// Records a new allocation.
    ///
    /// The returned `Allocation` object automatically releases the allocated resource
    /// on drop.
    pub fn allocate(&self, amount: u64) -> Allocation {
        self.record_delta(amount as i64);
        Allocation {
            resource_manager: self.clone(),
            amount: RwLock::new(amount),
        }
    }

    /// Stops the resource manager.
    ///
    /// If any thread is waiting via `.wait_until_in_range(...)`, the method stops
    /// blocking and returns an error.
    pub fn terminate(&self) {
        self.lock().enabled = false;
        self.inner.convdvar.notify_all();
    }

    /// Blocks the current thread until the resource level reaches the given range,
    /// in a CPU-efficient way.
    ///
    /// This method does not necessarily wake up the current thread at every transition
    /// into the targeted range, but any durable entry into the range will be detected.
    pub fn wait_until_in_range<R: RangeBounds<u64>>(&self, range: R) -> Result<u64, u64> {
        let mut levels = self.lock();
        if !levels.enabled {
            return Err(levels.count);
        }
        while !range.contains(&levels.count) {
            levels = self.inner.convdvar.wait(levels).unwrap();
            if !levels.enabled {
                return Err(levels.count);
            }
        }
        Ok(levels.count)
    }
}

pub struct Allocation {
    resource_manager: ResourceManager,
    amount: RwLock<u64>,
}

impl Allocation {
    pub fn amount(&self) -> u64 {
        *self.amount.read().unwrap()
    }

    pub fn modify(&self, new_amount: u64) {
        let mut wlock = self.amount.write().unwrap();
        let delta = new_amount as i64 - *wlock as i64;
        *wlock = new_amount;
        self.resource_manager.record_delta(delta);
    }
}

impl Drop for Allocation {
    fn drop(&mut self) {
        let amount = self.amount();
        self.resource_manager.record_delta(-(amount as i64))
    }
}

#[cfg(test)]
mod tests {
    use super::ResourceManager;
    use futures::channel::oneshot;
    use futures::executor::block_on;
    use std::{mem, thread};

    #[test]
    fn test_simple_allocation() {
        let memory = ResourceManager::default();
        assert_eq!(memory.total_amount(), 0u64);
        let _allocation = memory.allocate(10u64);
        assert_eq!(memory.total_amount(), 10u64);
    }

    #[test]
    fn test_multiple_allocation() {
        let memory = ResourceManager::default();
        assert_eq!(memory.total_amount(), 0u64);
        let _allocation = memory.allocate(10u64);
        let _allocation_2 = memory.allocate(11u64);
        assert_eq!(memory.total_amount(), 21u64);
    }

    #[test]
    fn test_release_on_drop() {
        let memory = ResourceManager::default();
        assert_eq!(memory.total_amount(), 0u64);
        let allocation = memory.allocate(10u64);
        let allocation_2 = memory.allocate(11u64);
        assert_eq!(memory.total_amount(), 21u64);
        mem::drop(allocation);
        assert_eq!(memory.total_amount(), 11u64);
        mem::drop(allocation_2);
        assert_eq!(memory.total_amount(), 0u64);
    }

    #[test]
    fn test_wait_until() {
        let memory = ResourceManager::default();
        let (send, recv) = oneshot::channel::<()>();
        let memory_clone = memory.clone();
        thread::spawn(move || {
            let _allocation1 = memory_clone.allocate(2u64);
            let _allocation2 = memory_clone.allocate(3u64);
            let _allocation3 = memory_clone.allocate(4u64);
            std::mem::drop(_allocation3);
            assert!(block_on(recv).is_ok());
        });
        assert_eq!(memory.wait_until_in_range(5u64..8u64), Ok(5u64));
        assert!(send.send(()).is_ok());
    }

    #[test]
    fn test_modify_amount() {
        let memory = ResourceManager::default();
        let alloc = memory.allocate(2u64);
        assert_eq!(memory.total_amount(), 2u64);
        assert_eq!(alloc.amount(), 2u64);
        let alloc2 = memory.allocate(3u64);
        assert_eq!(memory.total_amount(), 2u64 + 3u64);
        assert_eq!(alloc2.amount(), 3u64);
        alloc.modify(14u64);
        assert_eq!(alloc.amount(), 14u64);
        assert_eq!(memory.total_amount(), 14u64 + 3u64)
    }

    #[test]
    fn test_stop_resource_manager() {
        let resource_manager = ResourceManager::default();
        let resource_manager_clone = resource_manager.clone();
        let (sender, recv) = oneshot::channel();
        let join_handle = thread::spawn(move || {
            assert!(sender.send(()).is_ok());
            resource_manager_clone.wait_until_in_range(10..20)
        });
        let _ = block_on(recv);
        resource_manager.terminate();
        assert_eq!(join_handle.join().unwrap(), Err(0u64));
    }
}
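The tests above exercise `ResourceManager` from one consumer at a time. As a rough sketch of the intended pattern — and assuming only the methods shown above — a producer can combine `wait_until_in_range` with the RAII `Allocation` guard to get backpressure. The function, budget values, and import path below are hypothetical, not part of this change:

```rust
use tantivy::indexer::ResourceManager; // assumed path, for illustration only

// Hypothetical producer: wait while more than 1 MB is in flight, then
// account for a 64 KB buffer that is released when the guard drops.
fn produce(manager: &ResourceManager) -> Result<(), u64> {
    // Err(level) means the manager was terminated while waiting.
    manager.wait_until_in_range(0..1_000_000)?;
    let _guard = manager.allocate(64 * 1024);
    // ... fill and flush the buffer; dropping `_guard` releases the 64 KB.
    Ok(())
}
```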
@@ -1,7 +1,9 @@
 use crate::common::BitSet;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
+use crate::directory::ManagedDirectory;
 use crate::indexer::delete_queue::DeleteCursor;
+use crate::{Opstamp, Segment};
 use std::fmt;

 /// A segment entry describes the state of
@@ -19,7 +21,7 @@ use std::fmt;
 /// in the .del file or in the `delete_bitset`.
 #[derive(Clone)]
 pub struct SegmentEntry {
-    meta: SegmentMeta,
+    segment: Segment,
     delete_bitset: Option<BitSet>,
     delete_cursor: DeleteCursor,
 }
@@ -27,47 +29,67 @@ pub struct SegmentEntry {
 impl SegmentEntry {
     /// Create a new `SegmentEntry`
     pub fn new(
-        segment_meta: SegmentMeta,
+        segment: Segment,
         delete_cursor: DeleteCursor,
         delete_bitset: Option<BitSet>,
     ) -> SegmentEntry {
         SegmentEntry {
-            meta: segment_meta,
+            segment,
             delete_bitset,
             delete_cursor,
         }
     }

-    /// Return a reference to the segment entry's deleted bitset.
-    ///
-    /// `DocId` in this bitset are flagged as deleted.
-    pub fn delete_bitset(&self) -> Option<&BitSet> {
-        self.delete_bitset.as_ref()
-    }
-
-    /// Set the `SegmentMeta` for this segment.
-    pub fn set_meta(&mut self, segment_meta: SegmentMeta) {
-        self.meta = segment_meta;
-    }
+    pub fn persist(&mut self, dest_directory: ManagedDirectory) -> crate::Result<()> {
+        // TODO take the delete bitset into account?
+        self.segment.persist(dest_directory)?;
+        Ok(())
+    }
+
+    pub fn set_delete_cursor(&mut self, delete_cursor: DeleteCursor) {
+        self.delete_cursor = delete_cursor;
+    }
+
+    /// Takes (as in `Option::take`) the delete bitset of a segment entry.
+    /// `DocId` in this bitset are flagged as deleted.
+    pub fn take_delete_bitset(&mut self) -> Option<BitSet> {
+        self.delete_bitset.take()
+    }
+
+    /// Reset the delete information in this segment.
+    ///
+    /// The `SegmentEntry` segment's `SegmentMeta` gets updated, and
+    /// any delete bitset is dropped and set to `None`.
+    pub fn reset_delete_meta(&mut self, num_deleted_docs: u32, target_opstamp: Opstamp) {
+        self.segment = self
+            .segment
+            .clone()
+            .with_delete_meta(num_deleted_docs, target_opstamp);
+        self.delete_bitset = None;
+    }

     /// Return a reference to the segment_entry's delete cursor
-    pub fn delete_cursor(&mut self) -> &mut DeleteCursor {
-        &mut self.delete_cursor
+    pub fn delete_cursor(&mut self) -> DeleteCursor {
+        self.delete_cursor.clone()
     }

     /// Returns the segment id.
     pub fn segment_id(&self) -> SegmentId {
-        self.meta.id()
+        self.meta().id()
     }

+    /// Returns the `segment` associated to the `SegmentEntry`.
+    pub fn segment(&self) -> &Segment {
+        &self.segment
+    }
+
     /// Accessor to the `SegmentMeta`
     pub fn meta(&self) -> &SegmentMeta {
-        &self.meta
+        self.segment.meta()
     }
 }

 impl fmt::Debug for SegmentEntry {
     fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(formatter, "SegmentEntry({:?})", self.meta)
+        write!(formatter, "SegmentEntry({:?})", self.meta())
     }
 }
@@ -2,15 +2,15 @@ use super::segment_register::SegmentRegister;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::error::TantivyError;
-use crate::indexer::delete_queue::DeleteCursor;
-use crate::indexer::SegmentEntry;
+use crate::indexer::{SegmentEntry, MergeOperationInventory, MergeCandidate, MergeOperation};
+use crate::{Segment, Opstamp};
 use std::collections::hash_set::HashSet;
 use std::fmt::{self, Debug, Formatter};
-use std::sync::RwLock;
+use std::sync::{Arc, RwLock};
 use std::sync::{RwLockReadGuard, RwLockWriteGuard};

 #[derive(Default)]
-struct SegmentRegisters {
+pub(crate) struct SegmentRegisters {
     uncommitted: SegmentRegister,
     committed: SegmentRegister,
 }
@@ -22,6 +22,17 @@ pub(crate) enum SegmentsStatus {
 }

 impl SegmentRegisters {
+    pub fn new(committed: SegmentRegister) -> SegmentRegisters {
+        SegmentRegisters {
+            uncommitted: Default::default(),
+            committed,
+        }
+    }
+
+    pub fn committed_segment(&self) -> Vec<Segment> {
+        self.committed.segments()
+    }
+
     /// Check if all the segments are committed or uncommitted.
     ///
     /// If some segment is missing or segments are in a different state (this should not happen
@@ -44,7 +55,8 @@ impl SegmentRegisters {
 /// changes (merges especially)
 #[derive(Default)]
 pub struct SegmentManager {
-    registers: RwLock<SegmentRegisters>,
+    registers: Arc<RwLock<SegmentRegisters>>,
+    merge_operations: MergeOperationInventory,
 }

 impl Debug for SegmentManager {
@@ -58,34 +70,28 @@ impl Debug for SegmentManager {
     }
 }

-pub fn get_mergeable_segments(
-    in_merge_segment_ids: &HashSet<SegmentId>,
-    segment_manager: &SegmentManager,
-) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
-    let registers_lock = segment_manager.read();
-    (
-        registers_lock
-            .committed
-            .get_mergeable_segments(in_merge_segment_ids),
-        registers_lock
-            .uncommitted
-            .get_mergeable_segments(in_merge_segment_ids),
-    )
-}
-
 impl SegmentManager {
-    pub fn from_segments(
-        segment_metas: Vec<SegmentMeta>,
-        delete_cursor: &DeleteCursor,
-    ) -> SegmentManager {
+    pub(crate) fn new(registers: Arc<RwLock<SegmentRegisters>>) -> SegmentManager {
         SegmentManager {
-            registers: RwLock::new(SegmentRegisters {
-                uncommitted: SegmentRegister::default(),
-                committed: SegmentRegister::new(segment_metas, delete_cursor),
-            }),
+            registers,
+            merge_operations: Default::default(),
         }
     }

+    pub fn new_merge_operation(&self, opstamp: Opstamp, merge_candidate: MergeCandidate) -> MergeOperation {
+        MergeOperation::new(
+            &self.merge_operations,
+            opstamp,
+            merge_candidate.0,
+        )
+    }
+
+    pub fn wait_merging_thread(&self) {
+        self.merge_operations.wait_until_empty()
+    }
+
     /// Returns all of the segment entries (committed or uncommitted)
     pub fn segment_entries(&self) -> Vec<SegmentEntry> {
         let registers_lock = self.read();
@@ -94,6 +100,34 @@ impl SegmentManager {
         segment_entries
     }

+    /// Returns the segments that are currently not in merge.
+    ///
+    /// They are split over two `Vec`: the committed segments on one hand,
+    /// and the uncommitted ones on the other hand.
+    ///
+    /// This method is useful when searching for merge candidates or segments
+    /// to persist.
+    pub fn segments_not_in_merge(&self) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
+        let in_merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
+        let registers_lock = self.read();
+        (
+            registers_lock
+                .committed
+                .get_mergeable_segments(&in_merge_segment_ids),
+            registers_lock
+                .uncommitted
+                .get_mergeable_segments(&in_merge_segment_ids),
+        )
+    }
+
+    pub fn largest_segment_not_in_merge(&self) -> Option<SegmentMeta> {
+        let (committed, uncommitted) = self.segments_not_in_merge();
+        let mut segments = vec![];
+        segments.extend(committed);
+        segments.extend(uncommitted);
+        None
+    }
+
     // Lock poisoning should never happen :
     // The lock is acquired and released within this class,
     // and the operations cannot panic.
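Read together with `SegmentUpdater::create` in the segment-updater diff further down, the new constructor signatures suggest that a single `SegmentRegisters` value is now shared between the manager and the updater. A wiring sketch, as it might appear inside tantivy's indexer module — only the signatures are taken from this diff, everything else is assumed:

```rust
use std::sync::{Arc, RwLock};

// Hypothetical glue code: both components observe the same registers,
// the manager for queries, the updater for commits and merges.
fn wire_up(
    committed: SegmentRegister,
    index: Index,
    stamper: Stamper,
) -> crate::Result<(SegmentManager, SegmentUpdater)> {
    let registers = Arc::new(RwLock::new(SegmentRegisters::new(committed)));
    let segment_manager = SegmentManager::new(registers.clone());
    let segment_updater = SegmentUpdater::create(registers, index, stamper)?;
    Ok((segment_manager, segment_updater))
}
```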
@@ -1,7 +1,10 @@
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
+use crate::directory::ManagedDirectory;
 use crate::indexer::delete_queue::DeleteCursor;
 use crate::indexer::segment_entry::SegmentEntry;
+use crate::schema::Schema;
+use crate::Segment;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::fmt::{self, Debug, Formatter};
@@ -46,6 +49,13 @@ impl SegmentRegister {
             .collect()
     }

+    pub fn segments(&self) -> Vec<Segment> {
+        self.segment_states
+            .values()
+            .map(|segment_entry| segment_entry.segment().clone())
+            .collect()
+    }
+
     pub fn segment_entries(&self) -> Vec<SegmentEntry> {
         self.segment_states.values().cloned().collect()
     }
@@ -79,11 +89,17 @@ impl SegmentRegister {
         self.segment_states.get(segment_id).cloned()
     }

-    pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister {
+    pub fn new(
+        directory: &ManagedDirectory,
+        schema: &Schema,
+        segment_metas: Vec<SegmentMeta>,
+        delete_cursor: &DeleteCursor,
+    ) -> SegmentRegister {
         let mut segment_states = HashMap::new();
         for segment_meta in segment_metas {
             let segment_id = segment_meta.id();
-            let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None);
+            let segment = Segment::new_persisted(segment_meta, directory.clone(), schema.clone());
+            let segment_entry = SegmentEntry::new(segment, delete_cursor.clone(), None);
             segment_states.insert(segment_id, segment_entry);
         }
         SegmentRegister { segment_states }
@@ -95,6 +111,7 @@ mod tests {
     use super::*;
     use crate::core::{SegmentId, SegmentMetaInventory};
     use crate::indexer::delete_queue::*;
+    use crate::indexer::ResourceManager;

     fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
         segment_register
@@ -108,28 +125,34 @@ mod tests {
     fn test_segment_register() {
         let inventory = SegmentMetaInventory::default();
         let delete_queue = DeleteQueue::new();
+        let schema = Schema::builder().build();
+
         let mut segment_register = SegmentRegister::default();
         let segment_id_a = SegmentId::generate_random();
         let segment_id_b = SegmentId::generate_random();
         let segment_id_merged = SegmentId::generate_random();

+        let memory_manager = ResourceManager::default();
+
         {
-            let segment_meta = inventory.new_segment_meta(segment_id_a, 0u32);
-            let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
+            let meta = inventory.new_segment_meta(segment_id_a, 0u32);
+            let segment = Segment::new_volatile(meta, schema.clone(), memory_manager.clone());
+            let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
             segment_register.add_segment_entry(segment_entry);
         }
         assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
         {
-            let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32);
-            let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
+            let meta = inventory.new_segment_meta(segment_id_b, 0u32);
+            let segment = Segment::new_volatile(meta, schema.clone(), memory_manager.clone());
+            let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
             segment_register.add_segment_entry(segment_entry);
         }
         segment_register.remove_segment(&segment_id_a);
         segment_register.remove_segment(&segment_id_b);
         {
             let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32);
-            let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None);
+            let segment_merged = Segment::new_volatile(segment_meta_merged, schema.clone(), memory_manager.clone());
+            let segment_entry = SegmentEntry::new(segment_merged, delete_queue.cursor(), None);
             segment_register.add_segment_entry(segment_entry);
         }
         assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
@@ -3,12 +3,10 @@ use crate::core::SegmentComponent;
 use crate::fastfield::FastFieldSerializer;
 use crate::fieldnorm::FieldNormsSerializer;
 use crate::postings::InvertedIndexSerializer;
-use crate::store::StoreWriter;

 /// Segment serializer is in charge of laying out on disk
 /// the data accumulated and sorted by the `SegmentWriter`.
 pub struct SegmentSerializer {
-    store_writer: StoreWriter,
     fast_field_serializer: FastFieldSerializer,
     fieldnorms_serializer: FieldNormsSerializer,
     postings_serializer: InvertedIndexSerializer,
@@ -17,8 +15,6 @@ pub struct SegmentSerializer {
 impl SegmentSerializer {
     /// Creates a new `SegmentSerializer`.
     pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
-        let store_write = segment.open_write(SegmentComponent::STORE)?;
-
         let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
         let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;

@@ -27,7 +23,6 @@ impl SegmentSerializer {

         let postings_serializer = InvertedIndexSerializer::open(segment)?;
         Ok(SegmentSerializer {
-            store_writer: StoreWriter::new(store_write),
            fast_field_serializer,
            fieldnorms_serializer,
            postings_serializer,
@@ -49,16 +44,10 @@ impl SegmentSerializer {
         &mut self.fieldnorms_serializer
     }

-    /// Accessor to the `StoreWriter`.
-    pub fn get_store_writer(&mut self) -> &mut StoreWriter {
-        &mut self.store_writer
-    }
-
     /// Finalize the segment serialization.
     pub fn close(self) -> crate::Result<()> {
         self.fast_field_serializer.close()?;
         self.postings_serializer.close()?;
-        self.store_writer.close()?;
         self.fieldnorms_serializer.close()?;
         Ok(())
     }
@@ -1,4 +1,4 @@
-use super::segment_manager::{get_mergeable_segments, SegmentManager};
+use super::segment_manager::SegmentManager;
 use crate::core::Index;
 use crate::core::IndexMeta;
 use crate::core::Segment;
@@ -7,22 +7,21 @@ use crate::core::SegmentMeta;
 use crate::core::SerializableSegment;
 use crate::core::META_FILEPATH;
 use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
-use crate::indexer::delete_queue::DeleteCursor;
 use crate::indexer::index_writer::advance_deletes;
-use crate::indexer::merge_operation::MergeOperationInventory;
 use crate::indexer::merger::IndexMerger;
-use crate::indexer::segment_manager::SegmentsStatus;
+use crate::indexer::segment_manager::{SegmentRegisters, SegmentsStatus};
 use crate::indexer::stamper::Stamper;
 use crate::indexer::SegmentEntry;
 use crate::indexer::SegmentSerializer;
 use crate::indexer::{DefaultMergePolicy, MergePolicy};
 use crate::indexer::{MergeCandidate, MergeOperation};
 use crate::schema::Schema;
-use crate::Opstamp;
+use crate::{Opstamp, SegmentComponent};
 use futures::channel::oneshot;
 use futures::executor::{ThreadPool, ThreadPoolBuilder};
 use futures::future::Future;
 use futures::future::TryFutureExt;
+use serde_json;
 use std::borrow::BorrowMut;
 use std::collections::HashSet;
 use std::io::Write;
@@ -116,15 +115,14 @@ fn merge(

     // First we apply all of the deletes to the merged segment, up to the target opstamp.
     for segment_entry in &mut segment_entries {
-        let segment = index.segment(segment_entry.meta().clone());
-        advance_deletes(segment, segment_entry, target_opstamp)?;
+        advance_deletes(segment_entry, target_opstamp)?;
     }

-    let delete_cursor = segment_entries[0].delete_cursor().clone();
+    let delete_cursor = segment_entries[0].delete_cursor();

     let segments: Vec<Segment> = segment_entries
         .iter()
-        .map(|segment_entry| index.segment(segment_entry.meta().clone()))
+        .map(|segment_entry| segment_entry.segment().clone())
         .collect();

     // An IndexMerger is like a "view" of our merged segments.
@@ -133,11 +131,16 @@ fn merge(
     // ... we just serialize this index merger in our new segment to merge the two segments.
     let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;

-    let num_docs = merger.write(segment_serializer)?;
+    let store_wrt = merged_segment.open_write(SegmentComponent::STORE)?;
+    merger.write_storable_fields(store_wrt)?;

-    let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
+    let max_doc = merger.write(segment_serializer)?;

-    Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
+    Ok(SegmentEntry::new(
+        merged_segment.with_max_doc(max_doc),
+        delete_cursor,
+        None,
+    ))
 }

 pub(crate) struct InnerSegmentUpdater {
@@ -156,17 +159,15 @@ pub(crate) struct InnerSegmentUpdater {
     merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
     killed: AtomicBool,
     stamper: Stamper,
-    merge_operations: MergeOperationInventory,
 }

 impl SegmentUpdater {
     pub fn create(
+        segment_registers: Arc<RwLock<SegmentRegisters>>,
         index: Index,
         stamper: Stamper,
-        delete_cursor: &DeleteCursor,
     ) -> crate::Result<SegmentUpdater> {
-        let segments = index.searchable_segment_metas()?;
-        let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
+        let segment_manager = SegmentManager::new(segment_registers);
         let pool = ThreadPoolBuilder::new()
             .name_prefix("segment_updater")
             .pool_size(1)
@@ -195,7 +196,6 @@ impl SegmentUpdater {
             merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
             killed: AtomicBool::new(false),
             stamper,
-            merge_operations: Default::default(),
         })))
     }

@@ -261,8 +261,7 @@ impl SegmentUpdater {
     fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
         let mut segment_entries = self.segment_manager.segment_entries();
         for segment_entry in &mut segment_entries {
-            let segment = self.index.segment(segment_entry.meta().clone());
-            advance_deletes(segment, segment_entry, target_opstamp)?;
+            advance_deletes(segment_entry, target_opstamp)?;
         }
         Ok(segment_entries)
     }
@@ -330,12 +329,21 @@ impl SegmentUpdater {
         &self,
         opstamp: Opstamp,
         payload: Option<String>,
+        soft_commit: bool,
     ) -> impl Future<Output = crate::Result<()>> {
         let segment_updater: SegmentUpdater = self.clone();
+        let directory = self.index.directory().clone();
         self.schedule_future(async move {
-            let segment_entries = segment_updater.purge_deletes(opstamp)?;
+            let mut segment_entries = segment_updater.purge_deletes(opstamp)?;
+            if !soft_commit {
+                for segment_entry in &mut segment_entries {
+                    segment_entry.persist(directory.clone())?;
+                }
+            }
             segment_updater.segment_manager.commit(segment_entries);
-            segment_updater.save_metas(opstamp, payload)?;
+            if !soft_commit {
+                segment_updater.save_metas(opstamp, payload)?;
+            }
             let _ = garbage_collect_files(segment_updater.clone()).await;
             segment_updater.consider_merge_options().await;
             Ok(())
@@ -352,7 +360,7 @@ impl SegmentUpdater {

     pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
         let commit_opstamp = self.load_metas().opstamp;
-        MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
+        self.segment_manager.new_merge_operation(commit_opstamp, MergeCandidate(segment_ids.to_vec()))
     }

     // Starts a merge operation. This function will block until the merge operation is effectively
@@ -426,9 +434,8 @@ impl SegmentUpdater {
     }

     async fn consider_merge_options(&self) {
-        let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
         let (committed_segments, uncommitted_segments) =
-            get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
+            self.segment_manager.segments_not_in_merge();

         // Committed segments cannot be merged with uncommitted_segments.
         // We therefore consider merges using these two sets of segments independently.
@@ -439,7 +446,7 @@ impl SegmentUpdater {
             .compute_merge_candidates(&uncommitted_segments)
             .into_iter()
             .map(|merge_candidate| {
-                MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
+                self.segment_manager.new_merge_operation(current_opstamp, merge_candidate)
             })
             .collect();

@@ -448,7 +455,7 @@ impl SegmentUpdater {
             .compute_merge_candidates(&committed_segments)
             .into_iter()
             .map(|merge_candidate: MergeCandidate| {
-                MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
+                self.segment_manager.new_merge_operation(commit_opstamp, merge_candidate)
             })
             .collect::<Vec<_>>();
         merge_candidates.extend(committed_merge_candidates.into_iter());
@@ -473,17 +480,13 @@ impl SegmentUpdater {
         let end_merge_future = self.schedule_future(async move {
             info!("End merge {:?}", after_merge_segment_entry.meta());
             {
-                let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
+                let mut delete_cursor = after_merge_segment_entry.delete_cursor();
                 if let Some(delete_operation) = delete_cursor.get() {
                     let committed_opstamp = segment_updater.load_metas().opstamp;
                     if delete_operation.opstamp < committed_opstamp {
-                        let index = &segment_updater.index;
-                        let segment = index.segment(after_merge_segment_entry.meta().clone());
-                        if let Err(e) = advance_deletes(
-                            segment,
-                            &mut after_merge_segment_entry,
-                            committed_opstamp,
-                        ) {
+                        if let Err(e) =
+                            advance_deletes(&mut after_merge_segment_entry, committed_opstamp)
+                        {
                             error!(
                                 "Merge of {:?} was cancelled (advancing deletes failed): {:?}",
                                 merge_operation.segment_ids(),
@@ -532,9 +535,8 @@ impl SegmentUpdater {
     ///
     /// Obsolete files will eventually be cleaned up
     /// by the directory garbage collector.
-    pub fn wait_merging_thread(&self) -> crate::Result<()> {
-        self.merge_operations.wait_until_empty();
-        Ok(())
+    pub fn wait_merging_thread(&self) {
+        self.segment_manager.wait_merging_thread()
     }
 }

@@ -543,7 +545,8 @@ mod tests {

     use crate::indexer::merge_policy::tests::MergeWheneverPossible;
     use crate::schema::*;
-    use crate::Index;
+    use crate::{Index, SegmentId};
+    use futures::executor::block_on;

     #[test]
     fn test_delete_during_merge() {
@@ -694,4 +697,27 @@ mod tests {
             .segment_entries();
         assert!(seg_vec.is_empty());
     }
+
+    #[test]
+    fn test_merge_over_soft_commit() {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        // writing the segment
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        index_writer.add_document(doc!(text_field=>"a"));
+        assert!(index_writer.soft_commit().is_ok());
+        index_writer.add_document(doc!(text_field=>"a"));
+        assert!(index_writer.soft_commit().is_ok());
+
+        let reader = index_writer.reader(1).unwrap();
+        let segment_ids: Vec<SegmentId> = reader
+            .searcher()
+            .segment_readers()
+            .iter()
+            .map(|reader| reader.segment_id())
+            .collect();
+        assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
+    }
 }
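The new `test_merge_over_soft_commit` above is the only end-to-end usage in this compare view. Combined with the `soft_commit` flag threaded through the commit future, the intended flow appears to be the following sketch — `soft_commit()` and `reader(1)` are taken from the test, the rest is standard tantivy usage, and error handling is simplified:

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn nrt_flow() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

    writer.add_document(doc!(text => "hello"));
    // Per the `!soft_commit` guards above, a soft commit appears to skip both
    // persisting segments and saving meta.json; a regular commit() does both.
    writer.soft_commit()?;

    // `reader(1)` mirrors the new test; presumably an NRT reader attached to
    // the writer that can see soft-committed, still-volatile segments.
    let reader = writer.reader(1)?;
    assert!(reader.searcher().num_docs() >= 1);
    Ok(())
}
```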
@@ -1,9 +1,11 @@
 use super::operation::AddOperation;
 use crate::core::Segment;
 use crate::core::SerializableSegment;
+use crate::directory::{SpillingResult, SpillingWriter, TerminatingWrite};
 use crate::fastfield::FastFieldsWriter;
 use crate::fieldnorm::FieldNormsWriter;
 use crate::indexer::segment_serializer::SegmentSerializer;
+use crate::indexer::IndexWriterConfig;
 use crate::postings::compute_table_size;
 use crate::postings::MultiFieldPostingsWriter;
 use crate::schema::FieldType;
@@ -11,21 +13,24 @@ use crate::schema::Schema;
 use crate::schema::Term;
 use crate::schema::Value;
 use crate::schema::{Field, FieldEntry};
+use crate::store::StoreWriter;
 use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
-use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
+use crate::tokenizer::{FacetTokenizer, TextAnalyzer, TokenizerManager};
 use crate::tokenizer::{TokenStreamChain, Tokenizer};
-use crate::DocId;
 use crate::Opstamp;
+use crate::{DocId, SegmentComponent};
 use std::io;
+use std::io::Write;
 use std::str;
+use crate::indexer::resource_manager::ResourceManager;

 /// Computes the initial size of the hash table.
 ///
 /// Returns a number of bits `b`, such that the recommended initial table size is 2^b.
-fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
-    let table_memory_upper_bound = per_thread_memory_budget / 3;
+fn initial_table_size(per_thread_memory_budget: u64) -> crate::Result<usize> {
+    let table_memory_upper_bound = per_thread_memory_budget / 3u64;
     if let Some(limit) = (10..)
-        .take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_memory_upper_bound)
+        .take_while(|num_bits| compute_table_size(*num_bits) < table_memory_upper_bound)
         .last()
     {
         Ok(limit.min(19)) // we cap it at 2^19 = 512K.
@@ -43,11 +48,13 @@ fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
 pub struct SegmentWriter {
     max_doc: DocId,
     multifield_postings: MultiFieldPostingsWriter,
-    segment_serializer: SegmentSerializer,
+    segment: Segment,
     fast_field_writers: FastFieldsWriter,
     fieldnorms_writer: FieldNormsWriter,
     doc_opstamps: Vec<Opstamp>,
     tokenizers: Vec<Option<TextAnalyzer>>,
+    store_writer: StoreWriter<SpillingWriter>,
+    memory_manager: ResourceManager,
 }

 impl SegmentWriter {
@@ -61,12 +68,13 @@ impl SegmentWriter {
     /// - segment: The segment being written
     /// - schema
     pub fn for_segment(
-        memory_budget: usize,
-        mut segment: Segment,
+        config: &IndexWriterConfig,
+        segment: Segment,
         schema: &Schema,
+        tokenizer_manager: &TokenizerManager,
+        memory_manager: ResourceManager
     ) -> crate::Result<SegmentWriter> {
-        let table_num_bits = initial_table_size(memory_budget)?;
-        let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
+        let table_num_bits = initial_table_size(config.heap_size_in_byte_per_thread())?;
         let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
         let tokenizers = schema
             .fields()
@@ -76,20 +84,32 @@ impl SegmentWriter {
                     .get_indexing_options()
                     .and_then(|text_index_option| {
                         let tokenizer_name = &text_index_option.tokenizer();
-                        segment.index().tokenizers().get(tokenizer_name)
+                        tokenizer_manager.get(tokenizer_name)
                     }),
                 _ => None,
             },
         )
         .collect();
+        let mut segment_clone = segment.clone();
+        let spilling_wrt = SpillingWriter::new(
+            50_000_000,
+            Box::new(move || {
+                segment_clone
+                    .open_write(SegmentComponent::STORE)
+                    .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
+            }),
+        );
+        let store_writer = StoreWriter::new(spilling_wrt);
         Ok(SegmentWriter {
             max_doc: 0,
             multifield_postings,
             fieldnorms_writer: FieldNormsWriter::for_schema(schema),
-            segment_serializer,
+            segment,
             fast_field_writers: FastFieldsWriter::from_schema(schema),
             doc_opstamps: Vec::with_capacity(1_000),
             tokenizers,
+            store_writer,
+            memory_manager
         })
     }

@@ -97,18 +117,33 @@ impl SegmentWriter {
     ///
     /// Finalize consumes the `SegmentWriter`, so that it cannot
     /// be used afterwards.
-    pub fn finalize(mut self) -> crate::Result<Vec<u64>> {
+    pub fn finalize(mut self) -> crate::Result<(Segment, Vec<u64>)> {
         self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
+        let spilling_wrt = self.store_writer.close()?;
+        let mut segment: Segment;
+        match spilling_wrt.finalize()? {
+            SpillingResult::Spilled => {
+                segment = self.segment.clone();
+            }
+            SpillingResult::Buffer(buf) => {
+                segment = self.segment.into_volatile(self.memory_manager.clone());
+                let mut store_wrt = segment.open_write(SegmentComponent::STORE)?;
+                store_wrt.write_all(&buf[..])?;
+                store_wrt.terminate()?;
+            }
+        }
+        let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
+        segment = segment.with_max_doc(self.max_doc);
         write(
             &self.multifield_postings,
             &self.fast_field_writers,
             &self.fieldnorms_writer,
-            self.segment_serializer,
+            segment_serializer,
         )?;
-        Ok(self.doc_opstamps)
+        Ok((segment, self.doc_opstamps))
     }

-    pub fn mem_usage(&self) -> usize {
+    pub fn mem_usage(&self) -> u64 {
         self.multifield_postings.mem_usage()
     }

@@ -246,8 +281,7 @@ impl SegmentWriter {
         }
         doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
         doc.prepare_for_store();
-        let doc_writer = self.segment_serializer.get_store_writer();
-        doc_writer.store(&doc)?;
+        self.store_writer.store(&doc)?;
         self.max_doc += 1;
         Ok(())
     }
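`SpillingWriter` and `SpillingResult` come from `crate::directory`, and their implementation is not part of this compare view. Based purely on how they are used above — a byte budget, a factory callback that opens the real store writer, and a `Buffer`/`Spilled` outcome at finalization — a minimal sketch of such a writer could look like this; every detail beyond that observed usage is an assumption:

```rust
use std::io::{self, Write};
use std::mem;

/// Outcome of finalizing the writer: either all bytes still fit in the
/// in-memory buffer, or they were spilled to the underlying writer.
pub enum SpillingResult {
    Buffer(Vec<u8>),
    Spilled,
}

type WriterFactory = Box<dyn FnOnce() -> io::Result<Box<dyn Write>>>;

enum State {
    Buffered(Vec<u8>, WriterFactory),
    Spilled(Box<dyn Write>),
    Poisoned, // only reachable if the factory failed mid-spill
}

pub struct SpillingWriter {
    limit: usize,
    state: State,
}

impl SpillingWriter {
    pub fn new(limit: usize, factory: WriterFactory) -> SpillingWriter {
        SpillingWriter {
            limit,
            state: State::Buffered(Vec::new(), factory),
        }
    }

    /// Returns the buffer if the budget was never exceeded, `Spilled` otherwise.
    pub fn finalize(self) -> io::Result<SpillingResult> {
        match self.state {
            State::Buffered(buffer, _factory) => Ok(SpillingResult::Buffer(buffer)),
            State::Spilled(mut wrt) => {
                wrt.flush()?;
                Ok(SpillingResult::Spilled)
            }
            State::Poisoned => Err(io::Error::new(io::ErrorKind::Other, "spill failed")),
        }
    }
}

impl Write for SpillingWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let exceeds_budget = match &self.state {
            State::Buffered(buffer, _) => buffer.len() + buf.len() > self.limit,
            _ => false,
        };
        if exceeds_budget {
            // Budget exceeded: open the real writer and replay the buffer.
            if let State::Buffered(buffer, factory) =
                mem::replace(&mut self.state, State::Poisoned)
            {
                let mut wrt = factory()?;
                wrt.write_all(&buffer)?;
                self.state = State::Spilled(wrt);
            }
        }
        match &mut self.state {
            State::Buffered(buffer, _) => {
                buffer.extend_from_slice(buf);
                Ok(buf.len())
            }
            State::Spilled(wrt) => wrt.write(buf),
            State::Poisoned => Err(io::Error::new(io::ErrorKind::Other, "spill failed")),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match &mut self.state {
            State::Spilled(wrt) => wrt.flush(),
            _ => Ok(()),
        }
    }
}
```

The payoff, visible in `finalize` above, is that a small segment whose store never exceeded the budget can stay entirely in memory (`SpillingResult::Buffer`) and become a volatile segment, while a large one has already been streamed to the directory (`SpillingResult::Spilled`).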
43
src/lib.rs
@@ -156,14 +156,14 @@ mod snippet;
 pub use self::snippet::{Snippet, SnippetGenerator};

 mod docset;
-pub use self::docset::{DocSet, TERMINATED};
+pub use self::docset::{DocSet, SkipResult};
 pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
 pub use crate::core::{Executor, SegmentComponent};
 pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
 pub use crate::core::{InvertedIndexReader, SegmentReader};
 pub use crate::directory::Directory;
 pub use crate::indexer::operation::UserOperation;
-pub use crate::indexer::IndexWriter;
+pub use crate::indexer::{IndexWriter, IndexWriterConfig};
 pub use crate::postings::Postings;
 pub use crate::reader::LeasedItem;
 pub use crate::schema::{Document, Term};
@@ -285,7 +285,7 @@ mod tests {

     use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
     use crate::core::SegmentReader;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::DocSet;
     use crate::query::BooleanQuery;
     use crate::schema::*;
     use crate::DocAddress;
@@ -381,12 +381,19 @@ mod tests {
             index_writer.commit().unwrap();
         }
         {
-            index_writer.add_document(doc!(text_field=>"a"));
-            index_writer.add_document(doc!(text_field=>"a a"));
+            {
+                let doc = doc!(text_field=>"a");
+                index_writer.add_document(doc);
+            }
+            {
+                let doc = doc!(text_field=>"a a");
+                index_writer.add_document(doc);
+            }
             index_writer.commit().unwrap();
         }
         {
-            index_writer.add_document(doc!(text_field=>"c"));
+            let doc = doc!(text_field=>"c");
+            index_writer.add_document(doc);
             index_writer.commit().unwrap();
         }
         {
@@ -465,12 +472,10 @@ mod tests {
     }

     fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
-        let mut doc = docset.advance();
-        while doc != TERMINATED {
-            if !reader.is_deleted(doc) {
+        while docset.advance() {
+            if !reader.is_deleted(docset.doc()) {
                 return true;
             }
-            doc = docset.advance();
         }
         false
     }
@@ -636,8 +641,9 @@ mod tests {
             .inverted_index(term.field())
             .read_postings(&term, IndexRecordOption::Basic)
             .unwrap();
+        assert!(postings.advance());
         assert_eq!(postings.doc(), 0);
-        assert_eq!(postings.advance(), TERMINATED);
+        assert!(!postings.advance());
     }

     #[test]
@@ -659,8 +665,9 @@ mod tests {
             .inverted_index(term.field())
             .read_postings(&term, IndexRecordOption::Basic)
             .unwrap();
+        assert!(postings.advance());
         assert_eq!(postings.doc(), 0);
-        assert_eq!(postings.advance(), TERMINATED);
+        assert!(!postings.advance());
     }

     #[test]
@@ -682,8 +689,9 @@ mod tests {
             .inverted_index(term.field())
             .read_postings(&term, IndexRecordOption::Basic)
             .unwrap();
+        assert!(postings.advance());
         assert_eq!(postings.doc(), 0);
-        assert_eq!(postings.advance(), TERMINATED);
+        assert!(!postings.advance());
     }

     #[test]
@@ -752,8 +760,10 @@ mod tests {
         {
             // writing the segment
             let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            let doc = doc!(text_field=>"af af af bc bc");
-            index_writer.add_document(doc);
+            {
+                let doc = doc!(text_field=>"af af af bc bc");
+                index_writer.add_document(doc);
+            }
             index_writer.commit().unwrap();
         }
         {
@@ -769,9 +779,10 @@ mod tests {
             let mut postings = inverted_index
                 .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
                 .unwrap();
+            assert!(postings.advance());
             assert_eq!(postings.doc(), 0);
             assert_eq!(postings.term_freq(), 3);
-            assert_eq!(postings.advance(), TERMINATED);
+            assert!(!postings.advance());
         }
     }
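The test rewrites above pin down the `DocSet` contract this branch returns to: `advance()` yields a `bool` and the current document is read through `doc()`, exactly as in `advance_undeleted`. The resulting iteration idiom, extracted into a free function:

```rust
use tantivy::{DocId, DocSet};

// Drains a DocSet under the bool-returning advance() contract shown above.
fn collect_docs(docset: &mut dyn DocSet) -> Vec<DocId> {
    let mut docs = Vec::new();
    while docset.advance() {
        docs.push(docset.doc());
    }
    docs
}
```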
@@ -37,9 +37,9 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
 #[cfg(test)]
 pub mod tests {

-    use super::PositionSerializer;
+    use super::{PositionReader, PositionSerializer};
     use crate::directory::ReadOnlySource;
-    use crate::positions::reader::PositionReader;
+    use crate::positions::COMPRESSION_BLOCK_SIZE;
     use std::iter;

     fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
@@ -68,7 +68,7 @@ pub mod tests {
         let mut position_reader = PositionReader::new(stream, skip, 0u64);
         for &n in &[1, 10, 127, 128, 130, 312] {
             let mut v = vec![0u32; n];
-            position_reader.read(0, &mut v[..]);
+            position_reader.read(&mut v[..n]);
             for i in 0..n {
                 assert_eq!(v[i], i as u32);
             }
@@ -76,19 +76,19 @@ pub mod tests {
     }

     #[test]
-    fn test_position_read_with_offset() {
-        let v: Vec<u32> = (0..1000).collect();
+    fn test_position_skip() {
+        let v: Vec<u32> = (0..1_000).collect();
         let (stream, skip) = create_stream_buffer(&v[..]);
         assert_eq!(skip.len(), 12);
         assert_eq!(stream.len(), 1168);

         let mut position_reader = PositionReader::new(stream, skip, 0u64);
-        for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
-            for &len in &[1, 10, 130, 500] {
-                let mut v = vec![0u32; len];
-                position_reader.read(offset, &mut v[..]);
-                for i in 0..len {
-                    assert_eq!(v[i], i as u32 + offset as u32);
-                }
+        position_reader.skip(10);
+        for &n in &[10, 127, COMPRESSION_BLOCK_SIZE, 130, 312] {
+            let mut v = vec![0u32; n];
+            position_reader.read(&mut v[..n]);
+            for i in 0..n {
+                assert_eq!(v[i], 10u32 + i as u32);
             }
         }
     }
@@ -103,12 +103,11 @@ pub mod tests {
         let mut position_reader = PositionReader::new(stream, skip, 0u64);
         let mut buf = [0u32; 7];
         let mut c = 0;

-        let mut offset = 0;
         for _ in 0..100 {
-            position_reader.read(offset, &mut buf);
-            position_reader.read(offset, &mut buf);
-            offset += 7;
+            position_reader.read(&mut buf);
+            position_reader.read(&mut buf);
+            position_reader.skip(4);
+            position_reader.skip(3);
             for &el in &buf {
                 assert_eq!(c, el);
                 c += 1;
@@ -116,58 +115,6 @@ pub mod tests {
         }
     }

-    #[test]
-    fn test_position_reread_anchor_different_than_block() {
-        let v: Vec<u32> = (0..2_000_000).collect();
-        let (stream, skip) = create_stream_buffer(&v[..]);
-        assert_eq!(skip.len(), 15_749);
-        assert_eq!(stream.len(), 4_987_872);
-        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
-        let mut buf = [0u32; 256];
-        position_reader.read(128, &mut buf);
-        for i in 0..256 {
-            assert_eq!(buf[i], (128 + i) as u32);
-        }
-        position_reader.read(128, &mut buf);
-        for i in 0..256 {
-            assert_eq!(buf[i], (128 + i) as u32);
-        }
-    }
-
-    #[test]
-    #[should_panic(expected = "offset arguments should be increasing.")]
-    fn test_position_panic_if_called_previous_anchor() {
-        let v: Vec<u32> = (0..2_000_000).collect();
-        let (stream, skip) = create_stream_buffer(&v[..]);
-        assert_eq!(skip.len(), 15_749);
-        assert_eq!(stream.len(), 4_987_872);
-        let mut buf = [0u32; 1];
-        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 200_000);
-        position_reader.read(230, &mut buf);
-        position_reader.read(9, &mut buf);
-    }
-
-    #[test]
-    fn test_positions_bug() {
-        let mut v: Vec<u32> = vec![];
-        for i in 1..200 {
-            for j in 0..i {
-                v.push(j);
-            }
-        }
-        let (stream, skip) = create_stream_buffer(&v[..]);
-        let mut buf = Vec::new();
-        let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
-        let mut offset = 0;
-        for i in 1..24 {
-            buf.resize(i, 0);
-            position_reader.read(offset, &mut buf[..]);
-            offset += i as u64;
-            let r: Vec<u32> = (0..i).map(|el| el as u32).collect();
-            assert_eq!(buf, &r[..]);
-        }
-    }
-
     #[test]
     fn test_position_long_skip_const() {
         const CONST_VAL: u32 = 9u32;
@@ -177,7 +124,7 @@ pub mod tests {
         assert_eq!(stream.len(), 1_000_000);
         let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
         let mut buf = [0u32; 1];
-        position_reader.read(0, &mut buf);
+        position_reader.read(&mut buf);
         assert_eq!(buf[0], CONST_VAL);
     }

@@ -196,7 +143,7 @@ pub mod tests {
         ] {
             let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
             let mut buf = [0u32; 1];
-            position_reader.read(0, &mut buf);
+            position_reader.read(&mut buf);
             assert_eq!(buf[0], offset as u32);
         }
     }
@@ -3,6 +3,7 @@ use crate::directory::ReadOnlySource;
 use crate::positions::COMPRESSION_BLOCK_SIZE;
 use crate::positions::LONG_SKIP_INTERVAL;
 use crate::positions::LONG_SKIP_IN_BLOCKS;
+use crate::postings::compression::compressed_block_size;
 /// Positions works as a long sequence of compressed block.
 /// All terms are chained one after the other.
 ///
@@ -61,20 +62,22 @@ impl Positions {
 
     fn reader(&self, offset: u64) -> PositionReader {
         let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
+        let small_skip = (offset % LONG_SKIP_INTERVAL) as usize;
         let offset_num_bytes: u64 = self.long_skip(long_skip_id);
         let mut position_read = OwnedRead::new(self.position_source.clone());
         position_read.advance(offset_num_bytes as usize);
         let mut skip_read = OwnedRead::new(self.skip_source.clone());
         skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
-        PositionReader {
+        let mut position_reader = PositionReader {
             bit_packer: self.bit_packer,
             skip_read,
             position_read,
+            inner_offset: 0,
             buffer: Box::new([0u32; 128]),
-            block_offset: std::i64::MAX as u64,
-            anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
-            abs_offset: offset,
-        }
+            ahead: None,
+        };
+        position_reader.skip(small_skip);
+        position_reader
     }
 }
 
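The arithmetic in `reader` above splits an absolute position offset into a coarse jump (one lookup in the long-skip table) and a residue handed to the new `PositionReader::skip`. A minimal sketch of that decomposition; the constant values below are assumptions for illustration only (the real ones come from `crate::positions`):

```rust
// Hypothetical values: one long-skip entry every 1_024 blocks of 128 positions.
const COMPRESSION_BLOCK_SIZE: usize = 128;
const LONG_SKIP_IN_BLOCKS: usize = 1_024;
const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) as u64;

fn main() {
    let offset: u64 = 1_050_000;
    // One O(1) lookup in the long-skip table jumps close to the target...
    let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
    // ...and the remainder is consumed linearly by `PositionReader::skip`.
    let small_skip = (offset % LONG_SKIP_INTERVAL) as usize;
    assert_eq!(long_skip_id, 8);
    assert_eq!(small_skip, 1_424); // 1_050_000 - 8 * 131_072
}
```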
@@ -82,12 +85,51 @@ pub struct PositionReader {
     skip_read: OwnedRead,
     position_read: OwnedRead,
     bit_packer: BitPacker4x,
-    buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,
-    block_offset: u64,
-    anchor_offset: u64,
-    abs_offset: u64,
+    inner_offset: usize,
+    buffer: Box<[u32; 128]>,
+    ahead: Option<usize>, // if None, no block is loaded.
+    // if Some(num_blocks), the block currently loaded is num_blocks ahead
+    // of the block of the next int to read.
+}
+
+// `ahead` represents the offset of the block currently loaded
+// compared to the cursor of the actual stream.
+//
+// By contract, when this function is called, the current block has to be
+// decompressed.
+//
+// If the requested number of els ends exactly at a given block, the next
+// block is not decompressed.
+fn read_impl(
+    bit_packer: BitPacker4x,
+    mut position: &[u8],
+    buffer: &mut [u32; 128],
+    mut inner_offset: usize,
+    num_bits: &[u8],
+    output: &mut [u32],
+) -> usize {
+    let mut output_start = 0;
+    let mut output_len = output.len();
+    let mut ahead = 0;
+    loop {
+        let available_len = COMPRESSION_BLOCK_SIZE - inner_offset;
+        // We have enough elements in the current block.
+        // Let's copy the requested elements in the output buffer,
+        // and return.
+        if output_len <= available_len {
+            output[output_start..].copy_from_slice(&buffer[inner_offset..][..output_len]);
+            return ahead;
+        }
+        output[output_start..][..available_len].copy_from_slice(&buffer[inner_offset..]);
+        output_len -= available_len;
+        output_start += available_len;
+        inner_offset = 0;
+        let num_bits = num_bits[ahead];
+        bit_packer.decompress(position, &mut buffer[..], num_bits);
+        let block_len = compressed_block_size(num_bits);
+        position = &position[block_len..];
+        ahead += 1;
+    }
 }
 
 impl PositionReader {
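`read_impl` steps from one bit-packed block to the next with `compressed_block_size`, whose body is not shown in this diff. The sketch below reproduces the arithmetic the removed code used inline (`num_bits * COMPRESSION_BLOCK_SIZE / 8`), which is presumably what the helper computes:

```rust
const COMPRESSION_BLOCK_SIZE: usize = 128; // BitPacker4x::BLOCK_LEN

// Presumed equivalent of `compressed_block_size`: 128 integers packed at
// `num_bits` bits each. 128 * num_bits is always a multiple of 8, so the
// division to whole bytes is exact.
fn compressed_block_size(num_bits: u8) -> usize {
    num_bits as usize * COMPRESSION_BLOCK_SIZE / 8
}

fn main() {
    // e.g. deltas that fit in 7 bits: a 128-int block occupies 112 bytes.
    assert_eq!(compressed_block_size(7), 112);
}
```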
@@ -99,65 +141,57 @@ impl PositionReader {
         Positions::new(position_source, skip_source).reader(offset)
     }
 
-    fn advance_num_blocks(&mut self, num_blocks: usize) {
-        let num_bits: usize = self.skip_read.as_ref()[..num_blocks]
-            .iter()
-            .cloned()
-            .map(|num_bits| num_bits as usize)
-            .sum();
-        let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
-        self.skip_read.advance(num_blocks as usize);
-        self.position_read.advance(num_bytes_to_skip);
-    }
-
-    /// Fills a buffer with the positions `[offset..offset+output.len())` integers.
-    ///
-    /// `offset` is required to have a value >= to the offsets given in previous calls
-    /// for the given `PositionReaderAbsolute` instance.
-    pub fn read(&mut self, mut offset: u64, mut output: &mut [u32]) {
-        offset += self.abs_offset;
-        assert!(
-            offset >= self.anchor_offset,
-            "offset arguments should be increasing."
-        );
-        let delta_to_block_offset = offset as i64 - self.block_offset as i64;
-        if delta_to_block_offset < 0 || delta_to_block_offset >= 128 {
-            // The first position is not within the first block.
-            // We need to decompress the first block.
-            let delta_to_anchor_offset = offset - self.anchor_offset;
-            let num_blocks_to_skip =
-                (delta_to_anchor_offset / (COMPRESSION_BLOCK_SIZE as u64)) as usize;
-            self.advance_num_blocks(num_blocks_to_skip);
-            self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
-            self.block_offset = self.anchor_offset;
-            let num_bits = self.skip_read.get(0);
-            self.bit_packer
-                .decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
-        } else {
-            let num_blocks_to_skip =
-                ((self.block_offset - self.anchor_offset) / COMPRESSION_BLOCK_SIZE as u64) as usize;
-            self.advance_num_blocks(num_blocks_to_skip);
-            self.anchor_offset = self.block_offset;
-        }
-
-        let mut num_bits = self.skip_read.get(0);
-        let mut position_data = self.position_read.as_ref();
-
-        for i in 1.. {
-            let offset_in_block = (offset as usize) % COMPRESSION_BLOCK_SIZE;
-            let remaining_in_block = COMPRESSION_BLOCK_SIZE - offset_in_block;
-            if remaining_in_block >= output.len() {
-                output.copy_from_slice(&self.buffer[offset_in_block..][..output.len()]);
-                break;
-            }
-            output[..remaining_in_block].copy_from_slice(&self.buffer[offset_in_block..]);
-            output = &mut output[remaining_in_block..];
-            offset += remaining_in_block as u64;
-            position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
-            num_bits = self.skip_read.get(i);
+    /// Fills a buffer with the next `output.len()` integers.
+    /// This does not consume / advance the stream.
+    pub fn read(&mut self, output: &mut [u32]) {
+        let skip_data = self.skip_read.as_ref();
+        let position_data = self.position_read.as_ref();
+        let num_bits = self.skip_read.get(0);
+        if self.ahead != Some(0) {
+            // the block currently available is not the block
+            // for the current position
             self.bit_packer
                 .decompress(position_data, self.buffer.as_mut(), num_bits);
-            self.block_offset += COMPRESSION_BLOCK_SIZE as u64;
+            self.ahead = Some(0);
         }
+        let block_len = compressed_block_size(num_bits);
+        self.ahead = Some(read_impl(
+            self.bit_packer,
+            &position_data[block_len..],
+            self.buffer.as_mut(),
+            self.inner_offset,
+            &skip_data[1..],
+            output,
+        ));
+    }
+
+    /// Skip the next `skip_len` integer.
+    ///
+    /// If a full block is skipped, calling
+    /// `.skip(...)` will avoid decompressing it.
+    ///
+    /// May panic if the end of the stream is reached.
+    pub fn skip(&mut self, skip_len: usize) {
+        let skip_len_plus_inner_offset = skip_len + self.inner_offset;
+
+        let num_blocks_to_advance = skip_len_plus_inner_offset / COMPRESSION_BLOCK_SIZE;
+        self.inner_offset = skip_len_plus_inner_offset % COMPRESSION_BLOCK_SIZE;
+
+        self.ahead = self.ahead.and_then(|num_blocks| {
+            if num_blocks >= num_blocks_to_advance {
+                Some(num_blocks - num_blocks_to_advance)
+            } else {
+                None
+            }
+        });
+
+        let skip_len_in_bits = self.skip_read.as_ref()[..num_blocks_to_advance]
+            .iter()
+            .map(|num_bits| *num_bits as usize)
+            .sum::<usize>()
+            * COMPRESSION_BLOCK_SIZE;
+        let skip_len_in_bytes = skip_len_in_bits / 8;
+        self.skip_read.advance(num_blocks_to_advance);
+        self.position_read.advance(skip_len_in_bytes);
     }
 }
 
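Taken together, the two methods replace the old absolute-offset API with a forward cursor: `read` fills a buffer without consuming the stream, and `skip` is the only way to advance. A minimal usage sketch, reusing the `create_stream_buffer` fixture from the tests earlier in this diff:

```rust
#[test]
fn position_reader_cursor_sketch() {
    let v: Vec<u32> = (0..1_000).collect();
    let (stream, skip) = create_stream_buffer(&v[..]);
    let mut position_reader = PositionReader::new(stream, skip, 0u64);

    let mut buf = [0u32; 16];
    position_reader.read(&mut buf); // positions 0..16; the cursor does not move
    assert_eq!(buf[0], 0);
    position_reader.read(&mut buf); // the same 16 values again
    assert_eq!(buf[0], 0);
    position_reader.skip(16); // only `skip` advances the cursor
    position_reader.read(&mut buf); // positions 16..32
    assert_eq!(buf[0], 16);
}
```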
@@ -87,7 +87,6 @@ fn exponential_search(arr: &[u32], target: u32) -> (usize, usize)
     (begin, end)
 }
 
-#[inline(never)]
 fn galloping(block_docs: &[u32], target: u32) -> usize {
     let (start, end) = exponential_search(&block_docs, target);
     start + linear_search(&block_docs[start..end], target)
@@ -130,18 +129,23 @@ impl BlockSearcher {
     ///
     /// If SSE2 instructions are available in the `(platform, running CPU)`,
     /// then we use a different implementation that does an exhaustive linear search over
-    /// the block regardless of whether the block is full or not.
-    ///
-    /// Indeed, if the block is not full, the remaining items are TERMINATED.
-    /// It is surprisingly faster, most likely because of the lack of branch misprediction.
-    pub(crate) fn search_in_block(self, block_docs: &AlignedBuffer, target: u32) -> usize {
+    /// the full block whenever the block is full (`len == 128`). It is surprisingly faster,
+    /// most likely because of the lack of branch.
+    pub(crate) fn search_in_block(
+        self,
+        block_docs: &AlignedBuffer,
+        len: usize,
+        start: usize,
+        target: u32,
+    ) -> usize {
         #[cfg(target_arch = "x86_64")]
         {
-            if self == BlockSearcher::SSE2 {
+            use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
+            if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
                 return sse2::linear_search_sse2_128(block_docs, target);
             }
         }
-        galloping(&block_docs.0[..], target)
+        start + galloping(&block_docs.0[start..len], target)
     }
 }
 
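The scalar fallback above gallops: an exponential probe first brackets the target, then a linear scan finishes the job inside the bracket. A self-contained sketch of that strategy; these are not the exact bodies of `exponential_search` and `linear_search` (which lie outside this hunk), only the technique they implement:

```rust
// Number of elements in `arr` strictly smaller than `target`,
// i.e. the index of the first element >= target.
fn linear_search(arr: &[u32], target: u32) -> usize {
    arr.iter().take_while(|&&el| el < target).count()
}

// Doubles the probe distance until the target is bracketed.
fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
    let mut begin = 0;
    let mut jump = 1;
    loop {
        let end = begin + jump;
        if end >= arr.len() {
            return (begin, arr.len());
        }
        if arr[end] >= target {
            return (begin, end + 1);
        }
        begin = end;
        jump *= 2;
    }
}

fn galloping(block_docs: &[u32], target: u32) -> usize {
    let (start, end) = exponential_search(block_docs, target);
    start + linear_search(&block_docs[start..end], target)
}

fn main() {
    let docs: Vec<u32> = (0..128).map(|i| i * 3).collect();
    assert_eq!(galloping(&docs, 90), 30); // first doc >= 90 is 90, at index 30
}
```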
@@ -162,7 +166,6 @@ mod tests {
     use super::exponential_search;
     use super::linear_search;
     use super::BlockSearcher;
-    use crate::docset::TERMINATED;
     use crate::postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
 
     #[test]
@@ -193,12 +196,19 @@ mod tests {
     fn util_test_search_in_block(block_searcher: BlockSearcher, block: &[u32], target: u32) {
         let cursor = search_in_block_trivial_but_slow(block, target);
         assert!(block.len() < COMPRESSION_BLOCK_SIZE);
-        let mut output_buffer = [TERMINATED; COMPRESSION_BLOCK_SIZE];
+        let mut output_buffer = [u32::max_value(); COMPRESSION_BLOCK_SIZE];
         output_buffer[..block.len()].copy_from_slice(block);
-        assert_eq!(
-            block_searcher.search_in_block(&AlignedBuffer(output_buffer), target),
-            cursor
-        );
+        for i in 0..cursor {
+            assert_eq!(
+                block_searcher.search_in_block(
+                    &AlignedBuffer(output_buffer),
+                    block.len(),
+                    i,
+                    target
+                ),
+                cursor
+            );
+        }
     }
 
     fn util_test_search_in_block_all(block_searcher: BlockSearcher, block: &[u32]) {
 
@@ -1,427 +0,0 @@
-use crate::common::{BinarySerializable, VInt};
-use crate::directory::ReadOnlySource;
-use crate::postings::compression::{
-    AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
-};
-use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
-use crate::schema::IndexRecordOption;
-use crate::{DocId, TERMINATED};
-
-/// `BlockSegmentPostings` is a cursor iterating over blocks
-/// of documents.
-///
-/// # Warning
-///
-/// While it is useful for some very specific high-performance
-/// use cases, you should prefer using `SegmentPostings` for most usage.
-pub struct BlockSegmentPostings {
-    pub(crate) doc_decoder: BlockDecoder,
-    loaded_offset: usize,
-    freq_decoder: BlockDecoder,
-    freq_reading_option: FreqReadingOption,
-
-    doc_freq: usize,
-
-    data: ReadOnlySource,
-    skip_reader: SkipReader,
-}
-
-fn decode_bitpacked_block(
-    doc_decoder: &mut BlockDecoder,
-    freq_decoder_opt: Option<&mut BlockDecoder>,
-    data: &[u8],
-    doc_offset: DocId,
-    doc_num_bits: u8,
-    tf_num_bits: u8,
-) {
-    let num_consumed_bytes = doc_decoder.uncompress_block_sorted(data, doc_offset, doc_num_bits);
-    if let Some(freq_decoder) = freq_decoder_opt {
-        freq_decoder.uncompress_block_unsorted(&data[num_consumed_bytes..], tf_num_bits);
-    }
-}
-
-fn decode_vint_block(
-    doc_decoder: &mut BlockDecoder,
-    freq_decoder_opt: Option<&mut BlockDecoder>,
-    data: &[u8],
-    doc_offset: DocId,
-    num_vint_docs: usize,
-) {
-    doc_decoder.clear();
-    let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
-    if let Some(freq_decoder) = freq_decoder_opt {
-        freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
-    }
-}
-
-fn split_into_skips_and_postings(
-    doc_freq: u32,
-    data: ReadOnlySource,
-) -> (Option<ReadOnlySource>, ReadOnlySource) {
-    if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
-        return (None, data);
-    }
-    let mut data_byte_arr = data.as_slice();
-    let skip_len = VInt::deserialize(&mut data_byte_arr)
-        .expect("Data corrupted")
-        .0 as usize;
-    let vint_len = data.len() - data_byte_arr.len();
-    let (skip_data, postings_data) = data.slice_from(vint_len).split(skip_len);
-    (Some(skip_data), postings_data)
-}
-
-impl BlockSegmentPostings {
-    pub(crate) fn from_data(
-        doc_freq: u32,
-        data: ReadOnlySource,
-        record_option: IndexRecordOption,
-        requested_option: IndexRecordOption,
-    ) -> BlockSegmentPostings {
-        let freq_reading_option = match (record_option, requested_option) {
-            (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
-            (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
-            (_, _) => FreqReadingOption::ReadFreq,
-        };
-
-        let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
-        let skip_reader = match skip_data_opt {
-            Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
-            None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
-        };
-
-        let doc_freq = doc_freq as usize;
-        let mut block_segment_postings = BlockSegmentPostings {
-            doc_decoder: BlockDecoder::with_val(TERMINATED),
-            loaded_offset: std::usize::MAX,
-            freq_decoder: BlockDecoder::with_val(1),
-            freq_reading_option,
-            doc_freq,
-            data: postings_data,
-            skip_reader,
-        };
-        block_segment_postings.advance();
-        block_segment_postings
-    }
-
-    // Resets the block segment postings on another position
-    // in the postings file.
-    //
-    // This is useful for enumerating through a list of terms,
-    // and consuming the associated posting lists while avoiding
-    // reallocating a `BlockSegmentPostings`.
-    //
-    // # Warning
-    //
-    // This does not reset the positions list.
-    pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
-        let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
-        self.data = ReadOnlySource::new(postings_data);
-        self.loaded_offset = std::usize::MAX;
-        self.loaded_offset = std::usize::MAX;
-        if let Some(skip_data) = skip_data_opt {
-            self.skip_reader.reset(skip_data, doc_freq);
-        } else {
-            self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
-        }
-        self.doc_freq = doc_freq as usize;
-    }
-
-    /// Returns the document frequency associated to this block postings.
-    ///
-    /// This `doc_freq` is simply the sum of the length of all of the blocks
-    /// length, and it does not take in account deleted documents.
-    pub fn doc_freq(&self) -> usize {
-        self.doc_freq
-    }
-
-    /// Returns the array of docs in the current block.
-    ///
-    /// Before the first call to `.advance()`, the block
-    /// returned by `.docs()` is empty.
-    #[inline]
-    pub fn docs(&self) -> &[DocId] {
-        self.doc_decoder.output_array()
-    }
-
-    #[inline(always)]
-    pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
-        self.doc_decoder.output_aligned()
-    }
-
-    /// Return the document at index `idx` of the block.
-    #[inline(always)]
-    pub fn doc(&self, idx: usize) -> u32 {
-        self.doc_decoder.output(idx)
-    }
-
-    /// Return the array of `term freq` in the block.
-    #[inline]
-    pub fn freqs(&self) -> &[u32] {
-        self.freq_decoder.output_array()
-    }
-
-    /// Return the frequency at index `idx` of the block.
-    #[inline]
-    pub fn freq(&self, idx: usize) -> u32 {
-        self.freq_decoder.output(idx)
-    }
-
-    /// Returns the length of the current block.
-    ///
-    /// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
-    /// except the last block that may have a length
-    /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
-    #[inline]
-    pub fn block_len(&self) -> usize {
-        self.doc_decoder.output_len
-    }
-
-    pub(crate) fn position_offset(&self) -> u64 {
-        self.skip_reader.position_offset()
-    }
-
-    /// Position on a block that may contains `target_doc`.
-    ///
-    /// If all docs are smaller than target, the block loaded may be empty,
-    /// or be the last an incomplete VInt block.
-    pub fn seek(&mut self, target_doc: DocId) {
-        self.skip_reader.seek(target_doc);
-        self.load_block();
-    }
-
-    fn load_block(&mut self) {
-        let offset = self.skip_reader.byte_offset();
-        if self.loaded_offset == offset {
-            return;
-        }
-        self.loaded_offset = offset;
-        match self.skip_reader.block_info() {
-            BlockInfo::BitPacked {
-                doc_num_bits,
-                tf_num_bits,
-                ..
-            } => {
-                decode_bitpacked_block(
-                    &mut self.doc_decoder,
-                    if let FreqReadingOption::ReadFreq = self.freq_reading_option {
-                        Some(&mut self.freq_decoder)
-                    } else {
-                        None
-                    },
-                    &self.data.as_slice()[offset..],
-                    self.skip_reader.last_doc_in_previous_block,
-                    doc_num_bits,
-                    tf_num_bits,
-                );
-            }
-            BlockInfo::VInt(num_vint_docs) => {
-                decode_vint_block(
-                    &mut self.doc_decoder,
-                    if let FreqReadingOption::ReadFreq = self.freq_reading_option {
-                        Some(&mut self.freq_decoder)
-                    } else {
-                        None
-                    },
-                    &self.data.as_slice()[offset..],
-                    self.skip_reader.last_doc_in_previous_block,
-                    num_vint_docs as usize,
-                );
-            }
-        }
-    }
-
-    /// Advance to the next block.
-    ///
-    /// Returns false iff there was no remaining blocks.
-    pub fn advance(&mut self) -> bool {
-        if !self.skip_reader.advance() {
-            return false;
-        }
-        self.load_block();
-        true
-    }
-
-    /// Returns an empty segment postings object
-    pub fn empty() -> BlockSegmentPostings {
-        BlockSegmentPostings {
-            doc_decoder: BlockDecoder::with_val(TERMINATED),
-            loaded_offset: std::usize::MAX,
-            freq_decoder: BlockDecoder::with_val(1),
-            freq_reading_option: FreqReadingOption::NoFreq,
-            doc_freq: 0,
-            data: ReadOnlySource::new(vec![]),
-            skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::BlockSegmentPostings;
-    use crate::common::HasLen;
-    use crate::core::Index;
-    use crate::docset::{DocSet, TERMINATED};
-    use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
-    use crate::postings::postings::Postings;
-    use crate::postings::SegmentPostings;
-    use crate::schema::IndexRecordOption;
-    use crate::schema::Schema;
-    use crate::schema::Term;
-    use crate::schema::INDEXED;
-    use crate::DocId;
-
-    #[test]
-    fn test_empty_segment_postings() {
-        let mut postings = SegmentPostings::empty();
-        assert_eq!(postings.advance(), TERMINATED);
-        assert_eq!(postings.advance(), TERMINATED);
-        assert_eq!(postings.len(), 0);
-    }
-
-    #[test]
-    fn test_empty_postings_doc_returns_terminated() {
-        let mut postings = SegmentPostings::empty();
-        assert_eq!(postings.doc(), TERMINATED);
-        assert_eq!(postings.advance(), TERMINATED);
-    }
-
-    #[test]
-    fn test_empty_postings_doc_term_freq_returns_0() {
-        let postings = SegmentPostings::empty();
-        assert_eq!(postings.term_freq(), 1);
-    }
-
-    #[test]
-    fn test_empty_block_segment_postings() {
-        let mut postings = BlockSegmentPostings::empty();
-        assert!(!postings.advance());
-        assert_eq!(postings.doc_freq(), 0);
-    }
-
-    #[test]
-    fn test_block_segment_postings() {
-        let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
-        let mut offset: u32 = 0u32;
-        // checking that the `doc_freq` is correct
-        assert_eq!(block_segments.doc_freq(), 100_000);
-        loop {
-            let block = block_segments.docs();
-            for (i, doc) in block.iter().cloned().enumerate() {
-                assert_eq!(offset + (i as u32), doc);
-            }
-            offset += block.len() as u32;
-            if block_segments.advance() {
-                break;
-            }
-        }
-    }
-
-    #[test]
-    fn test_skip_right_at_new_block() {
-        let mut doc_ids = (0..128).collect::<Vec<u32>>();
-        // 128 is missing
-        doc_ids.push(129);
-        doc_ids.push(130);
-        {
-            let block_segments = build_block_postings(&doc_ids);
-            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
-            assert_eq!(docset.seek(128), 129);
-            assert_eq!(docset.doc(), 129);
-            assert_eq!(docset.advance(), 130);
-            assert_eq!(docset.doc(), 130);
-            assert_eq!(docset.advance(), TERMINATED);
-        }
-        {
-            let block_segments = build_block_postings(&doc_ids);
-            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
-            assert_eq!(docset.seek(129), 129);
-            assert_eq!(docset.doc(), 129);
-            assert_eq!(docset.advance(), 130);
-            assert_eq!(docset.doc(), 130);
-            assert_eq!(docset.advance(), TERMINATED);
-        }
-        {
-            let block_segments = build_block_postings(&doc_ids);
-            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
-            assert_eq!(docset.doc(), 0);
-            assert_eq!(docset.seek(131), TERMINATED);
-            assert_eq!(docset.doc(), TERMINATED);
-        }
-    }
-
-    fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
-        let mut schema_builder = Schema::builder();
-        let int_field = schema_builder.add_u64_field("id", INDEXED);
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        let mut last_doc = 0u32;
-        for &doc in docs {
-            for _ in last_doc..doc {
-                index_writer.add_document(doc!(int_field=>1u64));
-            }
-            index_writer.add_document(doc!(int_field=>0u64));
-            last_doc = doc + 1;
-        }
-        index_writer.commit().unwrap();
-        let searcher = index.reader().unwrap().searcher();
-        let segment_reader = searcher.segment_reader(0);
-        let inverted_index = segment_reader.inverted_index(int_field);
-        let term = Term::from_field_u64(int_field, 0u64);
-        let term_info = inverted_index.get_term_info(&term).unwrap();
-        inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
-    }
-
-    #[test]
-    fn test_block_segment_postings_skip2() {
-        let mut docs = vec![0];
-        for i in 0..1300 {
-            docs.push((i * i / 100) + i);
-        }
-        let mut block_postings = build_block_postings(&docs[..]);
-        for i in vec![0, 424, 10000] {
-            block_postings.seek(i);
-            let docs = block_postings.docs();
-            assert!(docs[0] <= i);
-            assert!(docs.last().cloned().unwrap_or(0u32) >= i);
-        }
-        block_postings.seek(100_000);
-        assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
-    }
-
-    #[test]
-    fn test_reset_block_segment_postings() {
-        let mut schema_builder = Schema::builder();
-        let int_field = schema_builder.add_u64_field("id", INDEXED);
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        // create two postings list, one containg even number,
-        // the other containing odd numbers.
-        for i in 0..6 {
-            let doc = doc!(int_field=> (i % 2) as u64);
-            index_writer.add_document(doc);
-        }
-        index_writer.commit().unwrap();
-        let searcher = index.reader().unwrap().searcher();
-        let segment_reader = searcher.segment_reader(0);
-
-        let mut block_segments;
-        {
-            let term = Term::from_field_u64(int_field, 0u64);
-            let inverted_index = segment_reader.inverted_index(int_field);
-            let term_info = inverted_index.get_term_info(&term).unwrap();
-            block_segments = inverted_index
-                .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
-        }
-        assert_eq!(block_segments.docs(), &[0, 2, 4]);
-        {
-            let term = Term::from_field_u64(int_field, 1u64);
-            let inverted_index = segment_reader.inverted_index(int_field);
-            let term_info = inverted_index.get_term_info(&term).unwrap();
-            inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
-        }
-        assert!(block_segments.advance());
-        assert_eq!(block_segments.docs(), &[1, 3, 5]);
-    }
-}
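With this file deleted, `BlockSegmentPostings` is re-exported from `segment_postings` instead (see the module diff further down). For reference, a sketch of the block-by-block consumption pattern the cursor supports, reconstructed from the deleted tests and from the `advance` doc comment ("returns false iff there was no remaining blocks"):

```rust
#[test]
fn block_cursor_consumption_sketch() {
    // `build_block_postings` is the fixture from the deleted tests above.
    let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
    let mut all_docs: Vec<u32> = Vec::new();
    loop {
        // `docs()` exposes the decoded doc ids of the current block.
        all_docs.extend_from_slice(block_segments.docs());
        if !block_segments.advance() {
            break; // no block left
        }
    }
    assert_eq!(all_docs.len(), 100_000);
}
```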
@@ -1,5 +1,4 @@
 use crate::common::FixedSize;
-use crate::docset::TERMINATED;
 use bitpacking::{BitPacker, BitPacker4x};
 
 pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
@@ -18,12 +17,6 @@ pub struct BlockEncoder {
     pub output_len: usize,
 }
 
-impl Default for BlockEncoder {
-    fn default() -> Self {
-        BlockEncoder::new()
-    }
-}
-
 impl BlockEncoder {
     pub fn new() -> BlockEncoder {
         BlockEncoder {
@@ -61,13 +54,11 @@ pub struct BlockDecoder {
     pub output_len: usize,
 }
 
-impl Default for BlockDecoder {
-    fn default() -> Self {
+impl BlockDecoder {
+    pub fn new() -> BlockDecoder {
         BlockDecoder::with_val(0u32)
     }
-}
-
-impl BlockDecoder {
+
     pub fn with_val(val: u32) -> BlockDecoder {
         BlockDecoder {
             bitpacker: BitPacker4x::new(),
@@ -99,18 +90,14 @@ impl BlockDecoder {
     }
 
     #[inline]
-    pub(crate) fn output_aligned(&self) -> &AlignedBuffer {
-        &self.output
+    pub(crate) fn output_aligned(&self) -> (&AlignedBuffer, usize) {
+        (&self.output, self.output_len)
     }
 
     #[inline]
     pub fn output(&self, idx: usize) -> u32 {
         self.output.0[idx]
     }
-
-    pub fn clear(&mut self) {
-        self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
-    }
 }
 
 pub trait VIntEncoder {
@@ -147,9 +134,9 @@ pub trait VIntDecoder {
     /// For instance, if delta encoded are `1, 3, 9`, and the
     /// `offset` is 5, then the output will be:
     /// `5 + 1 = 6, 6 + 3= 9, 9 + 9 = 18`
-    fn uncompress_vint_sorted(
+    fn uncompress_vint_sorted<'a>(
         &mut self,
-        compressed_data: &[u8],
+        compressed_data: &'a [u8],
         offset: u32,
         num_els: usize,
     ) -> usize;
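The doc comment above already carries the worked example; in code, the sorted-vint decoding rule is simply a running sum seeded with `offset`. A minimal sketch of that rule (not the decoder itself):

```rust
// Each delta is added to an accumulator seeded with `offset`.
fn decode_sorted_deltas(deltas: &[u32], offset: u32) -> Vec<u32> {
    let mut acc = offset;
    deltas
        .iter()
        .map(|&delta| {
            acc += delta;
            acc
        })
        .collect()
}

fn main() {
    // Mirrors the doc comment: deltas 1, 3, 9 with offset 5 -> 6, 9, 18.
    assert_eq!(decode_sorted_deltas(&[1, 3, 9], 5), vec![6, 9, 18]);
}
```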
@@ -159,7 +146,7 @@ pub trait VIntDecoder {
     ///
     /// The method takes a number of int to decompress, and returns
     /// the amount of bytes that were read to decompress them.
-    fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize;
+    fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize;
 }
 
 impl VIntEncoder for BlockEncoder {
@@ -173,9 +160,9 @@ impl VIntEncoder for BlockEncoder {
 }
 
 impl VIntDecoder for BlockDecoder {
-    fn uncompress_vint_sorted(
+    fn uncompress_vint_sorted<'a>(
         &mut self,
-        compressed_data: &[u8],
+        compressed_data: &'a [u8],
         offset: u32,
         num_els: usize,
     ) -> usize {
@@ -183,7 +170,7 @@ impl VIntDecoder for BlockDecoder {
         vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
     }
 
-    fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
+    fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
         self.output_len = num_els;
         vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
     }
@@ -199,7 +186,7 @@ pub mod tests {
         let vals: Vec<u32> = (0u32..128u32).map(|i| i * 7).collect();
         let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 0);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 0, num_bits);
             assert_eq!(consumed_num_bytes, compressed_data.len());
@@ -212,9 +199,9 @@ pub mod tests {
     #[test]
     fn test_encode_sorted_block_with_offset() {
         let vals: Vec<u32> = (0u32..128u32).map(|i| 11 + i * 7).collect();
-        let mut encoder = BlockEncoder::default();
+        let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 10);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_sorted(compressed_data, 10, num_bits);
             assert_eq!(consumed_num_bytes, compressed_data.len());
@@ -229,11 +216,11 @@ pub mod tests {
         let mut compressed: Vec<u8> = Vec::new();
         let n = 128;
         let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32).collect();
-        let mut encoder = BlockEncoder::default();
+        let mut encoder = BlockEncoder::new();
         let (num_bits, compressed_data) = encoder.compress_block_sorted(&vals, 10);
         compressed.extend_from_slice(compressed_data);
         compressed.push(173u8);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_sorted(&compressed, 10, num_bits);
             assert_eq!(consumed_num_bytes, compressed.len() - 1);
@@ -249,11 +236,11 @@ pub mod tests {
         let mut compressed: Vec<u8> = Vec::new();
         let n = 128;
         let vals: Vec<u32> = (0..n).map(|i| 11u32 + (i as u32) * 7u32 % 12).collect();
-        let mut encoder = BlockEncoder::default();
+        let mut encoder = BlockEncoder::new();
        let (num_bits, compressed_data) = encoder.compress_block_unsorted(&vals);
         compressed.extend_from_slice(compressed_data);
         compressed.push(173u8);
-        let mut decoder = BlockDecoder::default();
+        let mut decoder = BlockDecoder::new();
         {
             let consumed_num_bytes = decoder.uncompress_block_unsorted(&compressed, num_bits);
             assert_eq!(consumed_num_bytes + 1, compressed.len());
@@ -264,11 +251,6 @@ pub mod tests {
         }
     }
 
-    #[test]
-    fn test_block_decoder_initialization() {
-        let block = BlockDecoder::with_val(TERMINATED);
-        assert_eq!(block.output(0), TERMINATED);
-    }
     #[test]
     fn test_encode_vint() {
         {
@@ -278,7 +260,7 @@ pub mod tests {
         for offset in &[0u32, 1u32, 2u32] {
             let encoded_data = encoder.compress_vint_sorted(&input, *offset);
             assert!(encoded_data.len() <= expected_length);
-            let mut decoder = BlockDecoder::default();
+            let mut decoder = BlockDecoder::new();
             let consumed_num_bytes =
                 decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
             assert_eq!(consumed_num_bytes, encoded_data.len());
 
@@ -42,7 +42,7 @@ pub(crate) fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a
 }
 
 #[inline(always)]
-pub fn uncompress_sorted(compressed_data: &[u8], output: &mut [u32], offset: u32) -> usize {
+pub fn uncompress_sorted<'a>(compressed_data: &'a [u8], output: &mut [u32], offset: u32) -> usize {
     let mut read_byte = 0;
     let mut result = offset;
     for output_mut in output.iter_mut() {
 
@@ -3,8 +3,11 @@ Postings module (also called inverted index)
 */
 
 mod block_search;
-mod block_segment_postings;
 pub(crate) mod compression;
+/// Postings module
+///
+/// Postings, also called inverted lists, is the key datastructure
+/// to full-text search.
 mod postings;
 mod postings_writer;
 mod recorder;
@@ -19,17 +22,18 @@ pub(crate) use self::block_search::BlockSearcher;
 pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
 pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
 
+use self::compression::COMPRESSION_BLOCK_SIZE;
 pub use self::postings::Postings;
-pub(crate) use self::skip::{BlockInfo, SkipReader};
+pub(crate) use self::skip::SkipReader;
 pub use self::term_info::TermInfo;
 
-pub use self::block_segment_postings::BlockSegmentPostings;
-pub use self::segment_postings::SegmentPostings;
+pub use self::segment_postings::{BlockSegmentPostings, SegmentPostings};
 
 pub(crate) use self::stacker::compute_table_size;
 
 pub use crate::common::HasLen;
 
+pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32;
 pub(crate) type UnorderedTermId = u64;
 
 #[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
@@ -47,10 +51,10 @@ pub mod tests {
     use crate::core::Index;
     use crate::core::SegmentComponent;
     use crate::core::SegmentReader;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::{DocSet, SkipResult};
     use crate::fieldnorm::FieldNormReader;
     use crate::indexer::operation::AddOperation;
-    use crate::indexer::SegmentWriter;
+    use crate::indexer::{IndexWriterConfig, SegmentWriter, ResourceManager};
     use crate::merge_policy::NoMergePolicy;
     use crate::query::Scorer;
     use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
@@ -111,12 +115,29 @@ pub mod tests {
             let mut postings = inverted_index
                 .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                 .unwrap();
-            assert_eq!(postings.doc(), 0);
+            postings.advance();
             postings.positions(&mut positions);
             assert_eq!(&[0, 1, 2], &positions[..]);
             postings.positions(&mut positions);
             assert_eq!(&[0, 1, 2], &positions[..]);
-            assert_eq!(postings.advance(), 1);
+            postings.advance();
+            postings.positions(&mut positions);
+            assert_eq!(&[0, 5], &positions[..]);
+        }
+        {
+            let mut postings = inverted_index
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .unwrap();
+            postings.advance();
+            postings.advance();
+            postings.positions(&mut positions);
+            assert_eq!(&[0, 5], &positions[..]);
+        }
+        {
+            let mut postings = inverted_index
+                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
+                .unwrap();
+            assert_eq!(postings.skip_next(1), SkipResult::Reached);
             assert_eq!(postings.doc(), 1);
             postings.positions(&mut positions);
             assert_eq!(&[0, 5], &positions[..]);
@@ -125,25 +146,7 @@ pub mod tests {
             let mut postings = inverted_index
                 .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                 .unwrap();
-            assert_eq!(postings.doc(), 0);
-            assert_eq!(postings.advance(), 1);
-            postings.positions(&mut positions);
-            assert_eq!(&[0, 5], &positions[..]);
-        }
-        {
-            let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
-                .unwrap();
-            assert_eq!(postings.seek(1), 1);
-            assert_eq!(postings.doc(), 1);
-            postings.positions(&mut positions);
-            assert_eq!(&[0, 5], &positions[..]);
-        }
-        {
-            let mut postings = inverted_index
-                .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
-                .unwrap();
-            assert_eq!(postings.seek(1002), 1002);
+            assert_eq!(postings.skip_next(1002), SkipResult::Reached);
             assert_eq!(postings.doc(), 1002);
             postings.positions(&mut positions);
             assert_eq!(&[0, 5], &positions[..]);
@@ -152,8 +155,8 @@ pub mod tests {
             let mut postings = inverted_index
                 .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                 .unwrap();
-            assert_eq!(postings.seek(100), 100);
-            assert_eq!(postings.seek(1002), 1002);
+            assert_eq!(postings.skip_next(100), SkipResult::Reached);
+            assert_eq!(postings.skip_next(1002), SkipResult::Reached);
             assert_eq!(postings.doc(), 1002);
             postings.positions(&mut positions);
             assert_eq!(&[0, 5], &positions[..]);
@@ -213,11 +216,17 @@ pub mod tests {
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema.clone());
-        let segment = index.new_segment();
 
-        {
-            let mut segment_writer =
-                SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
+        let resource_manager = ResourceManager::default();
+        let segment = {
+            let mut segment_writer = SegmentWriter::for_segment(
+                &IndexWriterConfig::for_test(),
+                index.new_segment(),
+                &schema,
+                index.tokenizers(),
+                resource_manager
+            )
+            .unwrap();
             {
                 let mut doc = Document::default();
                 // checking that position works if the field has two values
@@ -249,8 +258,9 @@ pub mod tests {
                 };
                 segment_writer.add_document(op, &schema).unwrap();
             }
-            segment_writer.finalize().unwrap();
-        }
+            let (segment, _) = segment_writer.finalize().unwrap();
+            segment
+        };
         {
             let segment_reader = SegmentReader::open(&segment).unwrap();
             {
@@ -278,21 +288,22 @@ pub mod tests {
                 .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
                 .unwrap();
             assert_eq!(postings_a.len(), 1000);
+            assert!(postings_a.advance());
             assert_eq!(postings_a.doc(), 0);
             assert_eq!(postings_a.term_freq(), 6);
             postings_a.positions(&mut positions);
             assert_eq!(&positions[..], [0, 2, 4, 6, 7, 13]);
-            assert_eq!(postings_a.advance(), 1u32);
+            assert!(postings_a.advance());
             assert_eq!(postings_a.doc(), 1u32);
             assert_eq!(postings_a.term_freq(), 1);
             for i in 2u32..1000u32 {
-                assert_eq!(postings_a.advance(), i);
+                assert!(postings_a.advance());
                 assert_eq!(postings_a.term_freq(), 1);
                 postings_a.positions(&mut positions);
                 assert_eq!(&positions[..], [i]);
                 assert_eq!(postings_a.doc(), i);
             }
-            assert_eq!(postings_a.advance(), TERMINATED);
+            assert!(!postings_a.advance());
         }
         {
             let term_e = Term::from_field_text(text_field, "e");
@@ -302,6 +313,7 @@ pub mod tests {
                 .unwrap();
             assert_eq!(postings_e.len(), 1000 - 2);
             for i in 2u32..1000u32 {
+                assert!(postings_e.advance());
                 assert_eq!(postings_e.term_freq(), i);
                 postings_e.positions(&mut positions);
                 assert_eq!(positions.len(), i as usize);
@@ -309,9 +321,8 @@ pub mod tests {
                     assert_eq!(positions[j], (j as u32));
                 }
                 assert_eq!(postings_e.doc(), i);
-                postings_e.advance();
             }
-            assert_eq!(postings_e.doc(), TERMINATED);
+            assert!(!postings_e.advance());
         }
     }
 }
@@ -325,8 +336,16 @@ pub mod tests {
         let index = Index::create_in_ram(schema);
         {
             let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            index_writer.add_document(doc!(text_field => "g b b d c g c"));
-            index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
+            {
+                let mut doc = Document::default();
+                doc.add_text(text_field, "g b b d c g c");
+                index_writer.add_document(doc);
+            }
+            {
+                let mut doc = Document::default();
+                doc.add_text(text_field, "g a b b a d c g c");
+                index_writer.add_document(doc);
+            }
             assert!(index_writer.commit().is_ok());
         }
         let term_a = Term::from_field_text(text_field, "a");
@@ -336,6 +355,7 @@ pub mod tests {
            .inverted_index(text_field)
            .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
            .unwrap();
+        assert!(postings.advance());
         assert_eq!(postings.doc(), 1u32);
         postings.positions(&mut positions);
         assert_eq!(&positions[..], &[1u32, 4]);
@@ -375,10 +398,11 @@ pub mod tests {
|
|||||||
.inverted_index(term_2.field())
|
.inverted_index(term_2.field())
|
||||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(segment_postings.seek(i), i);
|
|
||||||
|
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
|
||||||
assert_eq!(segment_postings.doc(), i);
|
assert_eq!(segment_postings.doc(), i);
|
||||||
|
|
||||||
assert_eq!(segment_postings.seek(j), j);
|
assert_eq!(segment_postings.skip_next(j), SkipResult::Reached);
|
||||||
assert_eq!(segment_postings.doc(), j);
|
assert_eq!(segment_postings.doc(), j);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -390,16 +414,17 @@ pub mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// check that `skip_next` advances the iterator
|
// check that `skip_next` advances the iterator
|
||||||
|
assert!(segment_postings.advance());
|
||||||
assert_eq!(segment_postings.doc(), 0);
|
assert_eq!(segment_postings.doc(), 0);
|
||||||
|
|
||||||
assert_eq!(segment_postings.seek(1), 1);
|
assert_eq!(segment_postings.skip_next(1), SkipResult::Reached);
|
||||||
assert_eq!(segment_postings.doc(), 1);
|
assert_eq!(segment_postings.doc(), 1);
|
||||||
|
|
||||||
assert_eq!(segment_postings.seek(1), 1);
|
assert_eq!(segment_postings.skip_next(1), SkipResult::OverStep);
|
||||||
assert_eq!(segment_postings.doc(), 1);
|
assert_eq!(segment_postings.doc(), 2);
|
||||||
|
|
||||||
// check that going beyond the end is handled
|
// check that going beyond the end is handled
|
||||||
assert_eq!(segment_postings.seek(num_docs), TERMINATED);
|
assert_eq!(segment_postings.skip_next(num_docs), SkipResult::End);
|
||||||
}
|
}
|
||||||
|
|
||||||
// check that filtering works
|
// check that filtering works
|
||||||
@@ -410,7 +435,7 @@ pub mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
for i in 0..num_docs / 2 {
|
for i in 0..num_docs / 2 {
|
||||||
assert_eq!(segment_postings.seek(i * 2), i * 2);
|
assert_eq!(segment_postings.skip_next(i * 2), SkipResult::Reached);
|
||||||
assert_eq!(segment_postings.doc(), i * 2);
|
assert_eq!(segment_postings.doc(), i * 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -420,7 +445,7 @@ pub mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
for i in 0..num_docs / 2 - 1 {
|
for i in 0..num_docs / 2 - 1 {
|
||||||
assert!(segment_postings.seek(i * 2 + 1) > (i * 1) * 2);
|
assert_eq!(segment_postings.skip_next(i * 2 + 1), SkipResult::OverStep);
|
||||||
assert_eq!(segment_postings.doc(), (i + 1) * 2);
|
assert_eq!(segment_postings.doc(), (i + 1) * 2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -432,7 +457,6 @@ pub mod tests {
|
|||||||
assert!(index_writer.commit().is_ok());
|
assert!(index_writer.commit().is_ok());
|
||||||
}
|
}
|
||||||
let searcher = index.reader().unwrap().searcher();
|
let searcher = index.reader().unwrap().searcher();
|
||||||
assert_eq!(searcher.segment_readers().len(), 1);
|
|
||||||
let segment_reader = searcher.segment_reader(0);
|
let segment_reader = searcher.segment_reader(0);
|
||||||
|
|
||||||
// make sure seeking still works
|
// make sure seeking still works
|
||||||
@@ -443,11 +467,11 @@ pub mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
if i % 2 == 0 {
|
if i % 2 == 0 {
|
||||||
assert_eq!(segment_postings.seek(i), i);
|
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
|
||||||
assert_eq!(segment_postings.doc(), i);
|
assert_eq!(segment_postings.doc(), i);
|
||||||
assert!(segment_reader.is_deleted(i));
|
assert!(segment_reader.is_deleted(i));
|
||||||
} else {
|
} else {
|
||||||
assert_eq!(segment_postings.seek(i), i);
|
assert_eq!(segment_postings.skip_next(i), SkipResult::Reached);
|
||||||
assert_eq!(segment_postings.doc(), i);
|
assert_eq!(segment_postings.doc(), i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -462,16 +486,12 @@ pub mod tests {
|
|||||||
let mut last = 2; // start from 5 to avoid seeking to 3 twice
|
let mut last = 2; // start from 5 to avoid seeking to 3 twice
|
||||||
let mut cur = 3;
|
let mut cur = 3;
|
||||||
loop {
|
loop {
|
||||||
let seek = segment_postings.seek(cur);
|
match segment_postings.skip_next(cur) {
|
||||||
if seek == TERMINATED {
|
SkipResult::End => break,
|
||||||
break;
|
SkipResult::Reached => assert_eq!(segment_postings.doc(), cur),
|
||||||
}
|
SkipResult::OverStep => assert_eq!(segment_postings.doc(), cur + 1),
|
||||||
assert_eq!(seek, segment_postings.doc());
|
|
||||||
if seek == cur {
|
|
||||||
assert_eq!(segment_postings.doc(), cur);
|
|
||||||
} else {
|
|
||||||
assert_eq!(segment_postings.doc(), cur + 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let next = cur + last;
|
let next = cur + last;
|
||||||
last = cur;
|
last = cur;
|
||||||
cur = next;
|
cur = next;
|
||||||
@@ -557,7 +577,7 @@ pub mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<TDocSet: DocSet> DocSet for UnoptimizedDocSet<TDocSet> {
|
impl<TDocSet: DocSet> DocSet for UnoptimizedDocSet<TDocSet> {
|
||||||
fn advance(&mut self) -> DocId {
|
fn advance(&mut self) -> bool {
|
||||||
self.0.advance()
|
self.0.advance()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -583,22 +603,30 @@ pub mod tests {
|
|||||||
for target in targets {
|
for target in targets {
|
||||||
let mut postings_opt = postings_factory();
|
let mut postings_opt = postings_factory();
|
||||||
let mut postings_unopt = UnoptimizedDocSet::wrap(postings_factory());
|
let mut postings_unopt = UnoptimizedDocSet::wrap(postings_factory());
|
||||||
let skip_result_opt = postings_opt.seek(target);
|
let skip_result_opt = postings_opt.skip_next(target);
|
||||||
let skip_result_unopt = postings_unopt.seek(target);
|
let skip_result_unopt = postings_unopt.skip_next(target);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
skip_result_unopt, skip_result_opt,
|
skip_result_unopt, skip_result_opt,
|
||||||
"Failed while skipping to {}",
|
"Failed while skipping to {}",
|
||||||
target
|
target
|
||||||
);
|
);
|
||||||
assert!(skip_result_opt >= target);
|
match skip_result_opt {
|
||||||
assert_eq!(skip_result_opt, postings_opt.doc());
|
SkipResult::Reached => assert_eq!(postings_opt.doc(), target),
|
||||||
if skip_result_opt == TERMINATED {
|
SkipResult::OverStep => assert!(postings_opt.doc() > target),
|
||||||
return;
|
SkipResult::End => {
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
while postings_opt.doc() != TERMINATED {
|
while postings_opt.advance() {
|
||||||
assert_eq!(postings_opt.doc(), postings_unopt.doc());
|
assert!(postings_unopt.advance());
|
||||||
assert_eq!(postings_opt.advance(), postings_unopt.advance());
|
assert_eq!(
|
||||||
|
postings_opt.doc(),
|
||||||
|
postings_unopt.doc(),
|
||||||
|
"Failed while skipping to {}",
|
||||||
|
target
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
assert!(!postings_unopt.advance());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -607,7 +635,7 @@ pub mod tests {
|
|||||||
mod bench {
|
mod bench {
|
||||||
|
|
||||||
use super::tests::*;
|
use super::tests::*;
|
||||||
use crate::docset::TERMINATED;
|
use crate::docset::SkipResult;
|
||||||
use crate::query::Intersection;
|
use crate::query::Intersection;
|
||||||
use crate::schema::IndexRecordOption;
|
use crate::schema::IndexRecordOption;
|
||||||
use crate::tests;
|
use crate::tests;
|
||||||
@@ -625,7 +653,7 @@ mod bench {
|
|||||||
.inverted_index(TERM_A.field())
|
.inverted_index(TERM_A.field())
|
||||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
while segment_postings.advance() != TERMINATED {}
|
while segment_postings.advance() {}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -657,7 +685,7 @@ mod bench {
|
|||||||
segment_postings_c,
|
segment_postings_c,
|
||||||
segment_postings_d,
|
segment_postings_d,
|
||||||
]);
|
]);
|
||||||
while intersection.advance() != TERMINATED {}
|
while intersection.advance() {}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -673,10 +701,11 @@ mod bench {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let mut existing_docs = Vec::new();
|
let mut existing_docs = Vec::new();
|
||||||
|
segment_postings.advance();
|
||||||
for doc in &docs {
|
for doc in &docs {
|
||||||
if *doc >= segment_postings.doc() {
|
if *doc >= segment_postings.doc() {
|
||||||
existing_docs.push(*doc);
|
existing_docs.push(*doc);
|
||||||
if segment_postings.seek(*doc) == TERMINATED {
|
if segment_postings.skip_next(*doc) == SkipResult::End {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -688,7 +717,7 @@ mod bench {
|
|||||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
for doc in &existing_docs {
|
for doc in &existing_docs {
|
||||||
if segment_postings.seek(*doc) == TERMINATED {
|
if segment_postings.skip_next(*doc) == SkipResult::End {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -727,9 +756,8 @@ mod bench {
|
|||||||
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
.read_postings(&*TERM_A, IndexRecordOption::Basic)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
let mut s = 0u32;
|
let mut s = 0u32;
|
||||||
while segment_postings.doc() != TERMINATED {
|
while segment_postings.advance() {
|
||||||
s += (segment_postings.doc() & n) % 1024;
|
s += (segment_postings.doc() & n) % 1024;
|
||||||
segment_postings.advance()
|
|
||||||
}
|
}
|
||||||
s
|
s
|
||||||
});
|
});
|
||||||
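Read together, the hunks above pin down the `DocSet` contract this branch moves back to: `advance()` returns a `bool` instead of a new `DocId`, there is no `TERMINATED` sentinel, and `skip_next(target)` reports how the cursor landed (`Reached`, `OverStep`, or `End`) rather than returning the doc id it landed on. As a hedged sketch (the helper function and its name are ours, not part of the diff), a caller written against that contract looks like this:

```rust
use tantivy::{DocId, DocSet, SkipResult};

/// Collects every doc id >= `target` from a DocSet, using the
/// `SkipResult`-based API exercised by the tests above.
fn collect_from<D: DocSet>(docset: &mut D, target: DocId) -> Vec<DocId> {
    let mut hits = Vec::new();
    match docset.skip_next(target) {
        // Reached: landed exactly on `target`; OverStep: on the first doc > target.
        SkipResult::Reached | SkipResult::OverStep => {
            hits.push(docset.doc());
            // `advance()` returns a bool here, so the loop needs no sentinel value.
            while docset.advance() {
                hits.push(docset.doc());
            }
        }
        // Every remaining document was smaller than `target`.
        SkipResult::End => {}
    }
    hits
}
```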
@@ -95,7 +95,7 @@ impl MultiFieldPostingsWriter {
        }
    }

-    pub fn mem_usage(&self) -> usize {
+    pub fn mem_usage(&self) -> u64 {
        self.term_index.mem_usage() + self.heap.mem_usage()
    }

@@ -1,19 +1,56 @@
+use crate::common::BitSet;
use crate::common::HasLen;
+use crate::common::{BinarySerializable, VInt};
-use crate::docset::DocSet;
+use crate::docset::{DocSet, SkipResult};
use crate::positions::PositionReader;
+use crate::postings::compression::{compressed_block_size, AlignedBuffer};
-use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
+use crate::postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::serializer::PostingsSerializer;
use crate::postings::BlockSearcher;
+use crate::postings::FreqReadingOption;
use crate::postings::Postings;
+use crate::postings::SkipReader;
+use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::IndexRecordOption;
use crate::DocId;
+use owned_read::OwnedRead;
+use std::cmp::Ordering;
+use tantivy_fst::Streamer;

-use crate::directory::ReadOnlySource;
-use crate::postings::BlockSegmentPostings;
+struct PositionComputer {
+    // store the amount of position int
+    // before reading positions.
+    //
+    // if none, position are already loaded in
+    // the positions vec.
+    position_to_skip: usize,
+    position_reader: PositionReader,
+}
+
+impl PositionComputer {
+    pub fn new(position_reader: PositionReader) -> PositionComputer {
+        PositionComputer {
+            position_to_skip: 0,
+            position_reader,
+        }
+    }
+
+    pub fn add_skip(&mut self, num_skip: usize) {
+        self.position_to_skip += num_skip;
+    }
+
+    // Positions can only be read once.
+    pub fn positions_with_offset(&mut self, offset: u32, output: &mut [u32]) {
+        self.position_reader.skip(self.position_to_skip);
+        self.position_to_skip = 0;
+        self.position_reader.read(output);
+        let mut cum = offset;
+        for output_mut in output.iter_mut() {
+            cum += *output_mut;
+            *output_mut = cum;
+        }
+    }
+}

/// `SegmentPostings` represents the inverted list or postings associated to
/// a term in a `Segment`.
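The new `PositionComputer` defers position decoding: skips are only accumulated in `position_to_skip`, and the positions stream is touched exactly once, when `positions_with_offset` is finally called. The decoding step itself is a running sum over deltas. A self-contained illustration of that last loop (the data values are made up):

```rust
/// Turns position deltas into absolute positions in place,
/// exactly like the `cum` loop in `PositionComputer` above.
fn cumsum_in_place(offset: u32, deltas: &mut [u32]) {
    let mut cum = offset;
    for delta in deltas.iter_mut() {
        cum += *delta;
        *delta = cum;
    }
}

fn main() {
    let mut positions = vec![3u32, 2, 4]; // deltas as stored on disk
    cumsum_in_place(0, &mut positions);
    assert_eq!(positions, vec![3, 5, 9]); // absolute token positions
}
```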
@@ -23,17 +60,18 @@ use crate::postings::BlockSegmentPostings;
pub struct SegmentPostings {
    block_cursor: BlockSegmentPostings,
    cur: usize,
-    position_reader: Option<PositionReader>,
+    position_computer: Option<PositionComputer>,
    block_searcher: BlockSearcher,
}

impl SegmentPostings {
    /// Returns an empty segment postings object
    pub fn empty() -> Self {
+        let empty_block_cursor = BlockSegmentPostings::empty();
        SegmentPostings {
-            block_cursor: BlockSegmentPostings::empty(),
-            cur: 0,
-            position_reader: None,
+            block_cursor: empty_block_cursor,
+            cur: COMPRESSION_BLOCK_SIZE,
+            position_computer: None,
            block_searcher: BlockSearcher::default(),
        }
    }
@@ -59,13 +97,15 @@ impl SegmentPostings {
        }
        let block_segment_postings = BlockSegmentPostings::from_data(
            docs.len() as u32,
-            ReadOnlySource::from(buffer),
+            OwnedRead::new(buffer),
            IndexRecordOption::Basic,
            IndexRecordOption::Basic,
        );
        SegmentPostings::from_block_postings(block_segment_postings, None)
    }
+}
+
+impl SegmentPostings {
    /// Reads a Segment postings from an &[u8]
    ///
    /// * `len` - number of document in the posting lists.
@@ -74,12 +114,12 @@ impl SegmentPostings {
    /// frequencies and/or positions
    pub(crate) fn from_block_postings(
        segment_block_postings: BlockSegmentPostings,
-        position_reader: Option<PositionReader>,
+        positions_stream_opt: Option<PositionReader>,
    ) -> SegmentPostings {
        SegmentPostings {
            block_cursor: segment_block_postings,
-            cur: 0, // cursor within the block
-            position_reader,
+            cur: COMPRESSION_BLOCK_SIZE, // cursor within the block
+            position_computer: positions_stream_opt.map(PositionComputer::new),
            block_searcher: BlockSearcher::default(),
        }
    }
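Both constructors now initialize `cur` to `COMPRESSION_BLOCK_SIZE`, one past any valid index in a decoded block. Since `advance()` increments `cur` and reloads a block whenever `cur >= block_len()`, the sentinel guarantees the very first `advance()` loads the first block instead of reading from an empty decoder. A minimal model of that test (the constant stands in for the real decoder state):

```rust
const COMPRESSION_BLOCK_SIZE: usize = 128;

/// Does bumping the cursor force a block reload? Mirrors the
/// `self.cur >= self.block_cursor.block_len()` check in `advance()`.
fn must_reload(cur: usize, block_len: usize) -> bool {
    cur + 1 >= block_len
}

fn main() {
    // Fresh postings: sentinel cursor, nothing decoded yet -> reload.
    assert!(must_reload(COMPRESSION_BLOCK_SIZE, 0));
    // Mid-block: cursor 5 inside a full 128-doc block -> keep going.
    assert!(!must_reload(5, COMPRESSION_BLOCK_SIZE));
}
```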
@@ -89,52 +129,134 @@ impl DocSet for SegmentPostings {
    // goes to the next element.
    // next needs to be called a first time to point to the correct element.
    #[inline]
-    fn advance(&mut self) -> DocId {
-        if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
-            self.cur = 0;
-            self.block_cursor.advance();
-        } else {
-            self.cur += 1;
+    fn advance(&mut self) -> bool {
+        if self.position_computer.is_some() && self.cur < COMPRESSION_BLOCK_SIZE {
+            let term_freq = self.term_freq() as usize;
+            if let Some(position_computer) = self.position_computer.as_mut() {
+                position_computer.add_skip(term_freq);
+            }
        }
-        self.doc()
+        self.cur += 1;
+        if self.cur >= self.block_cursor.block_len() {
+            self.cur = 0;
+            if !self.block_cursor.advance() {
+                self.cur = COMPRESSION_BLOCK_SIZE;
+                return false;
+            }
+        }
+        true
    }

-    fn seek(&mut self, target: DocId) -> DocId {
-        if self.doc() == target {
-            return target;
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        if !self.advance() {
+            return SkipResult::End;
+        }
+        match self.doc().cmp(&target) {
+            Ordering::Equal => {
+                return SkipResult::Reached;
+            }
+            Ordering::Greater => {
+                return SkipResult::OverStep;
+            }
+            _ => {
+                // ...
+            }
        }
-        self.block_cursor.seek(target);

-        // At this point we are on the block, that might contain our document.
-        let output = self.block_cursor.docs_aligned();
+        // In the following, thanks to the call to advance above,
+        // we know that the position is not loaded and we need
+        // to skip every doc_freq we cross.

-        self.cur = self.block_searcher.search_in_block(&output, target);
+        // skip blocks until one that might contain the target
+        // check if we need to go to the next block
+        let mut sum_freqs_skipped: u32 = 0;
+        if !self
+            .block_cursor
+            .docs()
+            .last()
+            .map(|doc| *doc >= target)
+            .unwrap_or(false)
+        // there should always be at least a document in the block
+        // since advance returned.
+        {
+            // we are not in the right block.
+            //
+            // First compute all of the freqs skipped from the current block.
+            if self.position_computer.is_some() {
+                sum_freqs_skipped = self.block_cursor.freqs()[self.cur..].iter().sum();
+                match self.block_cursor.skip_to(target) {
+                    BlockSegmentPostingsSkipResult::Success(block_skip_freqs) => {
+                        sum_freqs_skipped += block_skip_freqs;
+                    }
+                    BlockSegmentPostingsSkipResult::Terminated => {
+                        return SkipResult::End;
+                    }
+                }
+            } else if self.block_cursor.skip_to(target)
+                == BlockSegmentPostingsSkipResult::Terminated
+            {
+                // no positions needed. no need to sum freqs.
+                return SkipResult::End;
+            }
+            self.cur = 0;
+        }

-        // The last block is not full and padded with the value TERMINATED,
-        // so that we are guaranteed to have at least doc in the block (a real one or the padding)
-        // that is greater or equal to the target.
-        debug_assert!(self.cur < COMPRESSION_BLOCK_SIZE);
+        let cur = self.cur;
+        // we're in the right block now, start with an exponential search
+        let (output, len) = self.block_cursor.docs_aligned();
+        let new_cur = self
+            .block_searcher
+            .search_in_block(&output, len, cur, target);
+        if let Some(position_computer) = self.position_computer.as_mut() {
+            sum_freqs_skipped += self.block_cursor.freqs()[cur..new_cur].iter().sum::<u32>();
+            position_computer.add_skip(sum_freqs_skipped as usize);
+        }
+        self.cur = new_cur;

        // `doc` is now the first element >= `target`
+        let doc = output.0[new_cur];
-        // If all docs are smaller than target the current block should be incomplemented and padded
-        // with the value `TERMINATED`.
-        //
-        // After the search, the cursor should point to the first value of TERMINATED.
-        let doc = output.0[self.cur];
        debug_assert!(doc >= target);
-        doc
+        if doc == target {
+            SkipResult::Reached
+        } else {
+            SkipResult::OverStep
+        }
    }

    /// Return the current document's `DocId`.
+    ///
+    /// # Panics
+    ///
+    /// Will panics if called without having called advance before.
    #[inline]
    fn doc(&self) -> DocId {
-        self.block_cursor.doc(self.cur)
+        let docs = self.block_cursor.docs();
+        debug_assert!(
+            self.cur < docs.len(),
+            "Have you forgotten to call `.advance()` at least once before calling `.doc()` ."
+        );
+        docs[self.cur]
    }

    fn size_hint(&self) -> u32 {
        self.len() as u32
    }

+    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
+        // finish the current block
+        if self.advance() {
+            for &doc in &self.block_cursor.docs()[self.cur..] {
+                bitset.insert(doc);
+            }
+            // ... iterate through the remaining blocks.
+            while self.block_cursor.advance() {
+                for &doc in self.block_cursor.docs() {
+                    bitset.insert(doc);
+                }
+            }
+        }
+    }
}

impl HasLen for SegmentPostings {
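The rewritten `skip_next` first rules out `End` block by block, then finishes inside the candidate block with `BlockSearcher::search_in_block`. The comment in the diff says it starts with an exponential search; here is a hedged, scalar sketch of that idea (the real `BlockSearcher` takes an aligned buffer and may use SIMD, so this is an illustration, not its implementation):

```rust
/// Finds the first index i >= start with block[i] >= target in a sorted
/// block: gallop exponentially, then binary-search the bracketed range.
/// Returns block.len() when every remaining element is < target.
fn search_in_block(block: &[u32], mut start: usize, target: u32) -> usize {
    let mut jump = 1;
    while start + jump < block.len() && block[start + jump] < target {
        start += jump;
        jump *= 2;
    }
    let end = (start + jump + 1).min(block.len());
    start + block[start..end].partition_point(|&doc| doc < target)
}

fn main() {
    let block = [1u32, 3, 5, 7, 9];
    assert_eq!(search_in_block(&block, 0, 6), 3); // first doc >= 6 is 7
    assert_eq!(search_in_block(&block, 0, 10), block.len());
}
```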
@@ -168,52 +290,515 @@ impl Postings for SegmentPostings {

    fn positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
        let term_freq = self.term_freq() as usize;
-        if let Some(position_reader) = self.position_reader.as_mut() {
-            let read_offset = self.block_cursor.position_offset()
-                + (self.block_cursor.freqs()[..self.cur]
-                    .iter()
-                    .cloned()
-                    .sum::<u32>() as u64);
+        if let Some(position_comp) = self.position_computer.as_mut() {
            output.resize(term_freq, 0u32);
-            position_reader.read(read_offset, &mut output[..]);
-            let mut cum = offset;
-            for output_mut in output.iter_mut() {
-                cum += *output_mut;
-                *output_mut = cum;
-            }
+            position_comp.positions_with_offset(offset, &mut output[..]);
        } else {
            output.clear();
        }
    }
}

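`positions_with_offset` now just resizes the output and delegates; the offset bookkeeping that used to live here (summing the frequencies of the docs before the cursor) has moved into the skip path, where the number of position integers to pass over equals the sum of the term frequencies of every skipped document. A tiny worked example of that accounting:

```rust
fn main() {
    // term freqs of the documents hopped over by one skip_next call
    let skipped_freqs = [2u32, 1, 3];
    // one position integer is stored per occurrence, so...
    let positions_to_skip: u32 = skipped_freqs.iter().sum();
    assert_eq!(positions_to_skip, 6);
}
```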
+/// `BlockSegmentPostings` is a cursor iterating over blocks
+/// of documents.
+///
+/// # Warning
+///
+/// While it is useful for some very specific high-performance
+/// use cases, you should prefer using `SegmentPostings` for most usage.
+pub struct BlockSegmentPostings {
+    doc_decoder: BlockDecoder,
+    freq_decoder: BlockDecoder,
+    freq_reading_option: FreqReadingOption,
+
+    doc_freq: usize,
+    doc_offset: DocId,
+
+    num_vint_docs: usize,
+
+    remaining_data: OwnedRead,
+    skip_reader: SkipReader,
+}
+
+fn split_into_skips_and_postings(
+    doc_freq: u32,
+    mut data: OwnedRead,
+) -> (Option<OwnedRead>, OwnedRead) {
+    if doc_freq >= USE_SKIP_INFO_LIMIT {
+        let skip_len = VInt::deserialize(&mut data).expect("Data corrupted").0 as usize;
+        let mut postings_data = data.clone();
+        postings_data.advance(skip_len);
+        data.clip(skip_len);
+        (Some(data), postings_data)
+    } else {
+        (None, data)
+    }
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub enum BlockSegmentPostingsSkipResult {
+    Terminated,
+    Success(u32), //< number of term freqs to skip
+}
+
+impl BlockSegmentPostings {
+    pub(crate) fn from_data(
+        doc_freq: u32,
+        data: OwnedRead,
+        record_option: IndexRecordOption,
+        requested_option: IndexRecordOption,
+    ) -> BlockSegmentPostings {
+        let freq_reading_option = match (record_option, requested_option) {
+            (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
+            (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
+            (_, _) => FreqReadingOption::ReadFreq,
+        };
+
+        let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
+        let skip_reader = match skip_data_opt {
+            Some(skip_data) => SkipReader::new(skip_data, record_option),
+            None => SkipReader::new(OwnedRead::new(&[][..]), record_option),
+        };
+        let doc_freq = doc_freq as usize;
+        let num_vint_docs = doc_freq % COMPRESSION_BLOCK_SIZE;
+        BlockSegmentPostings {
+            num_vint_docs,
+            doc_decoder: BlockDecoder::new(),
+            freq_decoder: BlockDecoder::with_val(1),
+            freq_reading_option,
+            doc_offset: 0,
+            doc_freq,
+            remaining_data: postings_data,
+            skip_reader,
+        }
+    }
+
+    // Resets the block segment postings on another position
+    // in the postings file.
+    //
+    // This is useful for enumerating through a list of terms,
+    // and consuming the associated posting lists while avoiding
+    // reallocating a `BlockSegmentPostings`.
+    //
+    // # Warning
+    //
+    // This does not reset the positions list.
+    pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedRead) {
+        let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
+        let num_vint_docs = (doc_freq as usize) & (COMPRESSION_BLOCK_SIZE - 1);
+        self.num_vint_docs = num_vint_docs;
+        self.remaining_data = postings_data;
+        if let Some(skip_data) = skip_data_opt {
+            self.skip_reader.reset(skip_data);
+        } else {
+            self.skip_reader.reset(OwnedRead::new(&[][..]))
+        }
+        self.doc_offset = 0;
+        self.doc_freq = doc_freq as usize;
+    }
+
+    /// Returns the document frequency associated to this block postings.
+    ///
+    /// This `doc_freq` is simply the sum of the length of all of the blocks
+    /// length, and it does not take in account deleted documents.
+    pub fn doc_freq(&self) -> usize {
+        self.doc_freq
+    }
+
+    /// Returns the array of docs in the current block.
+    ///
+    /// Before the first call to `.advance()`, the block
+    /// returned by `.docs()` is empty.
+    #[inline]
+    pub fn docs(&self) -> &[DocId] {
+        self.doc_decoder.output_array()
+    }
+
+    pub(crate) fn docs_aligned(&self) -> (&AlignedBuffer, usize) {
+        self.doc_decoder.output_aligned()
+    }
+
+    /// Return the document at index `idx` of the block.
+    #[inline]
+    pub fn doc(&self, idx: usize) -> u32 {
+        self.doc_decoder.output(idx)
+    }
+
+    /// Return the array of `term freq` in the block.
+    #[inline]
+    pub fn freqs(&self) -> &[u32] {
+        self.freq_decoder.output_array()
+    }
+
+    /// Return the frequency at index `idx` of the block.
+    #[inline]
+    pub fn freq(&self, idx: usize) -> u32 {
+        self.freq_decoder.output(idx)
+    }
+
+    /// Returns the length of the current block.
+    ///
+    /// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
+    /// except the last block that may have a length
+    /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
+    #[inline]
+    fn block_len(&self) -> usize {
+        self.doc_decoder.output_len
+    }
+
+    /// position on a block that may contains `doc_id`.
+    /// Always advance the current block.
+    ///
+    /// Returns true if a block that has an element greater or equal to the target is found.
+    /// Returning true does not guarantee that the smallest element of the block is smaller
+    /// than the target. It only guarantees that the last element is greater or equal.
+    ///
+    /// Returns false iff all of the document remaining are smaller than
+    /// `doc_id`. In that case, all of these document are consumed.
+    ///
+    pub fn skip_to(&mut self, target_doc: DocId) -> BlockSegmentPostingsSkipResult {
+        let mut skip_freqs = 0u32;
+        while self.skip_reader.advance() {
+            if self.skip_reader.doc() >= target_doc {
+                // the last document of the current block is larger
+                // than the target.
+                //
+                // We found our block!
+                let num_bits = self.skip_reader.doc_num_bits();
+                let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted(
+                    self.remaining_data.as_ref(),
+                    self.doc_offset,
+                    num_bits,
+                );
+                self.remaining_data.advance(num_consumed_bytes);
+                let tf_num_bits = self.skip_reader.tf_num_bits();
+                match self.freq_reading_option {
+                    FreqReadingOption::NoFreq => {}
+                    FreqReadingOption::SkipFreq => {
+                        let num_bytes_to_skip = compressed_block_size(tf_num_bits);
+                        self.remaining_data.advance(num_bytes_to_skip);
+                    }
+                    FreqReadingOption::ReadFreq => {
+                        let num_consumed_bytes = self
+                            .freq_decoder
+                            .uncompress_block_unsorted(self.remaining_data.as_ref(), tf_num_bits);
+                        self.remaining_data.advance(num_consumed_bytes);
+                    }
+                }
+                self.doc_offset = self.skip_reader.doc();
+                return BlockSegmentPostingsSkipResult::Success(skip_freqs);
+            } else {
+                skip_freqs += self.skip_reader.tf_sum();
+                let advance_len = self.skip_reader.total_block_len();
+                self.doc_offset = self.skip_reader.doc();
+                self.remaining_data.advance(advance_len);
+            }
+        }
+
+        // we are now on the last, incomplete, variable encoded block.
+        if self.num_vint_docs > 0 {
+            let num_compressed_bytes = self.doc_decoder.uncompress_vint_sorted(
+                self.remaining_data.as_ref(),
+                self.doc_offset,
+                self.num_vint_docs,
+            );
+            self.remaining_data.advance(num_compressed_bytes);
+            match self.freq_reading_option {
+                FreqReadingOption::NoFreq | FreqReadingOption::SkipFreq => {}
+                FreqReadingOption::ReadFreq => {
+                    self.freq_decoder
+                        .uncompress_vint_unsorted(self.remaining_data.as_ref(), self.num_vint_docs);
+                }
+            }
+            self.num_vint_docs = 0;
+            return self
+                .docs()
+                .last()
+                .map(|last_doc| {
+                    if *last_doc >= target_doc {
+                        BlockSegmentPostingsSkipResult::Success(skip_freqs)
+                    } else {
+                        BlockSegmentPostingsSkipResult::Terminated
+                    }
+                })
+                .unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
+        }
+        BlockSegmentPostingsSkipResult::Terminated
+    }
+
+    /// Advance to the next block.
+    ///
+    /// Returns false iff there was no remaining blocks.
+    pub fn advance(&mut self) -> bool {
+        if self.skip_reader.advance() {
+            let num_bits = self.skip_reader.doc_num_bits();
+            let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted(
+                self.remaining_data.as_ref(),
+                self.doc_offset,
+                num_bits,
+            );
+            self.remaining_data.advance(num_consumed_bytes);
+            let tf_num_bits = self.skip_reader.tf_num_bits();
+            match self.freq_reading_option {
+                FreqReadingOption::NoFreq => {}
+                FreqReadingOption::SkipFreq => {
+                    let num_bytes_to_skip = compressed_block_size(tf_num_bits);
+                    self.remaining_data.advance(num_bytes_to_skip);
+                }
+                FreqReadingOption::ReadFreq => {
+                    let num_consumed_bytes = self
+                        .freq_decoder
+                        .uncompress_block_unsorted(self.remaining_data.as_ref(), tf_num_bits);
+                    self.remaining_data.advance(num_consumed_bytes);
+                }
+            }
+            // it will be used as the next offset.
+            self.doc_offset = self.doc_decoder.output(COMPRESSION_BLOCK_SIZE - 1);
+            true
+        } else if self.num_vint_docs > 0 {
+            let num_compressed_bytes = self.doc_decoder.uncompress_vint_sorted(
+                self.remaining_data.as_ref(),
+                self.doc_offset,
+                self.num_vint_docs,
+            );
+            self.remaining_data.advance(num_compressed_bytes);
+            match self.freq_reading_option {
+                FreqReadingOption::NoFreq | FreqReadingOption::SkipFreq => {}
+                FreqReadingOption::ReadFreq => {
+                    self.freq_decoder
+                        .uncompress_vint_unsorted(self.remaining_data.as_ref(), self.num_vint_docs);
+                }
+            }
+            self.num_vint_docs = 0;
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Returns an empty segment postings object
+    pub fn empty() -> BlockSegmentPostings {
+        BlockSegmentPostings {
+            num_vint_docs: 0,
+
+            doc_decoder: BlockDecoder::new(),
+            freq_decoder: BlockDecoder::with_val(1),
+            freq_reading_option: FreqReadingOption::NoFreq,
+
+            doc_offset: 0,
+            doc_freq: 0,
+
+            remaining_data: OwnedRead::new(vec![]),
+            skip_reader: SkipReader::new(OwnedRead::new(vec![]), IndexRecordOption::Basic),
+        }
+    }
+}
+
+impl<'b> Streamer<'b> for BlockSegmentPostings {
+    type Item = &'b [DocId];
+
+    fn next(&'b mut self) -> Option<&'b [DocId]> {
+        if self.advance() {
+            Some(self.docs())
+        } else {
+            None
+        }
+    }
+}

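`split_into_skips_and_postings` implies a simple layout for large posting lists: a VInt length, the skip data, then the compressed postings themselves; short lists (doc_freq below `USE_SKIP_INFO_LIMIT`) store the postings alone. A hedged sketch of the read side, on plain slices instead of `OwnedRead`:

```rust
/// Splits a posting-list body into (skip data, postings data), assuming
/// the leading VInt `skip_len` has already been decoded. Mirrors the
/// clone/advance/clip dance above, on plain slices.
fn split(data: &[u8], skip_len: usize) -> (&[u8], &[u8]) {
    data.split_at(skip_len)
}

fn main() {
    let body = [1u8, 2, 3, 4, 5, 6];
    let (skips, postings) = split(&body, 2);
    assert_eq!(skips, &[1, 2]);
    assert_eq!(postings, &[3, 4, 5, 6]);
}
```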
#[cfg(test)]
mod tests {
+    use super::BlockSegmentPostings;
+    use super::BlockSegmentPostingsSkipResult;
    use super::SegmentPostings;
    use crate::common::HasLen;
+    use crate::core::Index;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::DocSet;
    use crate::postings::postings::Postings;
+    use crate::schema::IndexRecordOption;
+    use crate::schema::Schema;
+    use crate::schema::Term;
+    use crate::schema::INDEXED;
+    use crate::DocId;
+    use crate::SkipResult;
+    use tantivy_fst::Streamer;

    #[test]
    fn test_empty_segment_postings() {
        let mut postings = SegmentPostings::empty();
-        assert_eq!(postings.advance(), TERMINATED);
-        assert_eq!(postings.advance(), TERMINATED);
+        assert!(!postings.advance());
+        assert!(!postings.advance());
        assert_eq!(postings.len(), 0);
    }

    #[test]
-    fn test_empty_postings_doc_returns_terminated() {
-        let mut postings = SegmentPostings::empty();
-        assert_eq!(postings.doc(), TERMINATED);
-        assert_eq!(postings.advance(), TERMINATED);
+    #[should_panic(expected = "Have you forgotten to call `.advance()`")]
+    fn test_panic_if_doc_called_before_advance() {
+        SegmentPostings::empty().doc();
    }

    #[test]
-    fn test_empty_postings_doc_term_freq_returns_0() {
-        let postings = SegmentPostings::empty();
-        assert_eq!(postings.term_freq(), 1);
+    #[should_panic(expected = "Have you forgotten to call `.advance()`")]
+    fn test_panic_if_freq_called_before_advance() {
+        SegmentPostings::empty().term_freq();
+    }
+
+    #[test]
+    fn test_empty_block_segment_postings() {
+        let mut postings = BlockSegmentPostings::empty();
+        assert!(!postings.advance());
+        assert_eq!(postings.doc_freq(), 0);
+    }
+
+    #[test]
+    fn test_block_segment_postings() {
+        let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
+        let mut offset: u32 = 0u32;
+        // checking that the block before calling advance is empty
+        assert!(block_segments.docs().is_empty());
+        // checking that the `doc_freq` is correct
+        assert_eq!(block_segments.doc_freq(), 100_000);
+        while let Some(block) = block_segments.next() {
+            for (i, doc) in block.iter().cloned().enumerate() {
+                assert_eq!(offset + (i as u32), doc);
+            }
+            offset += block.len() as u32;
+        }
+    }
+
+    #[test]
+    fn test_skip_right_at_new_block() {
+        let mut doc_ids = (0..128).collect::<Vec<u32>>();
+        doc_ids.push(129);
+        doc_ids.push(130);
+        {
+            let block_segments = build_block_postings(&doc_ids);
+            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
+            assert_eq!(docset.skip_next(128), SkipResult::OverStep);
+            assert_eq!(docset.doc(), 129);
+            assert!(docset.advance());
+            assert_eq!(docset.doc(), 130);
+            assert!(!docset.advance());
+        }
+        {
+            let block_segments = build_block_postings(&doc_ids);
+            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
+            assert_eq!(docset.skip_next(129), SkipResult::Reached);
+            assert_eq!(docset.doc(), 129);
+            assert!(docset.advance());
+            assert_eq!(docset.doc(), 130);
+            assert!(!docset.advance());
+        }
+        {
+            let block_segments = build_block_postings(&doc_ids);
+            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
+            assert_eq!(docset.skip_next(131), SkipResult::End);
+        }
+    }
+
+    fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
+        let mut schema_builder = Schema::builder();
+        let int_field = schema_builder.add_u64_field("id", INDEXED);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut last_doc = 0u32;
+        for &doc in docs {
+            for _ in last_doc..doc {
+                index_writer.add_document(doc!(int_field=>1u64));
+            }
+            index_writer.add_document(doc!(int_field=>0u64));
+            last_doc = doc + 1;
+        }
+        index_writer.commit().unwrap();
+        let searcher = index.reader().unwrap().searcher();
+        let segment_reader = searcher.segment_reader(0);
+        let inverted_index = segment_reader.inverted_index(int_field);
+        let term = Term::from_field_u64(int_field, 0u64);
+        let term_info = inverted_index.get_term_info(&term).unwrap();
+        inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
+    }
+
+    #[test]
+    fn test_block_segment_postings_skip() {
+        for i in 0..4 {
+            let mut block_postings = build_block_postings(&[3]);
+            assert_eq!(
+                block_postings.skip_to(i),
+                BlockSegmentPostingsSkipResult::Success(0u32)
+            );
+            assert_eq!(
+                block_postings.skip_to(i),
+                BlockSegmentPostingsSkipResult::Terminated
+            );
+        }
+        let mut block_postings = build_block_postings(&[3]);
+        assert_eq!(
+            block_postings.skip_to(4u32),
+            BlockSegmentPostingsSkipResult::Terminated
+        );
+    }
+
+    #[test]
+    fn test_block_segment_postings_skip2() {
+        let mut docs = vec![0];
+        for i in 0..1300 {
+            docs.push((i * i / 100) + i);
+        }
+        let mut block_postings = build_block_postings(&docs[..]);
+        for i in vec![0, 424, 10000] {
+            assert_eq!(
+                block_postings.skip_to(i),
+                BlockSegmentPostingsSkipResult::Success(0u32)
+            );
+            let docs = block_postings.docs();
+            assert!(docs[0] <= i);
+            assert!(docs.last().cloned().unwrap_or(0u32) >= i);
+        }
+        assert_eq!(
+            block_postings.skip_to(100_000),
+            BlockSegmentPostingsSkipResult::Terminated
+        );
+        assert_eq!(
+            block_postings.skip_to(101_000),
+            BlockSegmentPostingsSkipResult::Terminated
+        );
+    }
+
+    #[test]
+    fn test_reset_block_segment_postings() {
+        let mut schema_builder = Schema::builder();
+        let int_field = schema_builder.add_u64_field("id", INDEXED);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        // create two postings list, one containg even number,
+        // the other containing odd numbers.
+        for i in 0..6 {
+            let doc = doc!(int_field=> (i % 2) as u64);
+            index_writer.add_document(doc);
+        }
+        index_writer.commit().unwrap();
+        let searcher = index.reader().unwrap().searcher();
+        let segment_reader = searcher.segment_reader(0);
+
+        let mut block_segments;
+        {
+            let term = Term::from_field_u64(int_field, 0u64);
+            let inverted_index = segment_reader.inverted_index(int_field);
+            let term_info = inverted_index.get_term_info(&term).unwrap();
+            block_segments = inverted_index
+                .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
+        }
+        assert!(block_segments.advance());
+        assert_eq!(block_segments.docs(), &[0, 2, 4]);
+        {
+            let term = Term::from_field_u64(int_field, 1u64);
+            let inverted_index = segment_reader.inverted_index(int_field);
+            let term_info = inverted_index.get_term_info(&term).unwrap();
+            inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
+        }
+        assert!(block_segments.advance());
+        assert_eq!(block_segments.docs(), &[1, 3, 5]);
    }
}
@@ -6,6 +6,7 @@ use crate::directory::WritePtr;
use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer;
+use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
@@ -390,7 +391,7 @@ impl<W: Write> PostingsSerializer<W> {
            }
            self.block.clear();
        }
-        if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+        if doc_freq >= USE_SKIP_INFO_LIMIT {
            let skip_data = self.skip_write.data();
            VInt(skip_data.len() as u64).serialize(&mut self.output_write)?;
            self.output_write.write_all(skip_data)?;
@@ -1,8 +1,7 @@
use crate::common::BinarySerializable;
-use crate::directory::ReadOnlySource;
-use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
+use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::schema::IndexRecordOption;
-use crate::{DocId, TERMINATED};
+use crate::DocId;
use owned_read::OwnedRead;

pub struct SkipSerializer {
@@ -51,143 +50,80 @@ impl SkipSerializer {
}

pub(crate) struct SkipReader {
-    last_doc_in_block: DocId,
-    pub(crate) last_doc_in_previous_block: DocId,
+    doc: DocId,
    owned_read: OwnedRead,
+    doc_num_bits: u8,
+    tf_num_bits: u8,
+    tf_sum: u32,
    skip_info: IndexRecordOption,
-    byte_offset: usize,
-    remaining_docs: u32, // number of docs remaining, including the
-    // documents in the current block.
-    block_info: BlockInfo,
-
-    position_offset: u64,
-}
-
-#[derive(Clone, Eq, PartialEq, Copy, Debug)]
-pub(crate) enum BlockInfo {
-    BitPacked {
-        doc_num_bits: u8,
-        tf_num_bits: u8,
-        tf_sum: u32,
-    },
-    VInt(u32),
-}
-
-impl Default for BlockInfo {
-    fn default() -> Self {
-        BlockInfo::VInt(0)
-    }
}

impl SkipReader {
-    pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
+    pub fn new(data: OwnedRead, skip_info: IndexRecordOption) -> SkipReader {
        SkipReader {
-            last_doc_in_block: 0u32,
-            last_doc_in_previous_block: 0u32,
-            owned_read: OwnedRead::new(data),
+            doc: 0u32,
+            owned_read: data,
            skip_info,
-            block_info: BlockInfo::default(),
-            byte_offset: 0,
-            remaining_docs: doc_freq,
-            position_offset: 0u64,
+            doc_num_bits: 0u8,
+            tf_num_bits: 0u8,
+            tf_sum: 0u32,
        }
    }

-    pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
-        self.last_doc_in_block = 0u32;
-        self.last_doc_in_previous_block = 0u32;
-        self.owned_read = OwnedRead::new(data);
-        self.block_info = BlockInfo::default();
-        self.byte_offset = 0;
-        self.remaining_docs = doc_freq;
+    pub fn reset(&mut self, data: OwnedRead) {
+        self.doc = 0u32;
+        self.owned_read = data;
+        self.doc_num_bits = 0u8;
+        self.tf_num_bits = 0u8;
+        self.tf_sum = 0u32;
    }

-    #[cfg(test)]
-    #[inline(always)]
-    pub(crate) fn last_doc_in_block(&self) -> DocId {
-        self.last_doc_in_block
+    pub fn total_block_len(&self) -> usize {
+        (self.doc_num_bits + self.tf_num_bits) as usize * COMPRESSION_BLOCK_SIZE / 8
    }

-    pub fn position_offset(&self) -> u64 {
-        self.position_offset
+    pub fn doc(&self) -> DocId {
+        self.doc
    }

-    pub fn byte_offset(&self) -> usize {
-        self.byte_offset
+    pub fn doc_num_bits(&self) -> u8 {
+        self.doc_num_bits
    }

-    fn read_block_info(&mut self) {
-        let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
-        self.last_doc_in_block += doc_delta as DocId;
-        let doc_num_bits = self.owned_read.get(0);
-        match self.skip_info {
-            IndexRecordOption::Basic => {
-                self.owned_read.advance(1);
-                self.block_info = BlockInfo::BitPacked {
-                    doc_num_bits,
-                    tf_num_bits: 0,
-                    tf_sum: 0,
-                };
-            }
-            IndexRecordOption::WithFreqs => {
-                let tf_num_bits = self.owned_read.get(1);
-                self.block_info = BlockInfo::BitPacked {
-                    doc_num_bits,
-                    tf_num_bits,
-                    tf_sum: 0,
-                };
-                self.owned_read.advance(2);
-            }
-            IndexRecordOption::WithFreqsAndPositions => {
-                let tf_num_bits = self.owned_read.get(1);
-                self.owned_read.advance(2);
-                let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
-                self.block_info = BlockInfo::BitPacked {
-                    doc_num_bits,
-                    tf_num_bits,
-                    tf_sum,
-                };
-            }
-        }
-    }
-
-    pub fn block_info(&self) -> BlockInfo {
-        self.block_info
-    }
-
-    /// Advance the skip reader to the block that may contain the target.
    ///
-    /// If the target is larger than all documents, the skip_reader
-    /// then advance to the last Variable In block.
-    pub fn seek(&mut self, target: DocId) {
-        while self.last_doc_in_block < target {
-            self.advance();
-        }
+    /// Number of bits used to encode term frequencies
+    ///
+    /// 0 if term frequencies are not enabled.
+    pub fn tf_num_bits(&self) -> u8 {
+        self.tf_num_bits
+    }
+
+    pub fn tf_sum(&self) -> u32 {
+        self.tf_sum
    }

    pub fn advance(&mut self) -> bool {
-        match self.block_info {
-            BlockInfo::BitPacked {
-                doc_num_bits,
-                tf_num_bits,
-                tf_sum,
-            } => {
-                self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
-                self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
-                self.position_offset += tf_sum as u64;
-            }
-            BlockInfo::VInt(num_vint_docs) => {
-                self.remaining_docs -= num_vint_docs;
-            }
-        }
-        self.last_doc_in_previous_block = self.last_doc_in_block;
-        if self.remaining_docs >= COMPRESSION_BLOCK_SIZE as u32 {
-            self.read_block_info();
-            true
+        if self.owned_read.as_ref().is_empty() {
+            false
        } else {
-            self.last_doc_in_block = TERMINATED;
-            self.block_info = BlockInfo::VInt(self.remaining_docs);
-            self.remaining_docs > 0
+            let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
+            self.doc += doc_delta as DocId;
+            self.doc_num_bits = self.owned_read.get(0);
+            match self.skip_info {
+                IndexRecordOption::Basic => {
+                    self.owned_read.advance(1);
+                }
+                IndexRecordOption::WithFreqs => {
+                    self.tf_num_bits = self.owned_read.get(1);
+                    self.owned_read.advance(2);
+                }
+                IndexRecordOption::WithFreqsAndPositions => {
+                    self.tf_num_bits = self.owned_read.get(1);
+                    self.owned_read.advance(2);
+                    self.tf_sum =
+                        u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
+                }
+            }
+            true
        }
    }
}
@@ -195,11 +131,9 @@ impl SkipReader {
#[cfg(test)]
mod tests {

-    use super::BlockInfo;
    use super::IndexRecordOption;
    use super::{SkipReader, SkipSerializer};
-    use crate::directory::ReadOnlySource;
-    use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
+    use owned_read::OwnedRead;

    #[test]
    fn test_skip_with_freq() {
@@ -211,34 +145,15 @@ mod tests {
        skip_serializer.write_term_freq(2u8);
        skip_serializer.data().to_owned()
    };
-    let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
-    let mut skip_reader = SkipReader::new(
-        ReadOnlySource::new(buf),
-        doc_freq,
-        IndexRecordOption::WithFreqs,
-    );
+    let mut skip_reader = SkipReader::new(OwnedRead::new(buf), IndexRecordOption::WithFreqs);
    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.last_doc_in_block(), 1u32);
-    assert_eq!(
-        skip_reader.block_info(),
-        BlockInfo::BitPacked {
-            doc_num_bits: 2u8,
-            tf_num_bits: 3u8,
-            tf_sum: 0
-        }
-    );
+    assert_eq!(skip_reader.doc(), 1u32);
+    assert_eq!(skip_reader.doc_num_bits(), 2u8);
+    assert_eq!(skip_reader.tf_num_bits(), 3u8);
    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.last_doc_in_block(), 5u32);
-    assert_eq!(
-        skip_reader.block_info(),
-        BlockInfo::BitPacked {
-            doc_num_bits: 5u8,
-            tf_num_bits: 2u8,
-            tf_sum: 0
-        }
-    );
-    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
+    assert_eq!(skip_reader.doc(), 5u32);
+    assert_eq!(skip_reader.doc_num_bits(), 5u8);
+    assert_eq!(skip_reader.tf_num_bits(), 2u8);
    assert!(!skip_reader.advance());
}

@@ -250,60 +165,13 @@ mod tests {
        skip_serializer.write_doc(5u32, 5u8);
        skip_serializer.data().to_owned()
    };
-    let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
-    let mut skip_reader = SkipReader::new(
-        ReadOnlySource::from(buf),
-        doc_freq,
-        IndexRecordOption::Basic,
-    );
+    let mut skip_reader = SkipReader::new(OwnedRead::new(buf), IndexRecordOption::Basic);
    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.last_doc_in_block(), 1u32);
-    assert_eq!(
-        skip_reader.block_info(),
-        BlockInfo::BitPacked {
-            doc_num_bits: 2u8,
-            tf_num_bits: 0,
-            tf_sum: 0u32
-        }
-    );
+    assert_eq!(skip_reader.doc(), 1u32);
+    assert_eq!(skip_reader.doc_num_bits(), 2u8);
    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.last_doc_in_block(), 5u32);
-    assert_eq!(
-        skip_reader.block_info(),
-        BlockInfo::BitPacked {
-            doc_num_bits: 5u8,
-            tf_num_bits: 0,
-            tf_sum: 0u32
-        }
-    );
-    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
-    assert!(!skip_reader.advance());
-}
-
-#[test]
-fn test_skip_multiple_of_block_size() {
-    let buf = {
-        let mut skip_serializer = SkipSerializer::new();
-        skip_serializer.write_doc(1u32, 2u8);
-        skip_serializer.data().to_owned()
-    };
-    let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
-    let mut skip_reader = SkipReader::new(
-        ReadOnlySource::from(buf),
-        doc_freq,
-        IndexRecordOption::Basic,
-    );
-    assert!(skip_reader.advance());
-    assert_eq!(skip_reader.last_doc_in_block(), 1u32);
-    assert_eq!(
-        skip_reader.block_info(),
-        BlockInfo::BitPacked {
-            doc_num_bits: 2u8,
-            tf_num_bits: 0,
-            tf_sum: 0u32
-        }
-    );
+    assert_eq!(skip_reader.doc(), 5u32);
+    assert_eq!(skip_reader.doc_num_bits(), 5u8);
    assert!(!skip_reader.advance());
    }
}
|
|||||||
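The two hunks above move the skip-reader tests between two metadata APIs: a single `block_info()` accessor returning a `BlockInfo` enum on one side, and individual `doc()`/`doc_num_bits()`/`tf_num_bits()` getters on the other. A self-contained sketch of the enum shape (stand-in types, not tantivy's; the field values are taken straight from the asserts above):

```rust
// Stand-in for the BlockInfo enum exercised above: one bit-packed variant
// carrying all block metadata, plus a VInt variant for the tail block.
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum BlockInfo {
    BitPacked {
        doc_num_bits: u8,
        tf_num_bits: u8,
        tf_sum: u32,
    },
    VInt(u32),
}

// The getter style answers one question at a time; the enum style forces
// the caller to decide what a VInt tail block should report.
fn doc_num_bits(info: &BlockInfo) -> Option<u8> {
    match info {
        BlockInfo::BitPacked { doc_num_bits, .. } => Some(*doc_num_bits),
        BlockInfo::VInt(_) => None,
    }
}

fn main() {
    let block = BlockInfo::BitPacked { doc_num_bits: 2, tf_num_bits: 3, tf_sum: 0 };
    assert_eq!(doc_num_bits(&block), Some(2));
    assert_eq!(doc_num_bits(&BlockInfo::VInt(3)), None);
}
```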
@@ -106,8 +106,8 @@ impl MemoryArena {
     ///
     /// Internally, it counts a number of `1MB` pages
     /// and therefore delivers an upperbound.
-    pub fn mem_usage(&self) -> usize {
-        self.pages.len() * PAGE_SIZE
+    pub fn mem_usage(&self) -> u64 {
+        (self.pages.len() as u64) * (PAGE_SIZE as u64)
     }
 
     pub fn write_at<Item: Copy + 'static>(&mut self, addr: Addr, val: Item) {
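The hunk above widens `mem_usage` from `usize` to `u64`. The point of the widening is portability of the arithmetic, as sketched below with the 1MB page size taken from the doc comment (the real constant lives elsewhere in memory_arena.rs):

```rust
// PAGE_SIZE is assumed from the doc comment above ("it counts a number
// of 1MB pages"); this is a sketch, not tantivy's actual constant.
const PAGE_SIZE: usize = 1 << 20;

// Pre-change shape: usize arithmetic, which is 32 bits on some targets.
fn mem_usage_usize(num_pages: usize) -> usize {
    num_pages * PAGE_SIZE
}

// Post-change shape: widen both operands before multiplying.
fn mem_usage_u64(num_pages: usize) -> u64 {
    (num_pages as u64) * (PAGE_SIZE as u64)
}

fn main() {
    // 4096 pages of 1MB is 4GB: representable in u64, but one past the
    // end of a 32-bit usize.
    assert_eq!(mem_usage_u64(4096), 1u64 << 32);
    assert_eq!(mem_usage_usize(3), 3 << 20);
}
```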
@@ -1,4 +1,6 @@
-use murmurhash32::murmurhash2;
+use murmurhash32;
+
+use self::murmurhash32::murmurhash2;
 
 use super::{Addr, MemoryArena};
 use crate::postings::stacker::memory_arena::store;
@@ -10,8 +12,8 @@ use std::slice;
 
 /// Returns the actual memory size in bytes
 /// required to create a table of size $2^num_bits$.
-pub fn compute_table_size(num_bits: usize) -> usize {
-    (1 << num_bits) * mem::size_of::<KeyValue>()
+pub fn compute_table_size(num_bits: usize) -> u64 {
+    (1u64 << num_bits as u64) * mem::size_of::<KeyValue>() as u64
 }
 
 /// `KeyValue` is the item stored in the hash table.
@@ -114,8 +116,8 @@ impl TermHashMap {
         QuadraticProbing::compute(hash as usize, self.mask)
     }
 
-    pub fn mem_usage(&self) -> usize {
-        self.table.len() * mem::size_of::<KeyValue>()
+    pub fn mem_usage(&self) -> u64 {
+        self.table.len() as u64 * mem::size_of::<KeyValue>() as u64
     }
 
     fn is_saturated(&self) -> bool {
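The same widening motif appears in `compute_table_size`: the shift and multiplication now happen in `u64`. A worked instance, using a stand-in `KeyValue` layout since the real struct is defined further down in this file:

```rust
use std::mem;

// Stand-in layout for illustration only; the real KeyValue is tantivy's.
#[allow(dead_code)]
struct KeyValue {
    key_value_addr: u64,
    hash: u32,
}

fn compute_table_size(num_bits: usize) -> u64 {
    (1u64 << num_bits as u64) * mem::size_of::<KeyValue>() as u64
}

fn main() {
    // A table of 2^10 slots costs 1024 * size_of::<KeyValue>() bytes.
    assert_eq!(
        compute_table_size(10),
        1024 * mem::size_of::<KeyValue>() as u64
    );
}
```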
@@ -1,6 +1,6 @@
 use crate::core::Searcher;
 use crate::core::SegmentReader;
-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::DocSet;
 use crate::query::boost_query::BoostScorer;
 use crate::query::explanation::does_not_match;
 use crate::query::{Explanation, Query, Scorer, Weight};
@@ -25,6 +25,7 @@ pub struct AllWeight;
 impl Weight for AllWeight {
     fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
         let all_scorer = AllScorer {
+            state: State::NotStarted,
             doc: 0u32,
             max_doc: reader.max_doc(),
         };
@@ -39,20 +40,39 @@ impl Weight for AllWeight {
     }
 }
 
+enum State {
+    NotStarted,
+    Started,
+    Finished,
+}
+
 /// Scorer associated to the `AllQuery` query.
 pub struct AllScorer {
+    state: State,
     doc: DocId,
     max_doc: DocId,
 }
 
 impl DocSet for AllScorer {
-    fn advance(&mut self) -> DocId {
-        if self.doc + 1 >= self.max_doc {
-            self.doc = TERMINATED;
-            return TERMINATED;
+    fn advance(&mut self) -> bool {
+        match self.state {
+            State::NotStarted => {
+                self.state = State::Started;
+                self.doc = 0;
+            }
+            State::Started => {
+                self.doc += 1u32;
+            }
+            State::Finished => {
+                return false;
+            }
+        }
+        if self.doc < self.max_doc {
+            true
+        } else {
+            self.state = State::Finished;
+            false
         }
-        self.doc += 1;
-        self.doc
     }
 
     fn doc(&self) -> DocId {
@@ -73,7 +93,6 @@ impl Scorer for AllScorer {
 #[cfg(test)]
 mod tests {
     use super::AllQuery;
-    use crate::docset::TERMINATED;
     use crate::query::Query;
     use crate::schema::{Schema, TEXT};
     use crate::Index;
@@ -101,16 +120,18 @@ mod tests {
         {
             let reader = searcher.segment_reader(0);
             let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+            assert!(scorer.advance());
             assert_eq!(scorer.doc(), 0u32);
-            assert_eq!(scorer.advance(), 1u32);
+            assert!(scorer.advance());
             assert_eq!(scorer.doc(), 1u32);
-            assert_eq!(scorer.advance(), TERMINATED);
+            assert!(!scorer.advance());
         }
         {
            let reader = searcher.segment_reader(1);
            let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+            assert!(scorer.advance());
            assert_eq!(scorer.doc(), 0u32);
-            assert_eq!(scorer.advance(), TERMINATED);
+            assert!(!scorer.advance());
         }
     }
 
@@ -123,11 +144,13 @@ mod tests {
         let reader = searcher.segment_reader(0);
         {
             let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
+            assert!(scorer.advance());
             assert_eq!(scorer.doc(), 0u32);
             assert_eq!(scorer.score(), 2.0f32);
         }
         {
             let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
+            assert!(scorer.advance());
             assert_eq!(scorer.doc(), 0u32);
             assert_eq!(scorer.score(), 1.5f32);
         }
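The all_query.rs hunks are the clearest before/after of the compare's central API change: `advance()` returning the next `DocId` with a `TERMINATED` sentinel (and a freshly created docset already positioned on its first doc) versus `advance()` returning `bool` with the cursor starting before doc 0, which is what the `State::NotStarted` arm encodes. A self-contained sketch of the two calling conventions, with toy types rather than tantivy's:

```rust
type DocId = u32;
const TERMINATED: DocId = u32::max_value();

// Sentinel convention: a fresh docset already points at its first document
// (assuming max_doc > 0), and advance() returns the new DocId.
struct SentinelDocSet {
    doc: DocId,
    max_doc: DocId,
}

impl SentinelDocSet {
    fn new(max_doc: DocId) -> SentinelDocSet {
        SentinelDocSet { doc: 0, max_doc }
    }
    fn doc(&self) -> DocId {
        self.doc
    }
    fn advance(&mut self) -> DocId {
        self.doc = if self.doc + 1 >= self.max_doc { TERMINATED } else { self.doc + 1 };
        self.doc
    }
}

// Boolean convention: the cursor starts *before* doc 0, so the first
// advance() moves onto doc 0, mirroring State::NotStarted above.
struct BoolDocSet {
    doc: DocId,
    started: bool,
    max_doc: DocId,
}

impl BoolDocSet {
    fn new(max_doc: DocId) -> BoolDocSet {
        BoolDocSet { doc: 0, started: false, max_doc }
    }
    fn doc(&self) -> DocId {
        self.doc
    }
    fn advance(&mut self) -> bool {
        if self.started {
            self.doc += 1;
        } else {
            self.started = true;
        }
        self.doc < self.max_doc
    }
}

fn main() {
    let mut sentinel = SentinelDocSet::new(3);
    let mut via_sentinel = Vec::new();
    let mut doc = sentinel.doc();
    while doc != TERMINATED {
        via_sentinel.push(doc);
        doc = sentinel.advance();
    }

    let mut boolean = BoolDocSet::new(3);
    let mut via_bool = Vec::new();
    while boolean.advance() {
        via_bool.push(boolean.doc());
    }

    assert_eq!(via_sentinel, via_bool); // both yield [0, 1, 2]
}
```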
@@ -6,8 +6,8 @@ use crate::query::{Scorer, Weight};
 use crate::schema::{Field, IndexRecordOption};
 use crate::termdict::{TermDictionary, TermStreamer};
 use crate::DocId;
-use crate::Result;
 use crate::TantivyError;
+use crate::{Result, SkipResult};
 use std::sync::Arc;
 use tantivy_fst::Automaton;
 
@@ -51,13 +51,10 @@ where
             let term_info = term_stream.value();
             let mut block_segment_postings = inverted_index
                 .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
-            loop {
+            while block_segment_postings.advance() {
                 for &doc in block_segment_postings.docs() {
                     doc_bitset.insert(doc);
                 }
-                if !block_segment_postings.advance() {
-                    break;
-                }
             }
         }
         let doc_bitset = BitSetDocSet::from(doc_bitset);
@@ -67,7 +64,7 @@ where
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
         let mut scorer = self.scorer(reader, 1.0f32)?;
-        if scorer.seek(doc) == doc {
+        if scorer.skip_next(doc) == SkipResult::Reached {
             Ok(Explanation::new("AutomatonScorer", 1.0f32))
         } else {
             Err(TantivyError::InvalidArgument(
@@ -80,7 +77,6 @@ where
 #[cfg(test)]
 mod tests {
     use super::AutomatonWeight;
-    use crate::docset::TERMINATED;
     use crate::query::Weight;
     use crate::schema::{Schema, STRING};
     use crate::Index;
@@ -145,12 +141,13 @@ mod tests {
         let mut scorer = automaton_weight
             .scorer(searcher.segment_reader(0u32), 1.0f32)
             .unwrap();
+        assert!(scorer.advance());
         assert_eq!(scorer.doc(), 0u32);
         assert_eq!(scorer.score(), 1.0f32);
-        assert_eq!(scorer.advance(), 2u32);
+        assert!(scorer.advance());
         assert_eq!(scorer.doc(), 2u32);
         assert_eq!(scorer.score(), 1.0f32);
-        assert_eq!(scorer.advance(), TERMINATED);
+        assert!(!scorer.advance());
     }
 
     #[test]
@@ -163,6 +160,7 @@ mod tests {
         let mut scorer = automaton_weight
             .scorer(searcher.segment_reader(0u32), 1.32f32)
             .unwrap();
+        assert!(scorer.advance());
         assert_eq!(scorer.doc(), 0u32);
         assert_eq!(scorer.score(), 1.32f32);
     }
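The `explain` hunk above swaps `scorer.seek(doc) == doc` for `scorer.skip_next(doc) == SkipResult::Reached`; both encode the same membership test. A sketch of the three-valued `skip_next` contract over a plain sorted slice (toy code, not tantivy's `DocSet`):

```rust
#[derive(Debug, PartialEq)]
enum SkipResult {
    Reached,  // landed exactly on the target
    OverStep, // jumped past it to the next doc
    End,      // exhausted the docset
}

// Advance a cursor over a sorted doc list until it is at or past `target`.
fn skip_next(docs: &[u32], cursor: &mut usize, target: u32) -> SkipResult {
    while *cursor < docs.len() {
        if docs[*cursor] >= target {
            return if docs[*cursor] == target {
                SkipResult::Reached
            } else {
                SkipResult::OverStep
            };
        }
        *cursor += 1;
    }
    SkipResult::End
}

fn main() {
    let docs = [1u32, 5, 9];
    let mut cursor = 0;
    assert_eq!(skip_next(&docs, &mut cursor, 5), SkipResult::Reached);
    assert_eq!(skip_next(&docs, &mut cursor, 6), SkipResult::OverStep); // lands on 9
    assert_eq!(skip_next(&docs, &mut cursor, 10), SkipResult::End);
}
```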
@@ -1,6 +1,7 @@
 use crate::common::{BitSet, TinySet};
-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::{DocSet, SkipResult};
 use crate::DocId;
+use std::cmp::Ordering;
 
 /// A `BitSetDocSet` makes it possible to iterate through a bitset as if it was a `DocSet`.
 ///
@@ -32,50 +33,75 @@ impl From<BitSet> for BitSetDocSet {
         } else {
             docs.tinyset(0)
         };
-        let mut docset = BitSetDocSet {
+        BitSetDocSet {
             docs,
             cursor_bucket: 0,
             cursor_tinybitset: first_tiny_bitset,
             doc: 0u32,
-        };
-        docset.advance();
-        docset
+        }
     }
 }
 
 impl DocSet for BitSetDocSet {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
             self.doc = (self.cursor_bucket as u32 * 64u32) | lower;
-            return self.doc;
+            return true;
         }
         if let Some(cursor_bucket) = self.docs.first_non_empty_bucket(self.cursor_bucket + 1) {
             self.go_to_bucket(cursor_bucket);
             let lower = self.cursor_tinybitset.pop_lowest().unwrap();
             self.doc = (cursor_bucket * 64u32) | lower;
-            self.doc
+            true
         } else {
-            self.doc = TERMINATED;
-            TERMINATED
+            false
         }
     }
 
-    fn seek(&mut self, target: DocId) -> DocId {
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        // skip is required to advance.
+        if !self.advance() {
+            return SkipResult::End;
+        }
         let target_bucket = target / 64u32;
 
         // Mask for all of the bits greater or equal
         // to our target document.
-        if target_bucket > self.cursor_bucket {
-            self.go_to_bucket(target_bucket);
-            let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
-            self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
-            self.advance();
-        }
-        let mut doc = self.doc();
-        while doc < target {
-            doc = self.advance();
-        }
-        doc
+        match target_bucket.cmp(&self.cursor_bucket) {
+            Ordering::Greater => {
+                self.go_to_bucket(target_bucket);
+                let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
+                self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
+                if !self.advance() {
+                    SkipResult::End
+                } else if self.doc() == target {
+                    SkipResult::Reached
+                } else {
+                    debug_assert!(self.doc() > target);
+                    SkipResult::OverStep
+                }
+            }
+            Ordering::Equal => loop {
+                match self.doc().cmp(&target) {
+                    Ordering::Less => {
+                        if !self.advance() {
+                            return SkipResult::End;
+                        }
+                    }
+                    Ordering::Equal => {
+                        return SkipResult::Reached;
+                    }
+                    Ordering::Greater => {
+                        debug_assert!(self.doc() > target);
+                        return SkipResult::OverStep;
+                    }
+                }
+            },
+            Ordering::Less => {
+                debug_assert!(self.doc() > target);
+                SkipResult::OverStep
+            }
+        }
     }
 
     /// Returns the current document
@@ -96,7 +122,7 @@ impl DocSet for BitSetDocSet {
 mod tests {
     use super::BitSetDocSet;
     use crate::common::BitSet;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::{DocSet, SkipResult};
     use crate::DocId;
 
     fn create_docbitset(docs: &[DocId], max_doc: DocId) -> BitSetDocSet {
@@ -107,24 +133,19 @@ mod tests {
         BitSetDocSet::from(docset)
     }
 
-    #[test]
-    fn test_empty() {
-        let bitset = BitSet::with_max_value(1000);
-        let mut empty = BitSetDocSet::from(bitset);
-        assert_eq!(empty.advance(), TERMINATED)
-    }
-
     fn test_go_through_sequential(docs: &[DocId]) {
         let mut docset = create_docbitset(docs, 1_000u32);
         for &doc in docs {
+            assert!(docset.advance());
             assert_eq!(doc, docset.doc());
-            docset.advance();
         }
-        assert_eq!(docset.advance(), TERMINATED);
+        assert!(!docset.advance());
+        assert!(!docset.advance());
     }
 
     #[test]
     fn test_docbitset_sequential() {
+        test_go_through_sequential(&[]);
         test_go_through_sequential(&[1, 2, 3]);
         test_go_through_sequential(&[1, 2, 3, 4, 5, 63, 64, 65]);
         test_go_through_sequential(&[63, 64, 65]);
@@ -135,64 +156,64 @@ mod tests {
     fn test_docbitset_skip() {
         {
             let mut docset = create_docbitset(&[1, 5, 6, 7, 5112], 10_000);
-            assert_eq!(docset.seek(7), 7);
+            assert_eq!(docset.skip_next(7), SkipResult::Reached);
             assert_eq!(docset.doc(), 7);
-            assert_eq!(docset.advance(), 5112);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 5112);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert!(!docset.advance());
         }
         {
             let mut docset = create_docbitset(&[1, 5, 6, 7, 5112], 10_000);
-            assert_eq!(docset.seek(3), 5);
+            assert_eq!(docset.skip_next(3), SkipResult::OverStep);
             assert_eq!(docset.doc(), 5);
-            assert_eq!(docset.advance(), 6);
+            assert!(docset.advance());
         }
         {
             let mut docset = create_docbitset(&[5112], 10_000);
-            assert_eq!(docset.seek(5112), 5112);
+            assert_eq!(docset.skip_next(5112), SkipResult::Reached);
             assert_eq!(docset.doc(), 5112);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert!(!docset.advance());
         }
         {
             let mut docset = create_docbitset(&[5112], 10_000);
-            assert_eq!(docset.seek(5113), TERMINATED);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert_eq!(docset.skip_next(5113), SkipResult::End);
+            assert!(!docset.advance());
         }
         {
             let mut docset = create_docbitset(&[5112], 10_000);
-            assert_eq!(docset.seek(5111), 5112);
+            assert_eq!(docset.skip_next(5111), SkipResult::OverStep);
             assert_eq!(docset.doc(), 5112);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert!(!docset.advance());
         }
         {
             let mut docset = create_docbitset(&[1, 5, 6, 7, 5112, 5500, 6666], 10_000);
-            assert_eq!(docset.seek(5112), 5112);
+            assert_eq!(docset.skip_next(5112), SkipResult::Reached);
             assert_eq!(docset.doc(), 5112);
-            assert_eq!(docset.advance(), 5500);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 5500);
-            assert_eq!(docset.advance(), 6666);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 6666);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert!(!docset.advance());
         }
         {
             let mut docset = create_docbitset(&[1, 5, 6, 7, 5112, 5500, 6666], 10_000);
-            assert_eq!(docset.seek(5111), 5112);
+            assert_eq!(docset.skip_next(5111), SkipResult::OverStep);
             assert_eq!(docset.doc(), 5112);
-            assert_eq!(docset.advance(), 5500);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 5500);
-            assert_eq!(docset.advance(), 6666);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 6666);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert!(!docset.advance());
         }
         {
             let mut docset = create_docbitset(&[1, 5, 6, 7, 5112, 5513, 6666], 10_000);
-            assert_eq!(docset.seek(5111), 5112);
+            assert_eq!(docset.skip_next(5111), SkipResult::OverStep);
             assert_eq!(docset.doc(), 5112);
-            assert_eq!(docset.advance(), 5513);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 5513);
-            assert_eq!(docset.advance(), 6666);
+            assert!(docset.advance());
             assert_eq!(docset.doc(), 6666);
-            assert_eq!(docset.advance(), TERMINATED);
+            assert!(!docset.advance());
         }
     }
 }
@@ -202,7 +223,6 @@ mod bench {
 
     use super::BitSet;
     use super::BitSetDocSet;
-    use crate::docset::TERMINATED;
     use crate::test;
     use crate::tests;
     use crate::DocSet;
@@ -237,7 +257,7 @@ mod bench {
         }
         b.iter(|| {
            let mut docset = BitSetDocSet::from(bitset.clone());
-            while docset.advance() != TERMINATED {}
+            while docset.advance() {}
         });
     }
 }
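Both versions of `BitSetDocSet` above address documents as `(bucket, bit)` pairs: each `TinySet` bucket covers 64 doc ids and `doc = bucket * 64 | lowest_set_bit`, which is also why `skip_next` first compares `target / 64` against the cursor bucket. A worked instance with a plain `u64` word standing in for `TinySet`:

```rust
// Returns the lowest doc id stored in one 64-doc bucket, if any.
fn doc_from(bucket: u32, word: u64) -> Option<u32> {
    if word == 0 {
        return None;
    }
    Some(bucket * 64 | word.trailing_zeros())
}

fn main() {
    // Doc 70 lives in bucket 1 (docs 64..128) at bit 6.
    assert_eq!(doc_from(1, 1u64 << 6), Some(70));
    // An empty bucket yields nothing, matching first_non_empty_bucket's role.
    assert_eq!(doc_from(0, 0), None);
}
```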
@@ -2,7 +2,6 @@ use crate::core::SegmentReader;
 use crate::query::explanation::does_not_match;
 use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
 use crate::query::term_query::TermScorer;
-use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
 use crate::query::EmptyScorer;
 use crate::query::Exclude;
 use crate::query::Occur;
@@ -11,21 +10,16 @@ use crate::query::Scorer;
 use crate::query::Union;
 use crate::query::Weight;
 use crate::query::{intersect_scorers, Explanation};
-use crate::{DocId, Score};
+use crate::{DocId, SkipResult};
 use std::collections::HashMap;
 
-enum SpecializedScorer<TScoreCombiner: ScoreCombiner> {
-    TermUnion(Union<TermScorer, TScoreCombiner>),
-    Other(Box<dyn Scorer>),
-}
-
-fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer<TScoreCombiner>
+fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer>
 where
     TScoreCombiner: ScoreCombiner,
 {
     assert!(!scorers.is_empty());
     if scorers.len() == 1 {
-        return SpecializedScorer::Other(scorers.into_iter().next().unwrap()); //< we checked the size beforehands
+        return scorers.into_iter().next().unwrap(); //< we checked the size beforehands
     }
 
     {
@@ -35,21 +29,14 @@ where
             .into_iter()
             .map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
             .collect();
-        return SpecializedScorer::TermUnion(Union::<TermScorer, TScoreCombiner>::from(
-            scorers,
-        ));
+        let scorer: Box<dyn Scorer> =
+            Box::new(Union::<TermScorer, TScoreCombiner>::from(scorers));
+        return scorer;
         }
     }
-    SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
-}
-
-impl<TScoreCombiner: ScoreCombiner> Into<Box<dyn Scorer>> for SpecializedScorer<TScoreCombiner> {
-    fn into(self) -> Box<dyn Scorer> {
-        match self {
-            Self::TermUnion(union) => Box::new(union),
-            Self::Other(scorer) => scorer,
-        }
-    }
-}
+    let scorer: Box<dyn Scorer> = Box::new(Union::<_, TScoreCombiner>::from(scorers));
+    scorer
 }
 
 pub struct BooleanWeight {
@@ -85,50 +72,41 @@ impl BooleanWeight {
         &self,
         reader: &SegmentReader,
         boost: f32,
-    ) -> crate::Result<SpecializedScorer<TScoreCombiner>> {
+    ) -> crate::Result<Box<dyn Scorer>> {
         let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
 
-        let should_scorer_opt: Option<SpecializedScorer<TScoreCombiner>> = per_occur_scorers
+        let should_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
             .remove(&Occur::Should)
             .map(scorer_union::<TScoreCombiner>);
 
         let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
             .remove(&Occur::MustNot)
-            .map(scorer_union::<TScoreCombiner>)
-            .map(Into::into);
+            .map(scorer_union::<TScoreCombiner>);
 
         let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
             .remove(&Occur::Must)
             .map(intersect_scorers);
 
-        let positive_scorer: SpecializedScorer<TScoreCombiner> =
-            match (should_scorer_opt, must_scorer_opt) {
-                (Some(should_scorer), Some(must_scorer)) => {
-                    if self.scoring_enabled {
-                        SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
-                            Box<dyn Scorer>,
-                            Box<dyn Scorer>,
-                            TScoreCombiner,
-                        >::new(
-                            must_scorer, should_scorer.into()
-                        )))
-                    } else {
-                        SpecializedScorer::Other(must_scorer)
-                    }
+        let positive_scorer: Box<dyn Scorer> = match (should_scorer_opt, must_scorer_opt) {
+            (Some(should_scorer), Some(must_scorer)) => {
+                if self.scoring_enabled {
+                    Box::new(RequiredOptionalScorer::<_, _, TScoreCombiner>::new(
+                        must_scorer,
+                        should_scorer,
+                    ))
+                } else {
+                    must_scorer
                 }
-                (None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
-                (Some(should_scorer), None) => should_scorer,
-                (None, None) => {
-                    return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
-                }
-            };
+            }
+            (None, Some(must_scorer)) => must_scorer,
+            (Some(should_scorer), None) => should_scorer,
+            (None, None) => {
+                return Ok(Box::new(EmptyScorer));
+            }
+        };
 
         if let Some(exclude_scorer) = exclude_scorer_opt {
-            let positive_scorer_boxed: Box<dyn Scorer> = positive_scorer.into();
-            Ok(SpecializedScorer::Other(Box::new(Exclude::new(
-                positive_scorer_boxed,
-                exclude_scorer,
-            ))))
+            Ok(Box::new(Exclude::new(positive_scorer, exclude_scorer)))
         } else {
             Ok(positive_scorer)
         }
@@ -148,16 +126,14 @@ impl Weight for BooleanWeight {
         }
         } else if self.scoring_enabled {
             self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
-                .map(Into::into)
         } else {
             self.complex_scorer::<DoNothingCombiner>(reader, boost)
-                .map(Into::into)
         }
     }
 
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
         let mut scorer = self.scorer(reader, 1.0f32)?;
-        if scorer.seek(doc) != doc {
+        if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
         if !self.scoring_enabled {
@@ -174,51 +150,6 @@ impl Weight for BooleanWeight {
         }
         Ok(explanation)
     }
 
-    fn for_each(
-        &self,
-        reader: &SegmentReader,
-        callback: &mut dyn FnMut(DocId, Score),
-    ) -> crate::Result<()> {
-        let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
-        match scorer {
-            SpecializedScorer::TermUnion(mut union_scorer) => {
-                for_each_scorer(&mut union_scorer, callback);
-            }
-            SpecializedScorer::Other(mut scorer) => {
-                for_each_scorer(scorer.as_mut(), callback);
-            }
-        }
-        Ok(())
-    }
-
-    /// Calls `callback` with all of the `(doc, score)` for which score
-    /// is exceeding a given threshold.
-    ///
-    /// This method is useful for the TopDocs collector.
-    /// For all docsets, the blanket implementation has the benefit
-    /// of prefiltering (doc, score) pairs, avoiding the
-    /// virtual dispatch cost.
-    ///
-    /// More importantly, it makes it possible for scorers to implement
-    /// important optimization (e.g. BlockWAND for union).
-    fn for_each_pruning(
-        &self,
-        threshold: f32,
-        reader: &SegmentReader,
-        callback: &mut dyn FnMut(DocId, Score) -> Score,
-    ) -> crate::Result<()> {
-        let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
-        match scorer {
-            SpecializedScorer::TermUnion(mut union_scorer) => {
-                for_each_pruning_scorer(&mut union_scorer, threshold, callback);
-            }
-            SpecializedScorer::Other(mut scorer) => {
-                for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
-            }
-        }
-        Ok(())
-    }
 }
 
 fn is_positive_occur(occur: Occur) -> bool {
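The `SpecializedScorer` enum removed above (together with the `for_each`/`for_each_pruning` overrides that consumed it) lets hot loops call the concrete `Union<TermScorer, _>` directly instead of going through a `Box<dyn Scorer>` vtable. A minimal sketch of that enum-dispatch pattern, with toy scorers:

```rust
trait Scorer {
    fn score(&self) -> f32;
}

struct TermScorer(f32);

impl Scorer for TermScorer {
    fn score(&self) -> f32 {
        self.0
    }
}

enum Specialized {
    Term(TermScorer),       // concrete type: monomorphized, no vtable call
    Other(Box<dyn Scorer>), // general case: dynamic dispatch
}

fn total(scorers: &[Specialized]) -> f32 {
    scorers
        .iter()
        .map(|s| match s {
            Specialized::Term(t) => t.score(),   // static dispatch
            Specialized::Other(o) => o.score(),  // virtual dispatch
        })
        .sum()
}

fn main() {
    let scorers = vec![
        Specialized::Term(TermScorer(1.5)),
        Specialized::Other(Box::new(TermScorer(0.5))),
    ];
    assert_eq!(total(&scorers), 2.0);
}
```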
@@ -31,11 +31,24 @@ mod tests {
         // writing the segment
         let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         {
-            index_writer.add_document(doc!(text_field => "a b c"));
-            index_writer.add_document(doc!(text_field => "a c"));
-            index_writer.add_document(doc!(text_field => "b c"));
-            index_writer.add_document(doc!(text_field => "a b c d"));
-            index_writer.add_document(doc!(text_field => "d"));
+            let doc = doc!(text_field => "a b c");
+            index_writer.add_document(doc);
+        }
+        {
+            let doc = doc!(text_field => "a c");
+            index_writer.add_document(doc);
+        }
+        {
+            let doc = doc!(text_field => "b c");
+            index_writer.add_document(doc);
+        }
+        {
+            let doc = doc!(text_field => "a b c d");
+            index_writer.add_document(doc);
+        }
+        {
+            let doc = doc!(text_field => "d");
+            index_writer.add_document(doc);
         }
         assert!(index_writer.commit().is_ok());
     }
@@ -207,6 +220,7 @@ mod tests {
         let mut boolean_scorer = boolean_weight
             .scorer(searcher.segment_reader(0u32), 1.0f32)
             .unwrap();
+        assert!(boolean_scorer.advance());
         assert_eq!(boolean_scorer.doc(), 0u32);
         assert_nearly_equals(boolean_scorer.score(), 0.84163445f32);
     }
@@ -214,6 +228,7 @@ mod tests {
         let mut boolean_scorer = boolean_weight
             .scorer(searcher.segment_reader(0u32), 2.0f32)
             .unwrap();
+        assert!(boolean_scorer.advance());
         assert_eq!(boolean_scorer.doc(), 0u32);
         assert_nearly_equals(boolean_scorer.score(), 1.6832689f32);
     }
@@ -1,7 +1,8 @@
+use crate::common::BitSet;
 use crate::fastfield::DeleteBitSet;
 use crate::query::explanation::does_not_match;
 use crate::query::{Explanation, Query, Scorer, Weight};
-use crate::{DocId, DocSet, Searcher, SegmentReader, Term};
+use crate::{DocId, DocSet, Searcher, SegmentReader, SkipResult, Term};
 use std::collections::BTreeSet;
 use std::fmt;
 
@@ -71,7 +72,7 @@ impl Weight for BoostWeight {
 
     fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
         let mut scorer = self.scorer(reader, 1.0f32)?;
-        if scorer.seek(doc) != doc {
+        if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
         let mut explanation =
@@ -98,12 +99,12 @@ impl<S: Scorer> BoostScorer<S> {
 }
 
 impl<S: Scorer> DocSet for BoostScorer<S> {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         self.underlying.advance()
     }
 
-    fn seek(&mut self, target: DocId) -> DocId {
-        self.underlying.seek(target)
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        self.underlying.skip_next(target)
     }
 
     fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
@@ -118,6 +119,10 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
         self.underlying.size_hint()
     }
 
+    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
+        self.underlying.append_to_bitset(bitset)
+    }
+
     fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
         self.underlying.count(delete_bitset)
     }
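Every `BoostScorer` hunk above is pure delegation: boosting only rescales scores, so each `DocSet` method, including the newly added `append_to_bitset`, forwards to the wrapped scorer untouched. A sketch of the pattern over a trimmed-down trait (toy code, not tantivy's):

```rust
// Trimmed-down stand-ins; tantivy's real traits carry more methods.
trait DocSet {
    fn advance(&mut self) -> bool;
    fn doc(&self) -> u32;
}

struct VecDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl DocSet for VecDocSet {
    fn advance(&mut self) -> bool {
        self.cursor += 1;
        self.cursor <= self.docs.len()
    }
    fn doc(&self) -> u32 {
        self.docs[self.cursor - 1]
    }
}

// The wrapper only owns the boost; iteration is forwarded verbatim.
struct BoostScorer<S> {
    underlying: S,
    boost: f32,
}

impl<S: DocSet> BoostScorer<S> {
    fn score(&self, raw: f32) -> f32 {
        raw * self.boost
    }
}

impl<S: DocSet> DocSet for BoostScorer<S> {
    fn advance(&mut self) -> bool {
        self.underlying.advance()
    }
    fn doc(&self) -> u32 {
        self.underlying.doc()
    }
}

fn main() {
    let inner = VecDocSet { docs: vec![2, 7], cursor: 0 };
    let mut boosted = BoostScorer { underlying: inner, boost: 2.0 };
    assert!(boosted.advance());
    assert_eq!(boosted.doc(), 2);
    assert_eq!(boosted.score(1.5), 3.0);
}
```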
@@ -1,5 +1,4 @@
 use super::Scorer;
-use crate::docset::TERMINATED;
 use crate::query::explanation::does_not_match;
 use crate::query::Weight;
 use crate::query::{Explanation, Query};
@@ -49,12 +48,15 @@ impl Weight for EmptyWeight {
 pub struct EmptyScorer;
 
 impl DocSet for EmptyScorer {
-    fn advance(&mut self) -> DocId {
-        TERMINATED
+    fn advance(&mut self) -> bool {
+        false
     }
 
     fn doc(&self) -> DocId {
-        TERMINATED
+        panic!(
+            "You may not call .doc() on a scorer \
+             where the last call to advance() did not return true."
+        );
     }
 
     fn size_hint(&self) -> u32 {
@@ -70,15 +72,18 @@ impl Scorer for EmptyScorer {
 
 #[cfg(test)]
 mod tests {
-    use crate::docset::TERMINATED;
     use crate::query::EmptyScorer;
     use crate::DocSet;
 
     #[test]
     fn test_empty_scorer() {
         let mut empty_scorer = EmptyScorer;
-        assert_eq!(empty_scorer.doc(), TERMINATED);
-        assert_eq!(empty_scorer.advance(), TERMINATED);
-        assert_eq!(empty_scorer.doc(), TERMINATED);
+        assert!(!empty_scorer.advance());
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_empty_scorer_panic_on_doc_call() {
+        EmptyScorer.doc();
     }
 }
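The `EmptyScorer` change above replaces a silent sentinel with a loud contract violation: `doc()` may only be called after `advance()` has returned `true`, and the added `#[should_panic]` test pins that down. A toy reproduction of the contract:

```rust
struct EmptyScorer;

impl EmptyScorer {
    fn advance(&mut self) -> bool {
        false
    }
    fn doc(&self) -> u32 {
        panic!("doc() called on a scorer whose advance() never returned true");
    }
}

fn main() {
    let mut scorer = EmptyScorer;
    assert!(!scorer.advance());
    // Misuse now fails fast instead of yielding a sentinel value.
    let misuse = std::panic::catch_unwind(|| scorer.doc());
    assert!(misuse.is_err());
}
```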
@@ -1,37 +1,41 @@
-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::{DocSet, SkipResult};
 use crate::query::Scorer;
 use crate::DocId;
 use crate::Score;
 
+#[derive(Clone, Copy, Debug)]
+enum State {
+    ExcludeOne(DocId),
+    Finished,
+}
+
 /// Filters a given `DocSet` by removing the docs from a given `DocSet`.
 ///
 /// The excluding docset has no impact on scoring.
 pub struct Exclude<TDocSet, TDocSetExclude> {
     underlying_docset: TDocSet,
     excluding_docset: TDocSetExclude,
+    excluding_state: State,
 }
 
 impl<TDocSet, TDocSetExclude> Exclude<TDocSet, TDocSetExclude>
 where
-    TDocSet: DocSet,
     TDocSetExclude: DocSet,
 {
     /// Creates a new `ExcludeScorer`
     pub fn new(
-        mut underlying_docset: TDocSet,
+        underlying_docset: TDocSet,
         mut excluding_docset: TDocSetExclude,
     ) -> Exclude<TDocSet, TDocSetExclude> {
-        while underlying_docset.doc() != TERMINATED {
-            let target = underlying_docset.doc();
-            if excluding_docset.seek(target) != target {
-                // this document is not excluded.
-                break;
-            }
-            underlying_docset.advance();
-        }
+        let state = if excluding_docset.advance() {
+            State::ExcludeOne(excluding_docset.doc())
+        } else {
+            State::Finished
+        };
         Exclude {
             underlying_docset,
             excluding_docset,
+            excluding_state: state,
         }
     }
 }
@@ -47,7 +51,28 @@ where
     /// increasing `doc`.
     fn accept(&mut self) -> bool {
         let doc = self.underlying_docset.doc();
-        self.excluding_docset.seek(doc) != doc
+        match self.excluding_state {
+            State::ExcludeOne(excluded_doc) => {
+                if doc == excluded_doc {
+                    return false;
+                }
+                if excluded_doc > doc {
+                    return true;
+                }
+                match self.excluding_docset.skip_next(doc) {
+                    SkipResult::OverStep => {
+                        self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
+                        true
+                    }
+                    SkipResult::End => {
+                        self.excluding_state = State::Finished;
+                        true
+                    }
+                    SkipResult::Reached => false,
+                }
+            }
+            State::Finished => true,
+        }
     }
 }
 
@@ -56,24 +81,27 @@ where
     TDocSet: DocSet,
     TDocSetExclude: DocSet,
 {
-    fn advance(&mut self) -> DocId {
-        while self.underlying_docset.advance() != TERMINATED {
+    fn advance(&mut self) -> bool {
+        while self.underlying_docset.advance() {
             if self.accept() {
-                return self.doc();
+                return true;
             }
         }
-        TERMINATED
+        false
     }
 
-    fn seek(&mut self, target: DocId) -> DocId {
-        let underlying_seek_result = self.underlying_docset.seek(target);
-        if underlying_seek_result == TERMINATED {
-            return TERMINATED;
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        let underlying_skip_result = self.underlying_docset.skip_next(target);
+        if underlying_skip_result == SkipResult::End {
+            return SkipResult::End;
         }
         if self.accept() {
-            return underlying_seek_result;
+            underlying_skip_result
+        } else if self.advance() {
+            SkipResult::OverStep
+        } else {
+            SkipResult::End
         }
-        self.advance()
     }
 
     fn doc(&self) -> DocId {
@@ -113,9 +141,8 @@ mod tests {
             VecDocSet::from(vec![1, 2, 3, 10, 16, 24]),
         );
         let mut els = vec![];
-        while exclude_scorer.doc() != TERMINATED {
+        while exclude_scorer.advance() {
             els.push(exclude_scorer.doc());
-            exclude_scorer.advance();
         }
         assert_eq!(els, vec![5, 8, 15]);
     }
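The `Exclude` test above shows only the excluding fixture and the expected survivors `[5, 8, 15]`; the underlying docset sits outside the hunk. With a hypothetical underlying set chosen to be consistent with the assert, the semantics is plain sorted-set difference:

```rust
fn main() {
    // Hypothetical underlying docset (the real one is outside the hunk);
    // the excluding set and the expected output come from the test above.
    let underlying = vec![1u32, 2, 3, 5, 8, 10, 15, 24];
    let excluding = vec![1u32, 2, 3, 10, 16, 24];
    let kept: Vec<u32> = underlying
        .into_iter()
        .filter(|doc| !excluding.contains(doc))
        .collect();
    assert_eq!(kept, vec![5, 8, 15]);
}
```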
@@ -2,36 +2,10 @@ use crate::query::{AutomatonWeight, Query, Weight};
 use crate::schema::Term;
 use crate::Searcher;
 use crate::TantivyError::InvalidArgument;
-use levenshtein_automata::{Distance, LevenshteinAutomatonBuilder, DFA};
+use levenshtein_automata::{LevenshteinAutomatonBuilder, DFA};
 use once_cell::sync::Lazy;
 use std::collections::HashMap;
 use std::ops::Range;
-use tantivy_fst::Automaton;
-
-pub(crate) struct DFAWrapper(pub DFA);
-
-impl Automaton for DFAWrapper {
-    type State = u32;
-
-    fn start(&self) -> Self::State {
-        self.0.initial_state()
-    }
-
-    fn is_match(&self, state: &Self::State) -> bool {
-        match self.0.distance(*state) {
-            Distance::Exact(_) => true,
-            Distance::AtLeast(_) => false,
-        }
-    }
-
-    fn can_match(&self, state: &u32) -> bool {
-        *state != levenshtein_automata::SINK_STATE
-    }
-
-    fn accept(&self, state: &Self::State, byte: u8) -> Self::State {
-        self.0.transition(*state, byte)
-    }
-}
 
 /// A range of Levenshtein distances that we will build DFAs for our terms
 /// The computation is exponential, so best keep it to low single digits
@@ -117,7 +91,7 @@ impl FuzzyTermQuery {
         }
     }
 
-    /// Creates a new Fuzzy Query of the Term prefix
+    /// Creates a new Fuzzy Query that treats transpositions as cost one rather than two
     pub fn new_prefix(term: Term, distance: u8, transposition_cost_one: bool) -> FuzzyTermQuery {
         FuzzyTermQuery {
             term,
@@ -127,20 +101,13 @@ impl FuzzyTermQuery {
         }
     }
 
-    fn specialized_weight(&self) -> crate::Result<AutomatonWeight<DFAWrapper>> {
+    fn specialized_weight(&self) -> crate::Result<AutomatonWeight<DFA>> {
         // LEV_BUILDER is a HashMap, whose `get` method returns an Option
         match LEV_BUILDER.get(&(self.distance, false)) {
             // Unwrap the option and build the Ok(AutomatonWeight)
             Some(automaton_builder) => {
-                let automaton = if self.prefix {
-                    automaton_builder.build_prefix_dfa(self.term.text())
-                } else {
-                    automaton_builder.build_dfa(self.term.text())
-                };
-                Ok(AutomatonWeight::new(
-                    self.term.field(),
-                    DFAWrapper(automaton),
-                ))
+                let automaton = automaton_builder.build_dfa(self.term.text());
+                Ok(AutomatonWeight::new(self.term.field(), automaton))
             }
             None => Err(InvalidArgument(format!(
                 "Levenshtein distance of {} is not allowed. Choose a value in the {:?} range",
@@ -188,8 +155,6 @@ mod test {
     }
     let reader = index.reader().unwrap();
     let searcher = reader.searcher();
 
-    // passes because Levenshtein distance is 1 (substitute 'o' with 'a')
     {
         let term = Term::from_field_text(country_field, "japon");
 
@@ -201,29 +166,5 @@ mod test {
         let (score, _) = top_docs[0];
         assert_nearly_equals(1f32, score);
     }
-
-    // fails because non-prefix Levenshtein distance is more than 1 (add 'a' and 'n')
-    {
-        let term = Term::from_field_text(country_field, "jap");
-
-        let fuzzy_query = FuzzyTermQuery::new(term, 1, true);
-        let top_docs = searcher
-            .search(&fuzzy_query, &TopDocs::with_limit(2))
-            .unwrap();
-        assert_eq!(top_docs.len(), 0, "Expected no document");
-    }
-
-    // passes because prefix Levenshtein distance is 0
-    {
-        let term = Term::from_field_text(country_field, "jap");
-
-        let fuzzy_query = FuzzyTermQuery::new_prefix(term, 1, true);
-        let top_docs = searcher
-            .search(&fuzzy_query, &TopDocs::with_limit(2))
-            .unwrap();
-        assert_eq!(top_docs.len(), 1, "Expected only 1 document");
-        let (score, _) = top_docs[0];
-        assert_nearly_equals(1f32, score);
-    }
 }
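The test comments removed above spell out what the prefix variant buys: "japon" is within whole-word distance 1 of "japan", while "jap" is whole-word distance 2 but prefix distance 0. A tiny Levenshtein implementation (toy code; tantivy delegates this to the levenshtein_automata DFAs) makes those numbers concrete:

```rust
// Textbook dynamic-programming edit distance, kept minimal.
fn levenshtein(a: &str, b: &str) -> usize {
    let (a, b): (Vec<char>, Vec<char>) = (a.chars().collect(), b.chars().collect());
    let mut prev: Vec<usize> = (0..=b.len()).collect();
    for (i, ca) in a.iter().enumerate() {
        let mut cur = vec![i + 1];
        for (j, cb) in b.iter().enumerate() {
            let cost = if ca == cb { 0 } else { 1 };
            cur.push((prev[j] + cost).min(prev[j + 1] + 1).min(cur[j] + 1));
        }
        prev = cur;
    }
    prev[b.len()]
}

fn main() {
    assert_eq!(levenshtein("japon", "japan"), 1); // one substitution: matches
    assert_eq!(levenshtein("jap", "japan"), 2);   // too far for distance 1...
    // ...unless matched as a prefix: "jap" is a prefix of "japan" (distance 0).
    assert!("japan".starts_with("jap"));
}
```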
@@ -1,4 +1,4 @@
|
|||||||
use crate::docset::{DocSet, TERMINATED};
|
use crate::docset::{DocSet, SkipResult};
|
||||||
use crate::query::term_query::TermScorer;
|
use crate::query::term_query::TermScorer;
|
||||||
use crate::query::EmptyScorer;
|
use crate::query::EmptyScorer;
|
||||||
use crate::query::Scorer;
|
use crate::query::Scorer;
|
||||||
@@ -20,14 +20,12 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
|
|||||||
if scorers.len() == 1 {
|
if scorers.len() == 1 {
|
||||||
return scorers.pop().unwrap();
|
return scorers.pop().unwrap();
|
||||||
}
|
}
|
||||||
scorers.sort_by_key(|scorer| scorer.size_hint());
|
|
||||||
let doc = go_to_first_doc(&mut scorers[..]);
|
|
||||||
if doc == TERMINATED {
|
|
||||||
return Box::new(EmptyScorer);
|
|
||||||
}
|
|
||||||
// We know that we have at least 2 elements.
|
// We know that we have at least 2 elements.
|
||||||
let left = scorers.remove(0);
|
let num_docsets = scorers.len();
|
||||||
let right = scorers.remove(0);
|
scorers.sort_by(|left, right| right.size_hint().cmp(&left.size_hint()));
|
||||||
|
let left = scorers.pop().unwrap();
|
||||||
|
let right = scorers.pop().unwrap();
|
||||||
|
scorers.reverse();
|
||||||
let all_term_scorers = [&left, &right]
|
let all_term_scorers = [&left, &right]
|
||||||
.iter()
|
.iter()
|
||||||
.all(|&scorer| scorer.is::<TermScorer>());
|
.all(|&scorer| scorer.is::<TermScorer>());
|
||||||
@@ -36,12 +34,14 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
|
|||||||
left: *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
|
left: *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
|
||||||
right: *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
|
right: *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
|
||||||
others: scorers,
|
others: scorers,
|
||||||
|
num_docsets,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
Box::new(Intersection {
|
Box::new(Intersection {
|
||||||
left,
|
left,
|
||||||
right,
|
right,
|
||||||
others: scorers,
|
others: scorers,
|
||||||
|
num_docsets,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -50,34 +50,22 @@ pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>>
|
|||||||
left: TDocSet,
|
left: TDocSet,
|
||||||
right: TDocSet,
|
right: TDocSet,
|
||||||
others: Vec<TOtherDocSet>,
|
others: Vec<TOtherDocSet>,
|
||||||
}
|
num_docsets: usize,
|
||||||
|
|
||||||
fn go_to_first_doc<TDocSet: DocSet>(docsets: &mut [TDocSet]) -> DocId {
|
|
||||||
let mut candidate = 0;
|
|
||||||
'outer: loop {
|
|
||||||
for docset in docsets.iter_mut() {
|
|
||||||
let seek_doc = docset.seek(candidate);
|
|
||||||
if seek_doc > candidate {
|
|
||||||
candidate = docset.doc();
|
|
||||||
continue 'outer;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return candidate;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<TDocSet: DocSet> Intersection<TDocSet, TDocSet> {
|
impl<TDocSet: DocSet> Intersection<TDocSet, TDocSet> {
|
||||||
pub(crate) fn new(mut docsets: Vec<TDocSet>) -> Intersection<TDocSet, TDocSet> {
|
pub(crate) fn new(mut docsets: Vec<TDocSet>) -> Intersection<TDocSet, TDocSet> {
|
||||||
let num_docsets = docsets.len();
|
let num_docsets = docsets.len();
|
||||||
assert!(num_docsets >= 2);
|
assert!(num_docsets >= 2);
|
||||||
docsets.sort_by_key(|docset| docset.size_hint());
|
docsets.sort_by(|left, right| right.size_hint().cmp(&left.size_hint()));
|
||||||
go_to_first_doc(&mut docsets);
|
let left = docsets.pop().unwrap();
|
||||||
let left = docsets.remove(0);
|
let right = docsets.pop().unwrap();
|
||||||
let right = docsets.remove(0);
|
docsets.reverse();
|
||||||
Intersection {
|
Intersection {
|
||||||
left,
|
left,
|
||||||
right,
|
right,
|
||||||
others: docsets,
|
others: docsets,
|
||||||
|
num_docsets,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -92,44 +80,128 @@ impl<TDocSet: DocSet> Intersection<TDocSet, TDocSet> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<TDocSet: DocSet, TOtherDocSet: DocSet> Intersection<TDocSet, TOtherDocSet> {
|
||||||
|
pub(crate) fn docset_mut(&mut self, ord: usize) -> &mut dyn DocSet {
|
||||||
|
match ord {
|
||||||
|
0 => &mut self.left,
|
||||||
|
1 => &mut self.right,
|
||||||
|
n => &mut self.others[n - 2],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOtherDocSet> {
|
impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOtherDocSet> {
|
||||||
fn advance(&mut self) -> DocId {
|
fn advance(&mut self) -> bool {
         let (left, right) = (&mut self.left, &mut self.right);
-        let mut candidate = left.advance();
+        if !left.advance() {
+            return false;
+        }
+
+        let mut candidate = left.doc();
+        let mut other_candidate_ord: usize = usize::max_value();
+
         'outer: loop {
             // In the first part we look for a document in the intersection
             // of the two rarest `DocSet` in the intersection.

             loop {
-                let right_doc = right.seek(candidate);
-                candidate = left.seek(right_doc);
-                if candidate == right_doc {
-                    break;
+                match right.skip_next(candidate) {
+                    SkipResult::Reached => {
+                        break;
+                    }
+                    SkipResult::OverStep => {
+                        candidate = right.doc();
+                        other_candidate_ord = usize::max_value();
+                    }
+                    SkipResult::End => {
+                        return false;
+                    }
+                }
+                match left.skip_next(candidate) {
+                    SkipResult::Reached => {
+                        break;
+                    }
+                    SkipResult::OverStep => {
+                        candidate = left.doc();
+                        other_candidate_ord = usize::max_value();
+                    }
+                    SkipResult::End => {
+                        return false;
+                    }
                 }
             }

-            debug_assert_eq!(left.doc(), right.doc());
             // test the remaining scorers;
-            for docset in self.others.iter_mut() {
-                let seek_doc = docset.seek(candidate);
-                if seek_doc > candidate {
-                    candidate = left.seek(seek_doc);
-                    continue 'outer;
+            for (ord, docset) in self.others.iter_mut().enumerate() {
+                if ord == other_candidate_ord {
+                    continue;
+                }
+                // `candidate_ord` is already at the
+                // right position.
+                //
+                // Calling `skip_next` would advance this docset
+                // and miss it.
+                match docset.skip_next(candidate) {
+                    SkipResult::Reached => {}
+                    SkipResult::OverStep => {
+                        // this is not in the intersection,
+                        // let's update our candidate.
+                        candidate = docset.doc();
+                        match left.skip_next(candidate) {
+                            SkipResult::Reached => {
+                                other_candidate_ord = ord;
+                            }
+                            SkipResult::OverStep => {
+                                candidate = left.doc();
+                                other_candidate_ord = usize::max_value();
+                            }
+                            SkipResult::End => {
+                                return false;
+                            }
+                        }
+                        continue 'outer;
+                    }
+                    SkipResult::End => {
+                        return false;
+                    }
                 }
             }
-            return candidate;
+            return true;
         }
     }

-    fn seek(&mut self, target: DocId) -> DocId {
-        self.left.seek(target);
-        let mut docsets: Vec<&mut dyn DocSet> = vec![&mut self.left, &mut self.right];
-        for docset in &mut self.others {
-            docsets.push(docset);
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        // We optimize skipping by skipping every single member
+        // of the intersection to target.
+        let mut current_target: DocId = target;
+        let mut current_ord = self.num_docsets;
+
+        'outer: loop {
+            for ord in 0..self.num_docsets {
+                let docset = self.docset_mut(ord);
+                if ord == current_ord {
+                    continue;
+                }
+                match docset.skip_next(current_target) {
+                    SkipResult::End => {
+                        return SkipResult::End;
+                    }
+                    SkipResult::OverStep => {
+                        // update the target
+                        // for the remaining members of the intersection.
+                        current_target = docset.doc();
+                        current_ord = ord;
+                        continue 'outer;
+                    }
+                    SkipResult::Reached => {}
+                }
+            }
+            if target == current_target {
+                return SkipResult::Reached;
+            } else {
+                assert!(current_target > target);
+                return SkipResult::OverStep;
+            }
         }
-        go_to_first_doc(&mut docsets[..])
     }

     fn doc(&self) -> DocId {
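Two iteration contracts meet in this hunk: on the base branch `advance()` returns the next `DocId`, while on this branch it returns a `bool` and a fresh `DocSet` sits *before* its first document. A minimal consumption sketch, reusing the names from the diff above:

```rust
// Minimal sketch of consuming a DocSet under the bool-returning contract of
// this branch: advance() must be called once before the first doc() read.
let mut intersection = Intersection::new(vec![left, right]);
while intersection.advance() {
    let doc = intersection.doc(); // valid only after advance() returned true
    // ... process doc
}
// advance() returned false: the docset is exhausted; doc() must not be read.
```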
@@ -156,7 +228,7 @@ where
 #[cfg(test)]
 mod tests {
     use super::Intersection;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::{DocSet, SkipResult};
     use crate::postings::tests::test_skip_against_unoptimized;
     use crate::query::VecDocSet;

@@ -166,18 +238,20 @@ mod tests {
             let left = VecDocSet::from(vec![1, 3, 9]);
             let right = VecDocSet::from(vec![3, 4, 9, 18]);
             let mut intersection = Intersection::new(vec![left, right]);
+            assert!(intersection.advance());
             assert_eq!(intersection.doc(), 3);
-            assert_eq!(intersection.advance(), 9);
+            assert!(intersection.advance());
             assert_eq!(intersection.doc(), 9);
-            assert_eq!(intersection.advance(), TERMINATED);
+            assert!(!intersection.advance());
         }
         {
             let a = VecDocSet::from(vec![1, 3, 9]);
             let b = VecDocSet::from(vec![3, 4, 9, 18]);
             let c = VecDocSet::from(vec![1, 5, 9, 111]);
             let mut intersection = Intersection::new(vec![a, b, c]);
+            assert!(intersection.advance());
             assert_eq!(intersection.doc(), 9);
-            assert_eq!(intersection.advance(), TERMINATED);
+            assert!(!intersection.advance());
         }
     }

@@ -186,8 +260,8 @@ mod tests {
         let left = VecDocSet::from(vec![0]);
         let right = VecDocSet::from(vec![0]);
         let mut intersection = Intersection::new(vec![left, right]);
+        assert!(intersection.advance());
         assert_eq!(intersection.doc(), 0);
-        assert_eq!(intersection.advance(), TERMINATED);
     }

     #[test]
@@ -195,7 +269,7 @@ mod tests {
         let left = VecDocSet::from(vec![0, 1, 2, 4]);
         let right = VecDocSet::from(vec![2, 5]);
         let mut intersection = Intersection::new(vec![left, right]);
-        assert_eq!(intersection.seek(2), 2);
+        assert_eq!(intersection.skip_next(2), SkipResult::Reached);
         assert_eq!(intersection.doc(), 2);
     }

@@ -238,7 +312,7 @@ mod tests {
         let a = VecDocSet::from(vec![1, 3]);
         let b = VecDocSet::from(vec![1, 4]);
         let c = VecDocSet::from(vec![3, 9]);
-        let intersection = Intersection::new(vec![a, b, c]);
-        assert_eq!(intersection.doc(), TERMINATED);
+        let mut intersection = Intersection::new(vec![a, b, c]);
+        assert!(!intersection.advance());
     }
 }
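The tests above pin down the `skip_next` contract that replaces `seek`. Judging from its use throughout this compare, the three variants describe where the docset lands — a sketch inferred from usage, not the trait's actual doc comment:

```rust
// After skip_next(target), the docset's position is described by the result:
match docset.skip_next(target) {
    SkipResult::Reached => {
        // positioned exactly on target: doc() == target
    }
    SkipResult::OverStep => {
        // positioned on the first matching doc strictly greater than target
    }
    SkipResult::End => {
        // no matching doc >= target remains; doc() must not be read anymore
    }
}
```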
@@ -1,4 +1,6 @@
-/*! Query Module */
+/*!
+Query
+*/

 mod all_query;
 mod automaton_weight;
@@ -40,8 +42,6 @@ pub use self::boost_query::BoostQuery;
 pub use self::empty_query::{EmptyQuery, EmptyScorer, EmptyWeight};
 pub use self::exclude::Exclude;
 pub use self::explanation::Explanation;
-#[cfg(test)]
-pub(crate) use self::fuzzy_query::DFAWrapper;
 pub use self::fuzzy_query::FuzzyTermQuery;
 pub use self::intersection::intersect_scorers;
 pub use self::phrase_query::PhraseQuery;
@@ -12,6 +12,7 @@ pub mod tests {
     use super::*;
     use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE};
     use crate::core::Index;
+    use crate::error::TantivyError;
     use crate::schema::{Schema, Term, TEXT};
     use crate::tests::assert_nearly_equals;
     use crate::DocAddress;
@@ -60,8 +61,8 @@ pub mod tests {
                 .map(|docaddr| docaddr.1)
                 .collect::<Vec<_>>()
         };
-        assert_eq!(test_query(vec!["a", "b"]), vec![1, 2, 3, 4]);
         assert_eq!(test_query(vec!["a", "b", "c"]), vec![2, 4]);
+        assert_eq!(test_query(vec!["a", "b"]), vec![1, 2, 3, 4]);
         assert_eq!(test_query(vec!["b", "b"]), vec![0, 1]);
         assert!(test_query(vec!["g", "ewrwer"]).is_empty());
         assert!(test_query(vec!["g", "a"]).is_empty());
@@ -126,16 +127,21 @@ pub mod tests {
             Term::from_field_text(text_field, "a"),
             Term::from_field_text(text_field, "b"),
         ]);
-        let search_result = searcher
+        match searcher
             .search(&phrase_query, &TEST_COLLECTOR_WITH_SCORE)
-            .map(|_| ());
-        assert!(matches!(
-            search_result,
-            Err(crate::TantivyError::SchemaError(msg))
-            if msg == "Applied phrase query on field \"text\", which does not have positions \
-                       indexed"
-        ));
+            .map(|_| ())
+            .unwrap_err()
+        {
+            TantivyError::SchemaError(ref msg) => {
+                assert_eq!(
+                    "Applied phrase query on field \"text\", which does not have positions indexed",
+                    msg.as_str()
+                );
+            }
+            _ => {
+                panic!("Should have returned an error");
+            }
+        }
     }

     #[test]
@@ -1,4 +1,4 @@
-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::{DocSet, SkipResult};
 use crate::fieldnorm::FieldNormReader;
 use crate::postings::Postings;
 use crate::query::bm25::BM25Weight;
@@ -25,12 +25,12 @@ impl<TPostings: Postings> PostingsWithOffset<TPostings> {
 }

 impl<TPostings: Postings> DocSet for PostingsWithOffset<TPostings> {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         self.postings.advance()
     }

-    fn seek(&mut self, target: DocId) -> DocId {
-        self.postings.seek(target)
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        self.postings.skip_next(target)
     }

     fn doc(&self) -> DocId {
@@ -149,7 +149,7 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
                 PostingsWithOffset::new(postings, (max_offset - offset) as u32)
             })
             .collect::<Vec<_>>();
-        let mut scorer = PhraseScorer {
+        PhraseScorer {
             intersection_docset: Intersection::new(postings_with_offsets),
             num_terms: num_docsets,
             left: Vec::with_capacity(100),
@@ -158,11 +158,7 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
             similarity_weight,
             fieldnorm_reader,
             score_needed,
-        };
-        if scorer.doc() != TERMINATED && !scorer.phrase_match() {
-            scorer.advance();
         }
-        scorer
     }

     pub fn phrase_count(&self) -> u32 {
@@ -229,21 +225,31 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
 }

 impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
-    fn advance(&mut self) -> DocId {
-        loop {
-            let doc = self.intersection_docset.advance();
-            if doc == TERMINATED || self.phrase_match() {
-                return doc;
+    fn advance(&mut self) -> bool {
+        while self.intersection_docset.advance() {
+            if self.phrase_match() {
+                return true;
             }
         }
+        false
     }

-    fn seek(&mut self, target: DocId) -> DocId {
-        let doc = self.intersection_docset.seek(target);
-        if doc == TERMINATED || self.phrase_match() {
-            return doc;
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        if self.intersection_docset.skip_next(target) == SkipResult::End {
+            return SkipResult::End;
+        }
+        if self.phrase_match() {
+            if self.doc() == target {
+                return SkipResult::Reached;
+            } else {
+                return SkipResult::OverStep;
+            }
+        }
+        if self.advance() {
+            SkipResult::OverStep
+        } else {
+            SkipResult::End
         }
-        self.advance()
     }

     fn doc(&self) -> DocId {
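`PhraseScorer::skip_next` above layers a positional check on top of the intersection's skip. The same shape works for any docset filtered by a predicate; a hypothetical generalization (the helper and its names are illustrative, not tantivy API):

```rust
use tantivy::{DocId, DocSet, SkipResult};

// Illustrative generalization of PhraseScorer::skip_next: `is_match` stands in
// for phrase_match(). Skip the inner docset, re-check the predicate, and fall
// back to advancing when the candidate fails it.
fn skip_to_matching<D: DocSet>(
    inner: &mut D,
    target: DocId,
    mut is_match: impl FnMut(&mut D) -> bool,
) -> SkipResult {
    if inner.skip_next(target) == SkipResult::End {
        return SkipResult::End;
    }
    loop {
        if is_match(inner) {
            return if inner.doc() == target {
                SkipResult::Reached
            } else {
                SkipResult::OverStep
            };
        }
        if !inner.advance() {
            return SkipResult::End;
        }
    }
}
```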
@@ -9,8 +9,8 @@ use crate::query::Weight;
 use crate::query::{EmptyScorer, Explanation};
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
-use crate::Result;
 use crate::{DocId, DocSet};
+use crate::{Result, SkipResult};

 pub struct PhraseWeight {
     phrase_terms: Vec<(usize, Term)>,
@@ -99,7 +99,7 @@ impl Weight for PhraseWeight {
             return Err(does_not_match(doc));
         }
         let mut scorer = scorer_opt.unwrap();
-        if scorer.seek(doc) != doc {
+        if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
         let fieldnorm_reader = self.fieldnorm_reader(reader);
@@ -114,7 +114,6 @@ impl Weight for PhraseWeight {
 #[cfg(test)]
 mod tests {
     use super::super::tests::create_index;
-    use crate::docset::TERMINATED;
     use crate::query::PhraseQuery;
     use crate::{DocSet, Term};

@@ -133,11 +132,12 @@ mod tests {
             .phrase_scorer(searcher.segment_reader(0u32), 1.0f32)
             .unwrap()
             .unwrap();
+        assert!(phrase_scorer.advance());
         assert_eq!(phrase_scorer.doc(), 1);
         assert_eq!(phrase_scorer.phrase_count(), 2);
-        assert_eq!(phrase_scorer.advance(), 2);
+        assert!(phrase_scorer.advance());
         assert_eq!(phrase_scorer.doc(), 2);
         assert_eq!(phrase_scorer.phrase_count(), 1);
-        assert_eq!(phrase_scorer.advance(), TERMINATED);
+        assert!(!phrase_scorer.advance());
     }
 }
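`PhraseWeight::explain`, like the other `explain` implementations in this compare, treats `SkipResult::Reached` as the only proof that `doc` itself matches; both `OverStep` and `End` mean "no match here". A sketch of that shared idiom (hypothetical helper, reusing `does_not_match` from the diff):

```rust
// Sketch: only SkipResult::Reached proves `doc` matches the scorer.
fn check_positioned(scorer: &mut dyn Scorer, doc: DocId) -> crate::Result<()> {
    if scorer.skip_next(doc) != SkipResult::Reached {
        // OverStep or End: the query does not match `doc` itself.
        return Err(does_not_match(doc));
    }
    Ok(())
}
```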
@@ -113,9 +113,8 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
 /// The language covered by the current parser is extremely simple.
 ///
 /// * simple terms: "e.g.: `Barack Obama` are simply tokenized using
-/// tantivy's [`SimpleTokenizer`](tantivy::tokenizer::SimpleTokenizer), hence
-/// becoming `["barack", "obama"]`. The terms are then searched within
-/// the default terms of the query parser.
+/// tantivy's `StandardTokenizer`, hence becoming `["barack", "obama"]`.
+/// The terms are then searched within the default terms of the query parser.
 ///
 /// e.g. If `body` and `title` are default fields, our example terms are
 /// `["title:barack", "body:barack", "title:obama", "body:obama"]`.
@@ -175,16 +174,6 @@ pub struct QueryParser {
     boost: HashMap<Field, f32>,
 }

-fn all_negative(ast: &LogicalAST) -> bool {
-    match ast {
-        LogicalAST::Leaf(_) => false,
-        LogicalAST::Boost(ref child_ast, _) => all_negative(&*child_ast),
-        LogicalAST::Clause(children) => children
-            .iter()
-            .all(|(ref occur, child)| (*occur == Occur::MustNot) || all_negative(child)),
-    }
-}
-
 impl QueryParser {
     /// Creates a `QueryParser`, given
     /// * schema - index Schema
@@ -264,13 +253,8 @@ impl QueryParser {
         &self,
         user_input_ast: UserInputAST,
     ) -> Result<LogicalAST, QueryParserError> {
-        let ast = self.compute_logical_ast_with_occur(user_input_ast)?;
-        if let LogicalAST::Clause(children) = &ast {
-            if children.is_empty() {
-                return Ok(ast);
-            }
-        }
-        if all_negative(&ast) {
+        let (occur, ast) = self.compute_logical_ast_with_occur(user_input_ast)?;
+        if occur == Occur::MustNot {
             return Err(QueryParserError::AllButQueryForbidden);
         }
         Ok(ast)
@@ -426,23 +410,31 @@ impl QueryParser {
     fn compute_logical_ast_with_occur(
         &self,
         user_input_ast: UserInputAST,
-    ) -> Result<LogicalAST, QueryParserError> {
+    ) -> Result<(Occur, LogicalAST), QueryParserError> {
         match user_input_ast {
             UserInputAST::Clause(sub_queries) => {
                 let default_occur = self.default_occur();
                 let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
-                for (occur_opt, sub_ast) in sub_queries {
-                    let sub_ast = self.compute_logical_ast_with_occur(sub_ast)?;
-                    let occur = occur_opt.unwrap_or(default_occur);
-                    logical_sub_queries.push((occur, sub_ast));
+                for sub_query in sub_queries {
+                    let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
+                    let new_occur = Occur::compose(default_occur, occur);
+                    logical_sub_queries.push((new_occur, sub_ast));
                 }
-                Ok(LogicalAST::Clause(logical_sub_queries))
+                Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
+            }
+            UserInputAST::Unary(left_occur, subquery) => {
+                let (right_occur, logical_sub_queries) =
+                    self.compute_logical_ast_with_occur(*subquery)?;
+                Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
             }
             UserInputAST::Boost(ast, boost) => {
-                let ast = self.compute_logical_ast_with_occur(*ast)?;
-                Ok(ast.boost(boost))
+                let (occur, ast_without_occur) = self.compute_logical_ast_with_occur(*ast)?;
+                Ok((occur, ast_without_occur.boost(boost)))
+            }
+            UserInputAST::Leaf(leaf) => {
+                let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
+                Ok((Occur::Should, result_ast))
             }
-            UserInputAST::Leaf(leaf) => self.compute_logical_ast_from_leaf(*leaf),
         }
     }

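The rewritten `compute_logical_ast_with_occur` threads an `Occur` up through the recursion and folds nested unary operators with `Occur::compose`. Assuming the usual composition table — in particular that two negations cancel out, which is an assumption, since the table itself is not shown in this diff — the folding behaves like:

```rust
use tantivy::query::Occur;

// Assumed composition of nested occurrence flags (not shown in this compare):
fn main() {
    assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
    assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
    assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
    // Double negation: -(-a) requires a.
    assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
}
```

This is why the caller above only has to check `occur == Occur::MustNot` at the top to reject all-negative queries.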
@@ -790,20 +782,6 @@ mod test {
         );
     }

-    #[test]
-    fn test_parse_query_to_ast_ab_c() {
-        test_parse_query_to_logical_ast_helper(
-            "(+title:a +title:b) title:c",
-            "((+Term(field=0,bytes=[97]) +Term(field=0,bytes=[98])) Term(field=0,bytes=[99]))",
-            false,
-        );
-        test_parse_query_to_logical_ast_helper(
-            "(+title:a +title:b) title:c",
-            "(+(+Term(field=0,bytes=[97]) +Term(field=0,bytes=[98])) +Term(field=0,bytes=[99]))",
-            true,
-        );
-    }
-
     #[test]
     pub fn test_parse_query_to_ast_single_term() {
         test_parse_query_to_logical_ast_helper(
@@ -823,13 +801,11 @@ mod test {
             Term(field=1,bytes=[116, 105, 116, 105])))",
             false,
         );
-    }
-
-    #[test]
-    fn test_single_negative_term() {
-        assert_matches!(
-            parse_query_to_logical_ast("-title:toto", false),
-            Err(QueryParserError::AllButQueryForbidden)
+        assert_eq!(
+            parse_query_to_logical_ast("-title:toto", false)
+                .err()
+                .unwrap(),
+            QueryParserError::AllButQueryForbidden
         );
     }

@@ -989,18 +965,6 @@ mod test {
         assert!(query_parser.parse_query("with_stop_words:the").is_ok());
     }

-    #[test]
-    pub fn test_parse_query_single_negative_term_through_error() {
-        assert_matches!(
-            parse_query_to_logical_ast("-title:toto", true),
-            Err(QueryParserError::AllButQueryForbidden)
-        );
-        assert_matches!(
-            parse_query_to_logical_ast("-title:toto", false),
-            Err(QueryParserError::AllButQueryForbidden)
-        );
-    }
-
     #[test]
     pub fn test_parse_query_to_ast_conjunction() {
         test_parse_query_to_logical_ast_helper(
@@ -1020,6 +984,12 @@ mod test {
             Term(field=1,bytes=[116, 105, 116, 105])))",
             true,
         );
+        assert_eq!(
+            parse_query_to_logical_ast("-title:toto", true)
+                .err()
+                .unwrap(),
+            QueryParserError::AllButQueryForbidden
+        );
         test_parse_query_to_logical_ast_helper(
             "title:a b",
             "(+Term(field=0,bytes=[97]) \
@@ -1043,26 +1013,4 @@ mod test {
             false
         );
     }
-
-    #[test]
-    fn test_and_default_regardless_of_default_conjunctive() {
-        for &default_conjunction in &[false, true] {
-            test_parse_query_to_logical_ast_helper(
-                "title:a AND title:b",
-                "(+Term(field=0,bytes=[97]) +Term(field=0,bytes=[98]))",
-                default_conjunction,
-            );
-        }
-    }
-
-    #[test]
-    fn test_or_default_conjunctive() {
-        for &default_conjunction in &[false, true] {
-            test_parse_query_to_logical_ast_helper(
-                "title:a OR title:b",
-                "(Term(field=0,bytes=[97]) Term(field=0,bytes=[98]))",
-                default_conjunction,
-            );
-        }
-    }
 }
@@ -10,7 +10,7 @@ use crate::schema::Type;
 use crate::schema::{Field, IndexRecordOption, Term};
 use crate::termdict::{TermDictionary, TermStreamer};
 use crate::DocId;
-use crate::Result;
+use crate::{Result, SkipResult};
 use std::collections::Bound;
 use std::ops::Range;

@@ -300,13 +300,10 @@ impl Weight for RangeWeight {
             let term_info = term_range.value();
             let mut block_segment_postings = inverted_index
                 .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
-            loop {
+            while block_segment_postings.advance() {
                 for &doc in block_segment_postings.docs() {
                     doc_bitset.insert(doc);
                 }
-                if !block_segment_postings.advance() {
-                    break;
-                }
             }
         }
         let doc_bitset = BitSetDocSet::from(doc_bitset);
@@ -315,7 +312,7 @@ impl Weight for RangeWeight {

     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
         let mut scorer = self.scorer(reader, 1.0f32)?;
-        if scorer.seek(doc) != doc {
+        if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
         Ok(Explanation::new("RangeQuery", 1.0f32))
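The `loop { …; if !x.advance() { break; } }` to `while x.advance() { … }` rewrite in the range weight recurs across this compare. A toy docset (hypothetical, not tantivy code) makes the equivalence concrete:

```rust
// Toy docset: with a bool-returning advance(), the termination test folds
// into the `while` condition instead of a trailing `if !advance { break; }`.
struct Toy {
    docs: Vec<u32>,
    cursor: usize, // 0 means "before the first document"
}

impl Toy {
    fn advance(&mut self) -> bool {
        self.cursor += 1;
        self.cursor <= self.docs.len()
    }
    fn doc(&self) -> u32 {
        self.docs[self.cursor - 1]
    }
}

fn main() {
    let mut toy = Toy { docs: vec![2, 3, 5], cursor: 0 };
    while toy.advance() {
        println!("{}", toy.doc()); // prints 2, 3, 5
    }
}
```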
@@ -1,8 +1,9 @@
-use crate::docset::DocSet;
+use crate::docset::{DocSet, SkipResult};
 use crate::query::score_combiner::ScoreCombiner;
 use crate::query::Scorer;
 use crate::DocId;
 use crate::Score;
+use std::cmp::Ordering;
 use std::marker::PhantomData;

 /// Given a required scorer and an optional scorer
@@ -16,6 +17,7 @@ pub struct RequiredOptionalScorer<TReqScorer, TOptScorer, TScoreCombiner> {
     req_scorer: TReqScorer,
     opt_scorer: TOptScorer,
     score_cache: Option<Score>,
+    opt_finished: bool,
     _phantom: PhantomData<TScoreCombiner>,
 }

@@ -27,12 +29,14 @@ where
     /// Creates a new `RequiredOptionalScorer`.
     pub fn new(
         req_scorer: TReqScorer,
-        opt_scorer: TOptScorer,
+        mut opt_scorer: TOptScorer,
     ) -> RequiredOptionalScorer<TReqScorer, TOptScorer, TScoreCombiner> {
+        let opt_finished = !opt_scorer.advance();
         RequiredOptionalScorer {
             req_scorer,
             opt_scorer,
             score_cache: None,
+            opt_finished,
             _phantom: PhantomData,
         }
     }
@@ -44,7 +48,7 @@ where
     TReqScorer: DocSet,
     TOptScorer: DocSet,
 {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         self.score_cache = None;
         self.req_scorer.advance()
     }
@@ -72,8 +76,22 @@ where
         let doc = self.doc();
         let mut score_combiner = TScoreCombiner::default();
         score_combiner.update(&mut self.req_scorer);
-        if self.opt_scorer.seek(doc) == doc {
-            score_combiner.update(&mut self.opt_scorer);
+        if !self.opt_finished {
+            match self.opt_scorer.doc().cmp(&doc) {
+                Ordering::Greater => {}
+                Ordering::Equal => {
+                    score_combiner.update(&mut self.opt_scorer);
+                }
+                Ordering::Less => match self.opt_scorer.skip_next(doc) {
+                    SkipResult::Reached => {
+                        score_combiner.update(&mut self.opt_scorer);
+                    }
+                    SkipResult::End => {
+                        self.opt_finished = true;
+                    }
+                    SkipResult::OverStep => {}
+                },
+            }
         }
         let score = score_combiner.score();
         self.score_cache = Some(score);
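The rewritten `score()` only touches the optional scorer while `opt_finished` is false, and dispatches on a three-way comparison because, on this branch, `skip_next` always moves the docset: calling it when the optional scorer already sits on or past `doc` would lose a match. The decision table, as a small illustrative function:

```rust
use std::cmp::Ordering;

// Illustrative decision table of the score() rewrite above.
fn opt_action(opt_doc: u32, req_doc: u32) -> &'static str {
    match opt_doc.cmp(&req_doc) {
        Ordering::Greater => "ahead: contribute nothing for this doc",
        Ordering::Equal => "combine its score directly",
        Ordering::Less => "skip_next(req_doc), then combine only on Reached",
    }
}

fn main() {
    assert_eq!(opt_action(2, 7), "skip_next(req_doc), then combine only on Reached");
    assert_eq!(opt_action(7, 7), "combine its score directly");
    assert_eq!(opt_action(11, 7), "ahead: contribute nothing for this doc");
}
```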
@@ -84,7 +102,7 @@ where
 #[cfg(test)]
 mod tests {
     use super::RequiredOptionalScorer;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::DocSet;
     use crate::postings::tests::test_skip_against_unoptimized;
     use crate::query::score_combiner::{DoNothingCombiner, SumCombiner};
     use crate::query::ConstScorer;
@@ -101,9 +119,8 @@ mod tests {
             ConstScorer::from(VecDocSet::from(vec![])),
         );
         let mut docs = vec![];
-        while reqoptscorer.doc() != TERMINATED {
+        while reqoptscorer.advance() {
             docs.push(reqoptscorer.doc());
-            reqoptscorer.advance();
         }
         assert_eq!(docs, req);
     }
@@ -116,45 +133,46 @@ mod tests {
             ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0f32),
         );
         {
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 1);
             assert_eq!(reqoptscorer.score(), 2f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 3);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 3);
             assert_eq!(reqoptscorer.score(), 1f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 7);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 7);
             assert_eq!(reqoptscorer.score(), 2f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 8);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 8);
             assert_eq!(reqoptscorer.score(), 1f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 9);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 9);
             assert_eq!(reqoptscorer.score(), 1f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 10);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 10);
             assert_eq!(reqoptscorer.score(), 1f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 13);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 13);
             assert_eq!(reqoptscorer.score(), 1f32);
         }
         {
-            assert_eq!(reqoptscorer.advance(), 15);
+            assert!(reqoptscorer.advance());
             assert_eq!(reqoptscorer.doc(), 15);
             assert_eq!(reqoptscorer.score(), 2f32);
         }
-        assert_eq!(reqoptscorer.advance(), TERMINATED);
+        assert!(!reqoptscorer.advance());
     }

     #[test]
@@ -1,4 +1,5 @@
-use crate::docset::DocSet;
+use crate::common::BitSet;
+use crate::docset::{DocSet, SkipResult};
 use crate::DocId;
 use crate::Score;
 use downcast_rs::impl_downcast;
@@ -12,6 +13,14 @@ pub trait Scorer: downcast_rs::Downcast + DocSet + 'static {
     ///
     /// This method will perform a bit of computation and is not cached.
     fn score(&mut self) -> Score;

+    /// Iterates through all of the document matched by the DocSet
+    /// `DocSet` and push the scored documents to the collector.
+    fn for_each(&mut self, callback: &mut dyn FnMut(DocId, Score)) {
+        while self.advance() {
+            callback(self.doc(), self.score());
+        }
+    }
 }

 impl_downcast!(Scorer);
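The new default `for_each` moves the iteration loop into the `Scorer` itself, so a concrete scorer can run it without per-document virtual dispatch; the `Box<dyn Scorer>` forwarding in the next hunk preserves that by delegating to the boxed type. A usage sketch, assuming `scorer` is any value implementing the trait above:

```rust
// Sketch: collector-style consumption through the new default method.
let mut hits: Vec<(DocId, Score)> = Vec::new();
scorer.for_each(&mut |doc, score| {
    hits.push((doc, score)); // called once per matching document
});
```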
@@ -20,6 +29,11 @@ impl Scorer for Box<dyn Scorer> {
     fn score(&mut self) -> Score {
         self.deref_mut().score()
     }

+    fn for_each(&mut self, callback: &mut dyn FnMut(DocId, Score)) {
+        let scorer = self.deref_mut();
+        scorer.for_each(callback);
+    }
 }

 /// Wraps a `DocSet` and simply returns a constant `Scorer`.
@@ -47,12 +61,12 @@ impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
 }

 impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         self.docset.advance()
     }

-    fn seek(&mut self, target: DocId) -> DocId {
-        self.docset.seek(target)
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        self.docset.skip_next(target)
     }

     fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
@@ -66,6 +80,10 @@ impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
     fn size_hint(&self) -> u32 {
         self.docset.size_hint()
     }

+    fn append_to_bitset(&mut self, bitset: &mut BitSet) {
+        self.docset.append_to_bitset(bitset);
+    }
 }

 impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
@@ -26,8 +26,10 @@ mod tests {
     {
         // writing the segment
         let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        let doc = doc!(text_field => "a");
-        index_writer.add_document(doc);
+        {
+            let doc = doc!(text_field => "a");
+            index_writer.add_document(doc);
+        }
         assert!(index_writer.commit().is_ok());
     }
     let searcher = index.reader().unwrap().searcher();
@@ -38,6 +40,7 @@ mod tests {
     let term_weight = term_query.weight(&searcher, true).unwrap();
     let segment_reader = searcher.segment_reader(0);
     let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32).unwrap();
+    assert!(term_scorer.advance());
     assert_eq!(term_scorer.doc(), 0);
     assert_eq!(term_scorer.score(), 0.28768212);
 }
@@ -1,4 +1,4 @@
-use crate::docset::DocSet;
+use crate::docset::{DocSet, SkipResult};
 use crate::query::{Explanation, Scorer};
 use crate::DocId;
 use crate::Score;
@@ -45,12 +45,12 @@ impl TermScorer {
 }

 impl DocSet for TermScorer {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         self.postings.advance()
     }

-    fn seek(&mut self, target: DocId) -> DocId {
-        self.postings.seek(target)
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        self.postings.skip_next(target)
     }

     fn doc(&self) -> DocId {
@@ -4,13 +4,12 @@ use crate::docset::DocSet;
 use crate::postings::SegmentPostings;
 use crate::query::bm25::BM25Weight;
 use crate::query::explanation::does_not_match;
-use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
 use crate::query::Weight;
 use crate::query::{Explanation, Scorer};
 use crate::schema::IndexRecordOption;
-use crate::Result;
+use crate::DocId;
 use crate::Term;
-use crate::{DocId, Score};
+use crate::{Result, SkipResult};

 pub struct TermWeight {
     term: Term,
@@ -26,7 +25,7 @@ impl Weight for TermWeight {

     fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
         let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
-        if scorer.seek(doc) != doc {
+        if scorer.skip_next(doc) != SkipResult::Reached {
             return Err(does_not_match(doc));
         }
         Ok(scorer.explain())
@@ -44,39 +43,6 @@ impl Weight for TermWeight {
                 .unwrap_or(0))
         }
     }
-
-    /// Iterates through all of the document matched by the DocSet
-    /// `DocSet` and push the scored documents to the collector.
-    fn for_each(
-        &self,
-        reader: &SegmentReader,
-        callback: &mut dyn FnMut(DocId, Score),
-    ) -> crate::Result<()> {
-        let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
-        for_each_scorer(&mut scorer, callback);
-        Ok(())
-    }
-
-    /// Calls `callback` with all of the `(doc, score)` for which score
-    /// is exceeding a given threshold.
-    ///
-    /// This method is useful for the TopDocs collector.
-    /// For all docsets, the blanket implementation has the benefit
-    /// of prefiltering (doc, score) pairs, avoiding the
-    /// virtual dispatch cost.
-    ///
-    /// More importantly, it makes it possible for scorers to implement
-    /// important optimization (e.g. BlockWAND for union).
-    fn for_each_pruning(
-        &self,
-        threshold: f32,
-        reader: &SegmentReader,
-        callback: &mut dyn FnMut(DocId, Score) -> Score,
-    ) -> crate::Result<()> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
-        for_each_pruning_scorer(&mut scorer, threshold, callback);
-        Ok(())
-    }
 }

 impl TermWeight {
@@ -1,9 +1,10 @@
 use crate::common::TinySet;
-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::{DocSet, SkipResult};
 use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
 use crate::query::Scorer;
 use crate::DocId;
 use crate::Score;
+use std::cmp::Ordering;

 const HORIZON_NUM_TINYBITSETS: usize = 64;
 const HORIZON: u32 = 64u32 * HORIZON_NUM_TINYBITSETS as u32;
@@ -46,9 +47,17 @@ where
     fn from(docsets: Vec<TScorer>) -> Union<TScorer, TScoreCombiner> {
         let non_empty_docsets: Vec<TScorer> = docsets
             .into_iter()
-            .filter(|docset| docset.doc() != TERMINATED)
+            .flat_map(
+                |mut docset| {
+                    if docset.advance() {
+                        Some(docset)
+                    } else {
+                        None
+                    }
+                },
+            )
             .collect();
-        let mut union = Union {
+        Union {
             docsets: non_empty_docsets,
             bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
             scores: Box::new([TScoreCombiner::default(); HORIZON as usize]),
@@ -56,13 +65,7 @@ where
             offset: 0,
             doc: 0,
             score: 0f32,
-        };
-        if union.refill() {
-            union.advance();
-        } else {
-            union.doc = TERMINATED;
         }
-        union
     }
 }

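`Union` buffers upcoming matches into `TinySet` buckets that cover a window of `HORIZON = 64 * 64 = 4096` documents starting at `offset`; `refill` in the next hunk fills them. The bucket arithmetic, spelled out as a small standalone function:

```rust
// Bucket arithmetic behind the union's buffering (cf. refill() below).
fn bucket_and_bit(doc: u32, offset: u32) -> (usize, u32) {
    let delta = doc - offset; // assumes offset <= doc < offset + 4096
    ((delta / 64) as usize, delta % 64) // (TinySet index, bit inside it)
}

fn main() {
    assert_eq!(bucket_and_bit(4_100, 4_096), (0, 4));
    assert_eq!(bucket_and_bit(4_096 + 130, 4_096), (2, 2));
}
```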
@@ -83,7 +86,7 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
         let delta = doc - min_doc;
         bitsets[(delta / 64) as usize].insert_mut(delta % 64u32);
         score_combiner[delta as usize].update(scorer);
-        if scorer.advance() == TERMINATED {
+        if !scorer.advance() {
             // remove the docset, it has been entirely consumed.
             return true;
         }
@@ -96,7 +99,6 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombin
         if let Some(min_doc) = self.docsets.iter().map(DocSet::doc).min() {
             self.offset = min_doc;
             self.cursor = 0;
-            self.doc = min_doc;
             refill(
                 &mut self.docsets,
                 &mut *self.bitsets,
@@ -131,23 +133,30 @@ where
     TScorer: Scorer,
     TScoreCombiner: ScoreCombiner,
 {
-    fn advance(&mut self) -> DocId {
+    fn advance(&mut self) -> bool {
         if self.advance_buffered() {
-            return self.doc;
+            return true;
         }
-        if !self.refill() {
-            self.doc = TERMINATED;
-            return TERMINATED;
+        if self.refill() {
+            self.advance();
+            true
+        } else {
+            false
         }
-        if !self.advance_buffered() {
-            return TERMINATED;
-        }
-        self.doc
     }

-    fn seek(&mut self, target: DocId) -> DocId {
-        if self.doc >= target {
-            return self.doc;
+    fn skip_next(&mut self, target: DocId) -> SkipResult {
+        if !self.advance() {
+            return SkipResult::End;
+        }
+        match self.doc.cmp(&target) {
+            Ordering::Equal => {
+                return SkipResult::Reached;
+            }
+            Ordering::Greater => {
+                return SkipResult::OverStep;
+            }
+            Ordering::Less => {}
         }
         let gap = target - self.offset;
         if gap < HORIZON {
@@ -165,11 +174,18 @@ where

             // Advancing until we reach the end of the bucket
             // or we reach a doc greater or equal to the target.
-            let mut doc = self.doc();
-            while doc < target {
-                doc = self.advance();
+            while self.advance() {
+                match self.doc().cmp(&target) {
+                    Ordering::Equal => {
+                        return SkipResult::Reached;
+                    }
+                    Ordering::Greater => {
+                        return SkipResult::OverStep;
+                    }
+                    Ordering::Less => {}
+                }
             }
-            doc
+            SkipResult::End
         } else {
             // clear the buffered info.
             for obsolete_tinyset in self.bitsets.iter_mut() {
@@ -183,42 +199,45 @@ where
             // advance all docsets to a doc >= to the target.
             #[cfg_attr(feature = "cargo-clippy", allow(clippy::clippy::collapsible_if))]
             unordered_drain_filter(&mut self.docsets, |docset| {
-                docset.seek(target) == TERMINATED
+                if docset.doc() < target {
+                    if docset.skip_next(target) == SkipResult::End {
+                        return true;
+                    }
+                }
+                false
             });

             // at this point all of the docsets
             // are positionned on a doc >= to the target.
-            if !self.refill() {
-                self.doc = TERMINATED;
-                return TERMINATED;
+            if self.refill() {
+                self.advance();
+                if self.doc() == target {
+                    SkipResult::Reached
+                } else {
+                    debug_assert!(self.doc() > target);
+                    SkipResult::OverStep
+                }
+            } else {
+                SkipResult::End
             }
-            self.advance()
         }
     }

-    // TODO Also implement `count` with deletes efficiently.
+    // TODO implement `count` efficiently.

     fn doc(&self) -> DocId {
         self.doc
     }

     fn size_hint(&self) -> u32 {
-        self.docsets
-            .iter()
-            .map(|docset| docset.size_hint())
-            .max()
-            .unwrap_or(0u32)
+        0u32
     }

     fn count_including_deleted(&mut self) -> u32 {
-        if self.doc == TERMINATED {
-            return 0;
-        }
         let mut count = self.bitsets[self.cursor..HORIZON_NUM_TINYBITSETS]
             .iter()
             .map(|bitset| bitset.len())
-            .sum::<u32>()
-            + 1;
+            .sum::<u32>();
         for bitset in self.bitsets.iter_mut() {
             bitset.clear();
         }
@@ -248,7 +267,7 @@ mod tests {

     use super::Union;
     use super::HORIZON;
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::{DocSet, SkipResult};
     use crate::postings::tests::test_skip_against_unoptimized;
     use crate::query::score_combiner::DoNothingCombiner;
     use crate::query::ConstScorer;
@@ -277,12 +296,12 @@ mod tests {
         };
         let mut union: Union<_, DoNothingCombiner> = make_union();
         let mut count = 0;
-        while union.doc() != TERMINATED {
+        while union.advance() {
+            assert!(union_expected.advance());
             assert_eq!(union_expected.doc(), union.doc());
-            assert_eq!(union_expected.advance(), union.advance());
             count += 1;
         }
-        assert_eq!(union_expected.advance(), TERMINATED);
+        assert!(!union_expected.advance());
         assert_eq!(count, make_union().count_including_deleted());
     }

@@ -310,7 +329,9 @@ mod tests {
     fn test_aux_union_skip(docs_list: &[Vec<DocId>], skip_targets: Vec<DocId>) {
         let mut btree_set = BTreeSet::new();
         for docs in docs_list {
-            btree_set.extend(docs.iter().cloned());
+            for &doc in docs.iter() {
+                btree_set.insert(doc);
+            }
         }
         let docset_factory = || {
             let res: Box<dyn DocSet> = Box::new(Union::<_, DoNothingCombiner>::from(
@@ -325,10 +346,10 @@ mod tests {
         };
         let mut docset = docset_factory();
         for el in btree_set {
+            assert!(docset.advance());
             assert_eq!(el, docset.doc());
-            docset.advance();
         }
-        assert_eq!(docset.doc(), TERMINATED);
+        assert!(!docset.advance());
         test_skip_against_unoptimized(docset_factory, skip_targets);
     }

@@ -351,10 +372,10 @@ mod tests {
             ConstScorer::from(VecDocSet::from(vec![0u32, 5u32])),
             ConstScorer::from(VecDocSet::from(vec![1u32, 4u32])),
         ]);
+        assert!(docset.advance());
         assert_eq!(docset.doc(), 0u32);
-        assert_eq!(docset.seek(0u32), 0u32);
-        assert_eq!(docset.seek(0u32), 0u32);
-        assert_eq!(docset.doc(), 0u32)
+        assert_eq!(docset.skip_next(0u32), SkipResult::OverStep);
+        assert_eq!(docset.doc(), 1u32)
     }

     #[test]
@@ -1,8 +1,9 @@
 #![allow(dead_code)]

 use crate::common::HasLen;
-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::DocSet;
 use crate::DocId;
+use std::num::Wrapping;

 /// Simulate a `Postings` objects from a `VecPostings`.
 /// `VecPostings` only exist for testing purposes.
@@ -11,30 +12,26 @@ use crate::DocId;
 /// No positions are returned.
 pub struct VecDocSet {
     doc_ids: Vec<DocId>,
-    cursor: usize,
+    cursor: Wrapping<usize>,
 }

 impl From<Vec<DocId>> for VecDocSet {
     fn from(doc_ids: Vec<DocId>) -> VecDocSet {
-        VecDocSet { doc_ids, cursor: 0 }
+        VecDocSet {
+            doc_ids,
+            cursor: Wrapping(usize::max_value()),
+        }
     }
 }

 impl DocSet for VecDocSet {
-    fn advance(&mut self) -> DocId {
-        self.cursor += 1;
-        if self.cursor >= self.doc_ids.len() {
-            self.cursor = self.doc_ids.len();
-            return TERMINATED;
-        }
-        self.doc()
+    fn advance(&mut self) -> bool {
+        self.cursor += Wrapping(1);
+        self.doc_ids.len() > self.cursor.0
     }

     fn doc(&self) -> DocId {
-        if self.cursor == self.doc_ids.len() {
-            return TERMINATED;
-        }
-        self.doc_ids[self.cursor]
+        self.doc_ids[self.cursor.0]
     }

     fn size_hint(&self) -> u32 {
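`VecDocSet` now encodes the before-first position as `usize::max_value()` and relies on wrapping arithmetic so that the very first `advance()` lands on index 0 without a special case or branch:

```rust
use std::num::Wrapping;

// The before-first cursor trick used by VecDocSet above.
fn main() {
    let mut cursor = Wrapping(usize::max_value());
    cursor += Wrapping(1); // what the first advance() does
    assert_eq!(cursor.0, 0); // doc_ids[0] becomes the current document
}
```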
@@ -52,21 +49,22 @@ impl HasLen for VecDocSet {
 pub mod tests {

     use super::*;
-    use crate::docset::DocSet;
+    use crate::docset::{DocSet, SkipResult};
     use crate::DocId;

     #[test]
     pub fn test_vec_postings() {
         let doc_ids: Vec<DocId> = (0u32..1024u32).map(|e| e * 3).collect();
         let mut postings = VecDocSet::from(doc_ids);
+        assert!(postings.advance());
         assert_eq!(postings.doc(), 0u32);
-        assert_eq!(postings.advance(), 3u32);
+        assert!(postings.advance());
         assert_eq!(postings.doc(), 3u32);
-        assert_eq!(postings.seek(14u32), 15u32);
+        assert_eq!(postings.skip_next(14u32), SkipResult::OverStep);
         assert_eq!(postings.doc(), 15u32);
-        assert_eq!(postings.seek(300u32), 300u32);
+        assert_eq!(postings.skip_next(300u32), SkipResult::Reached);
         assert_eq!(postings.doc(), 300u32);
-        assert_eq!(postings.seek(6000u32), TERMINATED);
+        assert_eq!(postings.skip_next(6000u32), SkipResult::End);
     }

     #[test]
@@ -1,45 +1,7 @@
 use super::Scorer;
 use crate::core::SegmentReader;
 use crate::query::Explanation;
-use crate::{DocId, Score, TERMINATED};
+use crate::DocId;

-/// Iterates through all of the document matched by the DocSet
-/// `DocSet` and push the scored documents to the collector.
-pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
-    scorer: &mut TScorer,
-    callback: &mut dyn FnMut(DocId, Score),
-) {
-    let mut doc = scorer.doc();
-    while doc != TERMINATED {
-        callback(doc, scorer.score());
-        doc = scorer.advance();
-    }
-}
-
-/// Calls `callback` with all of the `(doc, score)` for which score
-/// is exceeding a given threshold.
-///
-/// This method is useful for the TopDocs collector.
-/// For all docsets, the blanket implementation has the benefit
-/// of prefiltering (doc, score) pairs, avoiding the
-/// virtual dispatch cost.
-///
-/// More importantly, it makes it possible for scorers to implement
-/// important optimization (e.g. BlockWAND for union).
-pub(crate) fn for_each_pruning_scorer<TScorer: Scorer + ?Sized>(
-    scorer: &mut TScorer,
-    mut threshold: f32,
-    callback: &mut dyn FnMut(DocId, Score) -> Score,
-) {
-    let mut doc = scorer.doc();
-    while doc != TERMINATED {
-        let score = scorer.score();
-        if score > threshold {
-            threshold = callback(doc, score);
-        }
-        doc = scorer.advance();
-    }
-}
-
 /// A Weight is the specialization of a Query
 /// for a given set of segments.
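The deleted `for_each_pruning_scorer` let the callback return a new threshold, so a top-k consumer could keep raising the bar as better hits arrived. A crate-internal usage sketch of that contract (illustrative only; the helper exists only on the base branch):

```rust
// Sketch of the pruning contract: the callback's return value becomes the new
// threshold, so only scores strictly above the current k-th best are reported.
let mut top: Vec<(DocId, Score)> = Vec::new();
for_each_pruning_scorer(&mut scorer, 0.0f32, &mut |doc, score| {
    top.push((doc, score));
    top.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
    top.truncate(10);
    if top.len() == 10 {
        top[9].1 // heap full: raise the bar to the current 10th score
    } else {
        0.0f32
    }
});
```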
@@ -65,37 +27,4 @@ pub trait Weight: Send + Sync + 'static {
             Ok(scorer.count_including_deleted())
         }
     }
-
-    /// Iterates through all of the document matched by the DocSet
-    /// `DocSet` and push the scored documents to the collector.
-    fn for_each(
-        &self,
-        reader: &SegmentReader,
-        callback: &mut dyn FnMut(DocId, Score),
-    ) -> crate::Result<()> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
-        for_each_scorer(scorer.as_mut(), callback);
-        Ok(())
-    }
-
-    /// Calls `callback` with all of the `(doc, score)` for which score
-    /// is exceeding a given threshold.
-    ///
-    /// This method is useful for the TopDocs collector.
-    /// For all docsets, the blanket implementation has the benefit
-    /// of prefiltering (doc, score) pairs, avoiding the
-    /// virtual dispatch cost.
-    ///
-    /// More importantly, it makes it possible for scorers to implement
-    /// important optimization (e.g. BlockWAND for union).
-    fn for_each_pruning(
-        &self,
-        threshold: f32,
-        reader: &SegmentReader,
-        callback: &mut dyn FnMut(DocId, Score) -> Score,
-    ) -> crate::Result<()> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
-        for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
-        Ok(())
-    }
 }
|
|||||||
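`for_each_pruning_scorer` (removed here) drives a threshold-raising callback: a document is only surfaced if it beats the current threshold, and the callback returns the new threshold, which is what lets scorers apply Block-WAND-style skipping per the removed doc comment. A self-contained toy sketch of that contract over a plain hit list (not the actual scorer plumbing):

```rust
use tantivy::{DocId, Score};

fn main() {
    // Stand-in for a scorer's (doc, score) stream.
    let hits: Vec<(DocId, Score)> = vec![(0, 0.4), (3, 1.2), (7, 0.9), (9, 2.5)];

    let mut threshold: Score = 0.0;
    let mut best: Option<(DocId, Score)> = None;
    for (doc, score) in hits {
        // Mirrors the pruning loop above: below-threshold docs are skipped,
        // and the "callback" returns the new threshold (a top-1 collector here).
        if score > threshold {
            best = Some((doc, score));
            threshold = score;
        }
    }
    assert_eq!(best, Some((9, 2.5)));
}
```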
src/reader/index_writer_reader.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
+use crate::directory::{WatchCallbackList, WatchHandle};
+use crate::indexer::SegmentRegisters;
+use crate::reader::pool::Pool;
+use crate::{Index, LeasedItem, Searcher, Segment, SegmentReader};
+use std::iter::repeat_with;
+use std::sync::{Arc, RwLock, Weak};
+
+struct InnerNRTReader {
+    num_searchers: usize,
+    index: Index,
+    searcher_pool: Pool<Searcher>,
+    segment_registers: Arc<RwLock<SegmentRegisters>>,
+}
+
+impl InnerNRTReader {
+    fn load_segment_readers(&self) -> crate::Result<Vec<SegmentReader>> {
+        let segments: Vec<Segment> = self
+            .segment_registers
+            .read()
+            .expect("lock should never be polluted. Please report.")
+            .committed_segment();
+        segments
+            .iter()
+            .map(SegmentReader::open)
+            .collect::<crate::Result<Vec<SegmentReader>>>()
+    }
+
+    pub fn reload(&self) -> crate::Result<()> {
+        let segment_readers: Vec<SegmentReader> = self.load_segment_readers()?;
+        let schema = self.index.schema();
+        let searchers = repeat_with(|| {
+            Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
+        })
+        .take(self.num_searchers)
+        .collect();
+        self.searcher_pool.publish_new_generation(searchers);
+        Ok(())
+    }
+
+    pub fn searcher(&self) -> LeasedItem<Searcher> {
+        self.searcher_pool.acquire()
+    }
+}
+
+#[derive(Clone)]
+pub struct NRTReader {
+    inner: Arc<InnerNRTReader>,
+    watch_handle: WatchHandle,
+}
+
+impl NRTReader {
+    pub fn reload(&self) -> crate::Result<()> {
+        self.inner.reload()
+    }
+
+    pub fn searcher(&self) -> LeasedItem<Searcher> {
+        self.inner.searcher()
+    }
+
+    pub(crate) fn create(
+        num_searchers: usize,
+        index: Index,
+        segment_registers: Arc<RwLock<SegmentRegisters>>,
+        watch_callback_list: &WatchCallbackList,
+    ) -> crate::Result<Self> {
+        let inner_reader: Arc<InnerNRTReader> = Arc::new(InnerNRTReader {
+            num_searchers,
+            index,
+            searcher_pool: Pool::new(),
+            segment_registers,
+        });
+        let inner_reader_weak: Weak<InnerNRTReader> = Arc::downgrade(&inner_reader);
+        let watch_handle = watch_callback_list.subscribe(Box::new(move || {
+            if let Some(nrt_reader_arc) = inner_reader_weak.upgrade() {
+                let _ = nrt_reader_arc.reload();
+            }
+        }));
+        inner_reader.reload()?;
+        Ok(NRTReader {
+            inner: inner_reader,
+            watch_handle,
+        })
+    }
+}
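`create` subscribes to the watch list through a `Weak` pointer, so the long-lived callback cannot keep the reader alive once every external handle is dropped. The pattern in isolation, with plain std types (`Resource` is hypothetical):

```rust
use std::sync::{Arc, Weak};

struct Resource;

impl Resource {
    fn reload(&self) { /* refresh some state */ }
}

fn subscribe(resource: &Arc<Resource>) -> Box<dyn Fn() + Send + Sync> {
    // Downgrade so the callback does not extend the resource's lifetime:
    // after the last Arc is dropped, upgrade() returns None and the
    // callback degrades to a no-op instead of leaking the resource.
    let weak: Weak<Resource> = Arc::downgrade(resource);
    Box::new(move || {
        if let Some(resource) = weak.upgrade() {
            resource.reload();
        }
    })
}
```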
src/reader/meta_file_reader.rs (new file, 177 lines)
@@ -0,0 +1,177 @@
+use super::pool::Pool;
+use crate::core::Segment;
+use crate::directory::Directory;
+use crate::directory::WatchHandle;
+use crate::directory::META_LOCK;
+use crate::Searcher;
+use crate::SegmentReader;
+use crate::{Index, LeasedItem};
+use crate::{IndexReader, Result};
+use std::iter::repeat_with;
+use std::sync::Arc;
+
+/// Defines when a new version of the index should be reloaded.
+///
+/// Regardless of whether you search and index in the same process, tantivy does not necessarily
+/// reflects the change that are commited to your index. `ReloadPolicy` precisely helps you define
+/// when you want your index to be reloaded.
+#[derive(Clone, Copy)]
+pub enum ReloadPolicy {
+    /// The index is entirely reloaded manually.
+    /// All updates of the index should be manual.
+    ///
+    /// No change is reflected automatically. You are required to call `.load_seacher()` manually.
+    Manual,
+    /// The index is reloaded within milliseconds after a new commit is available.
+    /// This is made possible by watching changes in the `meta.json` file.
+    OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
+}
+
+/// `IndexReader` builder
+///
+/// It makes it possible to set the following values.
+///
+/// - `num_searchers` (by default, the number of detected CPU threads):
+///
+/// When `num_searchers` queries are requested at the same time, the `num_searchers` will block
+/// until the one of the searcher in-use gets released.
+/// - `reload_policy` (by default `ReloadPolicy::OnCommit`):
+///
+/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
+#[derive(Clone)]
+pub struct IndexReaderBuilder {
+    num_searchers: usize,
+    reload_policy: ReloadPolicy,
+    index: Index,
+}
+
+impl IndexReaderBuilder {
+    pub(crate) fn new(index: Index) -> IndexReaderBuilder {
+        IndexReaderBuilder {
+            num_searchers: num_cpus::get(),
+            reload_policy: ReloadPolicy::Manual,
+            index,
+        }
+    }
+
+    /// Sets the reload_policy.
+    ///
+    /// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
+    pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
+        self.reload_policy = reload_policy;
+        self
+    }
+
+    /// Sets the number of `Searcher` in the searcher pool.
+    pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
+        self.num_searchers = num_searchers;
+        self
+    }
+
+    /// Building the reader is a non-trivial operation that requires
+    /// to open different segment readers. It may take hundreds of milliseconds
+    /// of time and it may return an error.
+    pub fn try_into(self) -> crate::Result<IndexReader> {
+        let inner_reader = MetaFileIndexReaderInner {
+            index: self.index,
+            num_searchers: self.num_searchers,
+            searcher_pool: Pool::new(),
+        };
+        inner_reader.reload()?;
+        let inner_reader_arc = Arc::new(inner_reader);
+        let watch_handle_opt: Option<WatchHandle>;
+        match self.reload_policy {
+            ReloadPolicy::Manual => {
+                // No need to set anything...
+                watch_handle_opt = None;
+            }
+            ReloadPolicy::OnCommit => {
+                let inner_reader_arc_clone = inner_reader_arc.clone();
+                let callback = move || {
+                    if let Err(err) = inner_reader_arc_clone.reload() {
+                        error!(
+                            "Error while loading searcher after commit was detected. {:?}",
+                            err
+                        );
+                    }
+                };
+                let watch_handle = inner_reader_arc
+                    .index
+                    .directory()
+                    .watch(Box::new(callback))?;
+                watch_handle_opt = Some(watch_handle);
+            }
+        }
+        Ok(IndexReader::from(MetaFileIndexReader {
+            inner: inner_reader_arc,
+            watch_handle_opt,
+        }))
+    }
+}
+
+struct MetaFileIndexReaderInner {
+    num_searchers: usize,
+    searcher_pool: Pool<Searcher>,
+    index: Index,
+}
+
+impl MetaFileIndexReaderInner {
+    fn load_segment_readers(&self) -> crate::Result<Vec<SegmentReader>> {
+        // We keep the lock until we have effectively finished opening the
+        // the `SegmentReader` because it prevents a diffferent process
+        // to garbage collect these file while we open them.
+        //
+        // Once opened, on linux & mac, the mmap will remain valid after
+        // the file has been deleted
+        // On windows, the file deletion will fail.
+        let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
+        let searchable_segments = self.searchable_segments()?;
+        searchable_segments
+            .iter()
+            .map(SegmentReader::open)
+            .collect::<Result<_>>()
+    }
+
+    fn reload(&self) -> crate::Result<()> {
+        let segment_readers: Vec<SegmentReader> = self.load_segment_readers()?;
+        let schema = self.index.schema();
+        let searchers = repeat_with(|| {
+            Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
+        })
+        .take(self.num_searchers)
+        .collect();
+        self.searcher_pool.publish_new_generation(searchers);
+        Ok(())
+    }
+
+    /// Returns the list of segments that are searchable
+    fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
+        self.index.searchable_segments()
+    }
+
+    fn searcher(&self) -> LeasedItem<Searcher> {
+        self.searcher_pool.acquire()
+    }
+}
+
+/// `IndexReader` is your entry point to read and search the index.
+///
+/// It controls when a new version of the index should be loaded and lends
+/// you instances of `Searcher` for the last loaded version.
+///
+/// `Clone` does not clone the different pool of searcher. `IndexReader`
+/// just wraps and `Arc`.
+#[derive(Clone)]
+pub struct MetaFileIndexReader {
+    inner: Arc<MetaFileIndexReaderInner>,
+    watch_handle_opt: Option<WatchHandle>,
+}
+
+impl MetaFileIndexReader {
+    pub fn reload(&self) -> crate::Result<()> {
+        self.inner.reload()
+    }
+    pub fn searcher(&self) -> LeasedItem<Searcher> {
+        self.inner.searcher()
+    }
+}
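A minimal usage sketch of this builder through tantivy's public API (the schema and index are illustrative boilerplate). Note that `new` above defaults to `ReloadPolicy::Manual` even though the doc comment still advertises `OnCommit`:

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, ReloadPolicy};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // Configure the pool size and reload policy, then open the reader.
    // try_into() eagerly opens the segment readers, so it can fail.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .num_searchers(4)
        .try_into()?;
    let searcher = reader.searcher();
    assert_eq!(searcher.num_docs(), 0);
    Ok(())
}
```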
@@ -1,159 +1,14 @@
+mod index_writer_reader;
+mod meta_file_reader;
 mod pool;

+use self::meta_file_reader::MetaFileIndexReader;
+pub use self::meta_file_reader::{IndexReaderBuilder, ReloadPolicy};
 pub use self::pool::LeasedItem;
-use self::pool::Pool;
-use crate::core::Segment;
+pub(crate) use crate::reader::index_writer_reader::NRTReader;
-use crate::directory::Directory;
-use crate::directory::WatchHandle;
-use crate::directory::META_LOCK;
-use crate::Index;
 use crate::Searcher;
-use crate::SegmentReader;
-use std::convert::TryInto;
-use std::sync::Arc;
-
-/// Defines when a new version of the index should be reloaded.
-///
-/// Regardless of whether you search and index in the same process, tantivy does not necessarily
-/// reflects the change that are commited to your index. `ReloadPolicy` precisely helps you define
-/// when you want your index to be reloaded.
-#[derive(Clone, Copy)]
-pub enum ReloadPolicy {
-    /// The index is entirely reloaded manually.
-    /// All updates of the index should be manual.
-    ///
-    /// No change is reflected automatically. You are required to call `.load_seacher()` manually.
-    Manual,
-    /// The index is reloaded within milliseconds after a new commit is available.
-    /// This is made possible by watching changes in the `meta.json` file.
-    OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
-}
-
-/// `IndexReader` builder
-///
-/// It makes it possible to set the following values.
-///
-/// - `num_searchers` (by default, the number of detected CPU threads):
-///
-/// When `num_searchers` queries are requested at the same time, the `num_searchers` will block
-/// until the one of the searcher in-use gets released.
-/// - `reload_policy` (by default `ReloadPolicy::OnCommit`):
-///
-/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
-#[derive(Clone)]
-pub struct IndexReaderBuilder {
-    num_searchers: usize,
-    reload_policy: ReloadPolicy,
-    index: Index,
-}
-
-impl IndexReaderBuilder {
-    pub(crate) fn new(index: Index) -> IndexReaderBuilder {
-        IndexReaderBuilder {
-            num_searchers: num_cpus::get(),
-            reload_policy: ReloadPolicy::OnCommit,
-            index,
-        }
-    }
-
-    /// Builds the reader.
-    ///
-    /// Building the reader is a non-trivial operation that requires
-    /// to open different segment readers. It may take hundreds of milliseconds
-    /// of time and it may return an error.
-    pub fn try_into(self) -> crate::Result<IndexReader> {
-        let inner_reader = InnerIndexReader {
-            index: self.index,
-            num_searchers: self.num_searchers,
-            searcher_pool: Pool::new(),
-        };
-        inner_reader.reload()?;
-        let inner_reader_arc = Arc::new(inner_reader);
-        let watch_handle_opt: Option<WatchHandle>;
-        match self.reload_policy {
-            ReloadPolicy::Manual => {
-                // No need to set anything...
-                watch_handle_opt = None;
-            }
-            ReloadPolicy::OnCommit => {
-                let inner_reader_arc_clone = inner_reader_arc.clone();
-                let callback = move || {
-                    if let Err(err) = inner_reader_arc_clone.reload() {
-                        error!(
-                            "Error while loading searcher after commit was detected. {:?}",
-                            err
-                        );
-                    }
-                };
-                let watch_handle = inner_reader_arc
-                    .index
-                    .directory()
-                    .watch(Box::new(callback))?;
-                watch_handle_opt = Some(watch_handle);
-            }
-        }
-        Ok(IndexReader {
-            inner: inner_reader_arc,
-            watch_handle_opt,
-        })
-    }
-
-    /// Sets the reload_policy.
-    ///
-    /// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
-    pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
-        self.reload_policy = reload_policy;
-        self
-    }
-
-    /// Sets the number of `Searcher` in the searcher pool.
-    pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
-        self.num_searchers = num_searchers;
-        self
-    }
-}
-
-impl TryInto<IndexReader> for IndexReaderBuilder {
-    type Error = crate::TantivyError;
-
-    fn try_into(self) -> crate::Result<IndexReader> {
-        IndexReaderBuilder::try_into(self)
-    }
-}
-
-struct InnerIndexReader {
-    num_searchers: usize,
-    searcher_pool: Pool<Searcher>,
-    index: Index,
-}
-
-impl InnerIndexReader {
-    fn reload(&self) -> crate::Result<()> {
-        let segment_readers: Vec<SegmentReader> = {
-            let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
-            let searchable_segments = self.searchable_segments()?;
-            searchable_segments
-                .iter()
-                .map(SegmentReader::open)
-                .collect::<crate::Result<_>>()?
-        };
-        let schema = self.index.schema();
-        let searchers = (0..self.num_searchers)
-            .map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
-            .collect();
-        self.searcher_pool.publish_new_generation(searchers);
-        Ok(())
-    }
-
-    /// Returns the list of segments that are searchable
-    fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
-        self.index.searchable_segments()
-    }
-
-    fn searcher(&self) -> LeasedItem<Searcher> {
-        self.searcher_pool.acquire()
-    }
-}
-
 /// `IndexReader` is your entry point to read and search the index.
 ///
@@ -163,17 +18,12 @@ impl InnerIndexReader {
 /// `Clone` does not clone the different pool of searcher. `IndexReader`
 /// just wraps and `Arc`.
 #[derive(Clone)]
-pub struct IndexReader {
-    inner: Arc<InnerIndexReader>,
-    watch_handle_opt: Option<WatchHandle>,
+pub enum IndexReader {
+    FromMetaFile(MetaFileIndexReader),
+    NRT(NRTReader),
 }

 impl IndexReader {
-    #[cfg(test)]
-    pub(crate) fn index(&self) -> Index {
-        self.inner.index.clone()
-    }
-
     /// Update searchers so that they reflect the state of the last
     /// `.commit()`.
     ///
@@ -184,7 +34,10 @@ impl IndexReader {
     /// This automatic reload can take 10s of milliseconds to kick in however, and in unit tests
     /// it can be nice to deterministically force the reload of searchers.
     pub fn reload(&self) -> crate::Result<()> {
-        self.inner.reload()
+        match self {
+            IndexReader::FromMetaFile(meta_file_reader) => meta_file_reader.reload(),
+            IndexReader::NRT(nrt_reader) => nrt_reader.reload(),
+        }
     }

     /// Returns a searcher
@@ -198,6 +51,21 @@ impl IndexReader {
     /// The same searcher must be used for a given query, as it ensures
     /// the use of a consistent segment set.
     pub fn searcher(&self) -> LeasedItem<Searcher> {
-        self.inner.searcher()
+        match self {
+            IndexReader::FromMetaFile(meta_file_reader) => meta_file_reader.searcher(),
+            IndexReader::NRT(nrt_reader) => nrt_reader.searcher(),
+        }
+    }
+}
+
+impl From<MetaFileIndexReader> for IndexReader {
+    fn from(meta_file_reader: MetaFileIndexReader) -> Self {
+        IndexReader::FromMetaFile(meta_file_reader)
+    }
+}
+
+impl From<NRTReader> for IndexReader {
+    fn from(nrt_reader: NRTReader) -> Self {
+        IndexReader::NRT(nrt_reader)
     }
 }
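Whichever variant sits behind the enum, the documented contract is the same: a commit is not visible until the reader reloads. A sketch with the `Manual` policy, using only public API (the index setup is illustrative boilerplate):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, ReloadPolicy};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .try_into()?;

    let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
    index_writer.add_document(doc!(body => "near real time"));
    index_writer.commit()?;

    // The commit is invisible until a new searcher generation is published.
    assert_eq!(reader.searcher().num_docs(), 0);
    reader.reload()?;
    assert_eq!(reader.searcher().num_docs(), 1);
    Ok(())
}
```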
@@ -3,6 +3,7 @@ use crate::common::BinarySerializable;
 use crate::common::VInt;
 use crate::tokenizer::PreTokenizedString;
 use crate::DateTime;
+use serde;
 use std::io::{self, Read, Write};
 use std::mem;

@@ -1,4 +1,5 @@
 use crate::common::BinarySerializable;
+use serde;
 use std::io;
 use std::io::Read;
 use std::io::Write;
@@ -12,13 +13,13 @@ pub struct Field(u32);

 impl Field {
     /// Create a new field object for the given FieldId.
-    pub const fn from_field_id(field_id: u32) -> Field {
+    pub fn from_field_id(field_id: u32) -> Field {
         Field(field_id)
     }

     /// Returns a u32 identifying uniquely a field within a schema.
     #[allow(clippy::trivially_copy_pass_by_ref)]
-    pub const fn field_id(&self) -> u32 {
+    pub fn field_id(&self) -> u32 {
         self.0
     }
 }
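Reverting `const fn` to plain `fn` is a small API regression: on the master side of this diff, a `Field` can be built in a const context. A one-line sketch of what the `const` form permits (the field id `0` is arbitrary):

```rust
use tantivy::schema::Field;

// Compiles only while `from_field_id` is a `const fn` (as on master);
// with the plain `fn` from this hunk, the initializer is rejected.
const BODY_FIELD: Field = Field::from_field_id(0);
```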
@@ -1,7 +1,10 @@
 use crate::common::BinarySerializable;
 use crate::schema::Field;
 use crate::schema::Value;
-use std::io::{self, Read, Write};
+use serde;
+use std::io;
+use std::io::Read;
+use std::io::Write;

 /// `FieldValue` holds together a `Field` and its `Value`.
 #[derive(Debug, Clone, Ord, PartialEq, Eq, PartialOrd, serde::Serialize, serde::Deserialize)]
@@ -156,17 +156,30 @@ mod tests {

     #[test]
     fn test_field_options() {
-        let field_options = STORED | TEXT;
-        assert!(field_options.is_stored());
-        assert!(field_options.get_indexing_options().is_some());
-        let mut schema_builder = Schema::builder();
-        schema_builder.add_text_field("body", TEXT);
-        let schema = schema_builder.build();
-        let field = schema.get_field("body").unwrap();
-        let field_entry = schema.get_field_entry(field);
-        assert!(matches!(field_entry.field_type(),
-            &FieldType::Str(ref text_options)
-            if text_options.get_indexing_options().unwrap().tokenizer() == "default"));
+        {
+            let field_options = STORED | TEXT;
+            assert!(field_options.is_stored());
+            assert!(field_options.get_indexing_options().is_some());
+        }
+        {
+            let mut schema_builder = Schema::builder();
+            schema_builder.add_text_field("body", TEXT);
+            let schema = schema_builder.build();
+            let field = schema.get_field("body").unwrap();
+            let field_entry = schema.get_field_entry(field);
+            match field_entry.field_type() {
+                &FieldType::Str(ref text_options) => {
+                    assert!(text_options.get_indexing_options().is_some());
+                    assert_eq!(
+                        text_options.get_indexing_options().unwrap().tokenizer(),
+                        "default"
+                    );
+                }
+                _ => {
+                    panic!("");
+                }
+            }
+        }
     }

     #[test]
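The hunk above unrolls a guarded `matches!` assertion into an explicit `match`. The two forms are equivalent; a standalone sketch with toy values:

```rust
fn main() {
    let value = Some(3);

    // Guarded `matches!` form, as on master...
    assert!(matches!(value, Some(n) if n > 2));

    // ...and the unrolled form, as in this branch.
    match value {
        Some(n) => assert!(n > 2),
        None => panic!("expected Some"),
    }
}
```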
@@ -1,3 +1,5 @@
+use snap;
+
 use std::io::{self, Read, Write};

 /// Name of the compression scheme used in the doc store.
@@ -3,8 +3,6 @@ use super::skiplist::SkipListBuilder;
 use super::StoreReader;
 use crate::common::CountingWriter;
 use crate::common::{BinarySerializable, VInt};
-use crate::directory::TerminatingWrite;
-use crate::directory::WritePtr;
 use crate::schema::Document;
 use crate::DocId;
 use std::io::{self, Write};
@@ -19,20 +17,20 @@ const BLOCK_SIZE: usize = 16_384;
 ///
 /// The skip list index on the other hand, is built in memory.
 ///
-pub struct StoreWriter {
+pub struct StoreWriter<W: io::Write> {
     doc: DocId,
     offset_index_writer: SkipListBuilder<u64>,
-    writer: CountingWriter<WritePtr>,
+    writer: CountingWriter<W>,
     intermediary_buffer: Vec<u8>,
     current_block: Vec<u8>,
 }

-impl StoreWriter {
+impl<W: io::Write> StoreWriter<W> {
     /// Create a store writer.
     ///
     /// The store writer will writes blocks on disc as
     /// document are added.
-    pub fn new(writer: WritePtr) -> StoreWriter {
+    pub fn new(writer: W) -> StoreWriter<W> {
         StoreWriter {
             doc: 0,
             offset_index_writer: SkipListBuilder::new(4),
@@ -102,7 +100,9 @@ impl StoreWriter {
     ///
     /// Compress the last unfinished block if any,
     /// and serializes the skip list index on disc.
-    pub fn close(mut self) -> io::Result<()> {
+    ///
+    /// The returned writer is not flushed.
+    pub fn close(mut self) -> io::Result<W> {
         if !self.current_block.is_empty() {
             self.write_and_compress_block()?;
         }
@@ -110,6 +110,7 @@ impl StoreWriter {
         self.offset_index_writer.write(&mut self.writer)?;
         header_offset.serialize(&mut self.writer)?;
         self.doc.serialize(&mut self.writer)?;
-        self.writer.terminate()
+        let (wrt, _) = self.writer.finish()?;
+        Ok(wrt)
     }
 }
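Making `StoreWriter` generic over `W: io::Write` decouples it from the directory's `WritePtr`, so the doc store can also be assembled in memory. A hedged sketch of the resulting call shape (crate-internal API, names as in the diff above; `store_to_vec` is a hypothetical helper):

```rust
use std::io;

// Hypothetical crate-internal helper: build a doc store into a buffer.
fn store_to_vec() -> io::Result<Vec<u8>> {
    let mut store_writer = StoreWriter::new(Vec::new());
    // ... append documents here ...
    // close() now hands the writer back instead of terminating a WritePtr;
    // per the new doc comment, the returned writer is not flushed.
    let bytes: Vec<u8> = store_writer.close()?;
    Ok(bytes)
}
```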
@@ -434,7 +434,6 @@ mod tests {

     #[test]
     fn test_automaton_search() {
-        use crate::query::DFAWrapper;
         use levenshtein_automata::LevenshteinAutomatonBuilder;

         const COUNTRIES: [&'static str; 7] = [
@@ -464,7 +463,7 @@ mod tests {

         // We can now build an entire dfa.
         let lev_automaton_builder = LevenshteinAutomatonBuilder::new(2, true);
-        let automaton = DFAWrapper(lev_automaton_builder.build_dfa("Spaen"));
+        let automaton = lev_automaton_builder.build_dfa("Spaen");

         let mut range = term_dict.search(automaton).into_stream();

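With `levenshtein_automata` 0.1, the DFA can feed `term_dict.search` directly, so the 0.13 `DFAWrapper` adapter goes away. A standalone sketch of the automaton itself, using that crate's documented API:

```rust
use levenshtein_automata::{Distance, LevenshteinAutomatonBuilder};

fn main() {
    // Max distance 2, with transposition counted as a single edit.
    let lev_automaton_builder = LevenshteinAutomatonBuilder::new(2, true);
    let dfa = lev_automaton_builder.build_dfa("Spaen");

    // "Spain" is within two edits of "Spaen".
    match dfa.eval("Spain") {
        Distance::Exact(d) => assert!(d <= 2),
        Distance::AtLeast(_) => panic!("expected a match within distance 2"),
    }
}
```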
@@ -7,6 +7,7 @@ use crate::postings::TermInfo;
 use crate::termdict::TermOrdinal;
 use once_cell::sync::Lazy;
 use std::io::{self, Write};
+use tantivy_fst;
 use tantivy_fst::raw::Fst;
 use tantivy_fst::Automaton;
