Mirror of https://github.com/quickwit-oss/tantivy.git
synced 2026-01-03 15:52:55 +00:00

Compare commits: nodeffeatf...limit-rand (44 commits)
| SHA1 |
|---|
| 9eb87e91cc |
| 36f43da4d8 |
| 19a02b2c30 |
| c339b05789 |
| 2d3c657f9d |
| 07f9b828ae |
| 70bae7ce4c |
| ac2a7273e6 |
| 4ce9517a82 |
| 73024a8af3 |
| e70e605fc3 |
| 439d6956a9 |
| 6530bf0eae |
| 151498cbe7 |
| 3a72b1cb98 |
| 2737822620 |
| 06c12ae221 |
| 4e4400af7f |
| 3f1ecf53ab |
| 0b583b8130 |
| 31d18dca1c |
| 5e06e7de5a |
| 8af53cbd36 |
| 4914076e8f |
| e04f47e922 |
| f355695581 |
| cbacdf0de8 |
| 3dd0322f4c |
| 2481c87be8 |
| b6a664b5f8 |
| 25b666a7c9 |
| 9b41912e66 |
| 8e74bb98b5 |
| 6db8bb49d6 |
| 410aed0176 |
| 00a239a712 |
| 68fe406924 |
| f71b04acb0 |
| 1ab7f660a4 |
| 0ebbc4cb5a |
| 5300cb5da0 |
| 7d773abc92 |
| c34541ccce |
| 1cc5bd706c |
.gitignore (vendored): 2 changes

@@ -1,4 +1,5 @@
 tantivy.iml
+proptest-regressions
 *.swp
 target
 target/debug
@@ -11,3 +12,4 @@ cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
+cargo-timing*
CHANGELOG.md: 18 changes

@@ -1,5 +1,21 @@
+Tantivy 0.14.0
+=========================
+- Remove dependency on atomicwrites #833. (Implemented by @pmasurel upon suggestion and research from @asafigan.)
+
+Tantivy 0.13.1
+===================
+Made `Query` and `Collector` `Send + Sync`.
+Updated misc dependency versions.
+
 Tantivy 0.13.0
 ======================
+Tantivy 0.13 introduces a change in the index format that will require
+you to reindex your index (BlockWAND information is added in the skiplist).
+The index size increase is minor, as this information is only added for
+full blocks.
+If you have a massive index for which reindexing is not an option, please contact me
+so that we can discuss possible solutions.
+
 - Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
 - Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
 - `MMapDirectory::open` does not return a `Result` anymore.
@@ -17,6 +33,8 @@ while doc != TERMINATED {
 The change made it possible to greatly simplify a lot of the docset's code.
 - Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
 - Added an offset option to the Top(.*)Collectors. (@robyoung)
+- Added Block WAND. Performance of top-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
+to the PISA team for answering all my questions!)
 
 Tantivy 0.12.0
 ======================
Cargo.toml: 40 changes

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.12.0"
+version = "0.14.0-dev"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,21 +13,20 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"
 
 [dependencies]
-base64 = "0.12.0"
-byteorder = "1.0"
-crc32fast = "1.2.0"
-once_cell = "1.0"
-regex ={version = "1.3.0", default-features = false, features = ["std"]}
+base64 = "0.12"
+byteorder = "1"
+crc32fast = "1"
+once_cell = "1"
+regex ={version = "1", default-features = false, features = ["std"]}
 tantivy-fst = "0.3"
 memmap = {version = "0.7", optional=true}
-lz4 = {version="1.20", optional=true}
+lz4 = {version="1", optional=true}
 snap = "1"
-atomicwrites = {version="0.2.2", optional=true}
-tempfile = "3.0"
+tempfile = {version="3", optional=true}
 log = "0.4"
-serde = {version="1.0", features=["derive"]}
-serde_json = "1.0"
-num_cpus = "1.2"
+serde = {version="1", features=["derive"]}
+serde_json = "1"
+num_cpus = "1"
 fs2={version="0.4", optional=true}
 levenshtein_automata = "0.2"
 notify = {version="4", optional=true}
@@ -35,20 +34,20 @@ uuid = { version = "0.8", features = ["v4", "serde"] }
 crossbeam = "0.7"
 futures = {version = "0.3", features=["thread-pool"] }
 owning_ref = "0.4"
-stable_deref_trait = "1.0.0"
-rust-stemmers = "1.2"
-downcast-rs = { version="1.0" }
-tantivy-query-grammar = { version="0.13", path="./query-grammar" }
+tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
+stable_deref_trait = "1"
+rust-stemmers = "1"
+downcast-rs = "1"
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
 census = "0.4"
-fnv = "1.0.6"
+fnv = "1"
 owned-read = "0.4"
 failure = "0.1"
-htmlescape = "0.3.1"
+htmlescape = "0.3"
 fail = "0.4"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1.0"
+smallvec = "1"
 rayon = "1"
 
 [target.'cfg(windows)'.dependencies]
@@ -58,6 +57,7 @@ winapi = "0.3"
 rand = "0.7"
 maplit = "1"
 matches = "0.1.8"
+proptest = "0.10"
 
 [dev-dependencies.fail]
 version = "0.4"
@@ -74,7 +74,7 @@ overflow-checks = true
 
 [features]
 default = ["mmap"]
-mmap = ["atomicwrites", "fs2", "memmap", "notify"]
+mmap = ["fs2", "tempfile", "memmap", "notify"]
 lz4-compression = ["lz4"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
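The dependency table above drops `atomicwrites` and makes `tempfile` optional, wiring it into the `mmap` feature instead. A minimal sketch of the write-then-rename pattern this swap relies on, assuming `tempfile`'s `NamedTempFile` API; it is an illustration, not the exact code from #833:

```rust
use std::io::Write;
use std::path::Path;

// Write to a temp file in the target's directory, then atomically rename
// over the destination (rename(2) is atomic on POSIX filesystems).
fn atomic_write(path: &Path, data: &[u8]) -> std::io::Result<()> {
    let dir = path.parent().expect("target must have a parent directory");
    let mut tmp = tempfile::NamedTempFile::new_in(dir)?;
    tmp.write_all(data)?;
    tmp.as_file().sync_all()?;
    tmp.persist(path)?;
    Ok(())
}
```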
@@ -34,11 +34,6 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 The following [benchmark](https://tantivy-search.github.io/bench/) break downs
 performance for different type of queries / collection.
 
-
-In general, Tantivy tends to be
-- slower than Lucene on union with a Top-K due to Block-WAND optimization.
-- faster than Lucene on intersection and phrase queries.
-
 Your mileage WILL vary depending on the nature of queries and their load.
 
 # Features
doc/src/index-format.md: new file, 50 lines

# Managed files

+----------+-----------+-------------------+
| content  | footer    | footer_len: u32   |
+----------+-----------+-------------------+

# Term Dictionary (Composite File)

+---------+---------------------------+------------------------+
| fst     | term_info_store           | footer_len: u64        |
+---------+---------------------------+------------------------+

During a merge, the term info store needs to fit in memory.
It has a cost of n bytes per term.

# term_info_store

+-------------------+---------------------------+------------------------+
| len_block_meta    | block_meta                | term_infos             |
+-------------------+---------------------------+------------------------+

# inverted_index

+------------------------+---------------------------+------------------------+
| total_num_tokens: u64  | posting_lists..           | term_infos             |
+------------------------+---------------------------+------------------------+

# postings lists

+------------------------+---------------------------+------------------------+
|
+

# composite file

+----------------+-----+----------------+----------------------+----------------+
| field file 1   | ... | field file n   |composite file footer | footer len: u32|
+----------------+-----+----------------+----------------------+----------------+

# composite file footer

+-----------------+---------------------------------------+
|num fields: vint | (file_addr, offset_delta: vint) []... |
+-----------------+---------------------------------------+

# FileAddr

+--------------+--------------+
| field: u32   | idx: VInt    |
+--------------+--------------+

# Posting lists

+-----------------------------------------+
| skip_reader
+-----------------------------------------+
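Every layout above ends in a little-endian length trailer. A minimal sketch of splitting a managed file into content and footer using the `footer_len: u32` field; `split_footer` is a hypothetical helper for illustration, not tantivy's actual API:

```rust
use std::convert::TryInto;

// Split `content | footer | footer_len: u32` from the back of the file.
// Assumes the file is at least 4 bytes and uses little-endian encoding.
fn split_footer(file: &[u8]) -> (&[u8], &[u8]) {
    let (body, len_bytes) = file.split_at(file.len() - 4);
    let footer_len = u32::from_le_bytes(len_bytes.try_into().unwrap()) as usize;
    let (content, footer) = body.split_at(body.len() - footer_len);
    (content, footer)
}
```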
@@ -112,18 +112,6 @@ fn main() -> tantivy::Result<()> {
         limbs and branches that arch over the pool"
     ));
 
-    index_writer.add_document(doc!(
-    title => "Of Mice and Men",
-    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
-            bank and runs deep and green. The water is warm too, for it has slipped twinkling \
-            over the yellow sands in the sunlight before reaching the narrow pool. On one \
-            side of the river the golden foothill slopes curve up to the strong and rocky \
-            Gabilan Mountains, but on the valley side the water is lined with trees—willows \
-            fresh and green with every spring, carrying in their lower leaf junctures the \
-            debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
-            limbs and branches that arch over the pool"
-    ));
-
     // Multivalued field just need to be repeated.
     index_writer.add_document(doc!(
     title => "Frankenstein",
@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::Field;
 use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::{doc, Index, SegmentReader, TantivyError};
+use tantivy::{doc, Index, Score, SegmentReader, TantivyError};
 
 #[derive(Default)]
 struct Stats {
@@ -114,7 +114,7 @@ struct StatsSegmentCollector {
 impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;
 
-    fn collect(&mut self, doc: u32, _score: f32) {
+    fn collect(&mut self, doc: u32, _score: Score) {
         let value = self.fast_field_reader.get(doc) as f64;
         self.stats.count += 1;
         self.stats.sum += value;
@@ -117,11 +117,16 @@ fn main() -> tantivy::Result<()> {
     if let Some(mut block_segment_postings) =
         inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
     {
-        while block_segment_postings.advance() {
+        loop {
+            let docs = block_segment_postings.docs();
+            if docs.is_empty() {
+                break;
+            }
             // Once again these docs MAY contains deleted documents as well.
             let docs = block_segment_postings.docs();
             // Prints `Docs [0, 2].`
             println!("Docs {:?}", docs);
+            block_segment_postings.advance();
         }
     }
 }
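This restructuring tracks the docset API change referenced in the changelog hunk above (`while doc != TERMINATED`): `advance()` no longer doubles as an exhaustion test in this example, so the loop detects the end by an empty block and advances explicitly at the bottom of each iteration.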
query-grammar/Cargo.toml: 2 changes

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.13.0"
+version = "0.14.0-dev"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -31,22 +31,12 @@ impl Occur {
 
     /// Compose two occur values.
     pub fn compose(left: Occur, right: Occur) -> Occur {
-        match left {
-            Occur::Should => right,
-            Occur::Must => {
-                if right == Occur::MustNot {
-                    Occur::MustNot
-                } else {
-                    Occur::Must
-                }
-            }
-            Occur::MustNot => {
-                if right == Occur::MustNot {
-                    Occur::Must
-                } else {
-                    Occur::MustNot
-                }
-            }
+        match (left, right) {
+            (Occur::Should, _) => right,
+            (Occur::Must, Occur::MustNot) => Occur::MustNot,
+            (Occur::Must, _) => Occur::Must,
+            (Occur::MustNot, Occur::MustNot) => Occur::Must,
+            (Occur::MustNot, _) => Occur::MustNot,
         }
     }
 }
@@ -56,3 +46,27 @@ impl fmt::Display for Occur {
         f.write_char(self.to_char())
     }
 }
+
+#[cfg(test)]
+mod test {
+    use crate::Occur;
+
+    #[test]
+    fn test_occur_compose() {
+        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
+        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
+        assert_eq!(
+            Occur::compose(Occur::Should, Occur::MustNot),
+            Occur::MustNot
+        );
+        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
+        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
+        assert_eq!(
+            Occur::compose(Occur::MustNot, Occur::Should),
+            Occur::MustNot
+        );
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
+        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
+    }
+}
@@ -9,8 +9,10 @@ use combine::{
 
 fn field<'a>() -> impl Parser<&'a str, Output = String> {
     (
-        letter(),
-        many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
+        (letter().or(char('_'))),
+        many(satisfy(|c: char| {
+            c.is_alphanumeric() || c == '_' || c == '-'
+        })),
     )
         .skip(char(':'))
         .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
@@ -180,7 +182,7 @@ fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAS
     (optional(occur_symbol()), boosted_leaf())
 }
 
-fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
+fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
     (many1(digit()), optional((char('.'), many1(digit())))).map(
         |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
             let mut float_str = int_part;
@@ -188,18 +190,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
             float_str.push(chr);
             float_str.push_str(&decimal_str);
         }
-        float_str.parse::<f32>().unwrap()
+        float_str.parse::<f64>().unwrap()
         },
     )
 }
 
-fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
+fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
     (char('^'), positive_float_number()).map(|(_, boost)| boost)
 }
 
 fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
-        Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
+        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
             UserInputAST::Boost(Box::new(leaf), boost)
         }
         _ => leaf,
@@ -279,14 +281,16 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
 #[cfg(test)]
 mod test {
 
+    type TestParseResult = Result<(), StringStreamError>;
+
     use super::*;
     use combine::parser::Parser;
 
-    pub fn nearly_equals(a: f32, b: f32) -> bool {
+    pub fn nearly_equals(a: f64, b: f64) -> bool {
         (a - b).abs() < 0.0005 * (a + b).abs()
     }
 
-    fn assert_nearly_equals(expected: f32, val: f32) {
+    fn assert_nearly_equals(expected: f64, val: f64) {
         assert!(
             nearly_equals(val, expected),
             "Got {}, expected {}.",
@@ -296,14 +300,15 @@ mod test {
     }
 
     #[test]
-    fn test_occur_symbol() {
-        assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
-        assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
+    fn test_occur_symbol() -> TestParseResult {
+        assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
+        assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
+        Ok(())
     }
 
     #[test]
     fn test_positive_float_number() {
-        fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
+        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
             let (val, remaining) = positive_float_number().parse(float_str).unwrap();
             assert_eq!(remaining, expected_remaining);
             assert_nearly_equals(val, expected_val);
@@ -311,9 +316,9 @@ mod test {
         fn error_parse(float_str: &str) {
             assert!(positive_float_number().parse(float_str).is_err());
         }
-        valid_parse("1.0", 1.0f32, "");
-        valid_parse("1", 1.0f32, "");
-        valid_parse("0.234234 aaa", 0.234234f32, " aaa");
+        valid_parse("1.0", 1.0, "");
+        valid_parse("1", 1.0, "");
+        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
         error_parse(".3332");
         error_parse("1.");
         error_parse("-1.");
@@ -410,6 +415,25 @@ mod test {
         assert_eq!(format!("{:?}", ast), "\"abc\"");
     }
 
+    #[test]
+    fn test_field_name() -> TestParseResult {
+        assert_eq!(
+            super::field().parse("my-field-name:a")?,
+            ("my-field-name".to_string(), "a")
+        );
+        assert_eq!(
+            super::field().parse("my_field_name:a")?,
+            ("my_field_name".to_string(), "a")
+        );
+        assert!(super::field().parse(":a").is_err());
+        assert!(super::field().parse("-my_field:a").is_err());
+        assert_eq!(
+            super::field().parse("_my_field:a")?,
+            ("_my_field".to_string(), "a")
+        );
+        Ok(())
+    }
+
     #[test]
     fn test_range_parser() {
         // testing the range() parser separately
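The grammar change at the top of this section widens field names: a leading `_` is now accepted, and `-` is allowed after the first character. A restatement of the new rule as a plain predicate (a sketch; the real parser is built with `combine`, whose `letter()` is what the first-character check stands in for):

```rust
// Sketch of the field-name rule from the new `field()` parser.
// First char: letter or '_'; rest: alphanumeric, '_' or '-'.
fn is_valid_field_name(name: &str) -> bool {
    let mut chars = name.chars();
    let first_ok = matches!(chars.next(), Some(c) if c.is_alphabetic() || c == '_');
    first_ok && chars.all(|c| c.is_alphanumeric() || c == '_' || c == '-')
}

fn main() {
    assert!(is_valid_field_name("my-field-name"));
    assert!(is_valid_field_name("_my_field"));
    assert!(!is_valid_field_name("-my_field"));
}
```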
@@ -87,7 +87,7 @@ impl UserInputBound {
 pub enum UserInputAST {
     Clause(Vec<(Option<Occur>, UserInputAST)>),
     Leaf(Box<UserInputLeaf>),
-    Boost(Box<UserInputAST>, f32),
+    Boost(Box<UserInputAST>, f64),
 }
 
 impl UserInputAST {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut count_collector = SegmentCountCollector::default();
|
let mut count_collector = SegmentCountCollector::default();
|
||||||
count_collector.collect(0u32, 1f32);
|
count_collector.collect(0u32, 1.0);
|
||||||
assert_eq!(count_collector.harvest(), 1);
|
assert_eq!(count_collector.harvest(), 1);
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut count_collector = SegmentCountCollector::default();
|
let mut count_collector = SegmentCountCollector::default();
|
||||||
count_collector.collect(0u32, 1f32);
|
count_collector.collect(0u32, 1.0);
|
||||||
assert_eq!(count_collector.harvest(), 1);
|
assert_eq!(count_collector.harvest(), 1);
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut count_collector = SegmentCountCollector::default();
|
let mut count_collector = SegmentCountCollector::default();
|
||||||
count_collector.collect(0u32, 1f32);
|
count_collector.collect(0u32, 1.0);
|
||||||
count_collector.collect(1u32, 1f32);
|
count_collector.collect(1u32, 1.0);
|
||||||
assert_eq!(count_collector.harvest(), 2);
|
assert_eq!(count_collector.harvest(), 2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {
 
 impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
 where
-    TCustomScorer: CustomScorer<TScore>,
+    TCustomScorer: CustomScorer<TScore> + Send + Sync,
     TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
     type Fruit = Vec<(TScore, DocAddress)>;
|
|||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
let num_facets: usize = 3 * 4 * 5;
|
let num_facets: usize = 3 * 4 * 5;
|
||||||
let facets: Vec<Facet> = (0..num_facets)
|
let facets: Vec<Facet> = (0..num_facets)
|
||||||
.map(|mut n| {
|
.map(|mut n| {
|
||||||
@@ -531,7 +531,7 @@ mod tests {
|
|||||||
let facet_field = schema_builder.add_facet_field("facets");
|
let facet_field = schema_builder.add_facet_field("facets");
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
index_writer.add_document(doc!(
|
index_writer.add_document(doc!(
|
||||||
facet_field => Facet::from_text(&"/subjects/A/a"),
|
facet_field => Facet::from_text(&"/subjects/A/a"),
|
||||||
facet_field => Facet::from_text(&"/subjects/B/a"),
|
facet_field => Facet::from_text(&"/subjects/B/a"),
|
||||||
@@ -550,12 +550,12 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_doc_search_by_facet() {
|
fn test_doc_search_by_facet() -> crate::Result<()> {
|
||||||
let mut schema_builder = Schema::builder();
|
let mut schema_builder = Schema::builder();
|
||||||
let facet_field = schema_builder.add_facet_field("facet");
|
let facet_field = schema_builder.add_facet_field("facet");
|
||||||
let schema = schema_builder.build();
|
let schema = schema_builder.build();
|
||||||
let index = Index::create_in_ram(schema);
|
let index = Index::create_in_ram(schema);
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests()?;
|
||||||
index_writer.add_document(doc!(
|
index_writer.add_document(doc!(
|
||||||
facet_field => Facet::from_text(&"/A/A"),
|
facet_field => Facet::from_text(&"/A/A"),
|
||||||
));
|
));
|
||||||
@@ -568,8 +568,8 @@ mod tests {
|
|||||||
index_writer.add_document(doc!(
|
index_writer.add_document(doc!(
|
||||||
facet_field => Facet::from_text(&"/D/C/A"),
|
facet_field => Facet::from_text(&"/D/C/A"),
|
||||||
));
|
));
|
||||||
index_writer.commit().unwrap();
|
index_writer.commit()?;
|
||||||
let reader = index.reader().unwrap();
|
let reader = index.reader()?;
|
||||||
let searcher = reader.searcher();
|
let searcher = reader.searcher();
|
||||||
assert_eq!(searcher.num_docs(), 4);
|
assert_eq!(searcher.num_docs(), 4);
|
||||||
|
|
||||||
@@ -586,17 +586,17 @@ mod tests {
|
|||||||
assert_eq!(count_facet("/A/C"), 1);
|
assert_eq!(count_facet("/A/C"), 1);
|
||||||
assert_eq!(count_facet("/A/C/A"), 1);
|
assert_eq!(count_facet("/A/C/A"), 1);
|
||||||
assert_eq!(count_facet("/C/A"), 0);
|
assert_eq!(count_facet("/C/A"), 0);
|
||||||
|
|
||||||
|
let query_parser = QueryParser::for_index(&index, vec![]);
|
||||||
{
|
{
|
||||||
let query_parser = QueryParser::for_index(&index, vec![]);
|
let query = query_parser.parse_query("facet:/A/B")?;
|
||||||
{
|
assert_eq!(1, searcher.search(&query, &Count).unwrap());
|
||||||
let query = query_parser.parse_query("facet:/A/B").unwrap();
|
|
||||||
assert_eq!(1, searcher.search(&query, &Count).unwrap());
|
|
||||||
}
|
|
||||||
{
|
|
||||||
let query = query_parser.parse_query("facet:/A").unwrap();
|
|
||||||
assert_eq!(3, searcher.search(&query, &Count).unwrap());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
{
|
||||||
|
let query = query_parser.parse_query("facet:/A")?;
|
||||||
|
assert_eq!(3, searcher.search(&query, &Count)?);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -631,7 +631,7 @@ mod tests {
|
|||||||
.collect();
|
.collect();
|
||||||
docs[..].shuffle(&mut thread_rng());
|
docs[..].shuffle(&mut thread_rng());
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
for doc in docs {
|
for doc in docs {
|
||||||
index_writer.add_document(doc);
|
index_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
@@ -684,7 +684,7 @@ mod bench {
|
|||||||
// 40425 docs
|
// 40425 docs
|
||||||
docs[..].shuffle(&mut thread_rng());
|
docs[..].shuffle(&mut thread_rng());
|
||||||
|
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
for doc in docs {
|
for doc in docs {
|
||||||
index_writer.add_document(doc);
|
index_writer.add_document(doc);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -89,7 +89,7 @@ mod tests {
|
|||||||
let index = Index::create_in_ram(schema.clone());
|
let index = Index::create_in_ram(schema.clone());
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
let mut index_writer = index.writer_for_tests().unwrap();
|
||||||
{
|
{
|
||||||
for i in 0u64..10u64 {
|
for i in 0u64..10u64 {
|
||||||
index_writer.add_document(doc!(
|
index_writer.add_document(doc!(
|
||||||
|
|||||||
@@ -133,7 +133,7 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
|
|||||||
/// The collection logic itself is in the `SegmentCollector`.
|
/// The collection logic itself is in the `SegmentCollector`.
|
||||||
///
|
///
|
||||||
/// Segments are not guaranteed to be visited in any specific order.
|
/// Segments are not guaranteed to be visited in any specific order.
|
||||||
pub trait Collector: Sync {
|
pub trait Collector: Sync + Send {
|
||||||
/// `Fruit` is the type for the result of our collection.
|
/// `Fruit` is the type for the result of our collection.
|
||||||
/// e.g. `usize` for the `Count` collector.
|
/// e.g. `usize` for the `Count` collector.
|
||||||
type Fruit: Fruit;
|
type Fruit: Fruit;
|
||||||
|
|||||||
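`Collector` now requires `Send` in addition to `Sync`, matching the 0.13.1 changelog entry. A compile-time sketch of what the new bound guarantees, using the stock `Count` collector:

```rust
use tantivy::collector::Count;

// This function only compiles for types that are both Send and Sync.
fn assert_send_sync<T: Send + Sync>() {}

fn main() {
    assert_send_sync::<Count>();
}
```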
@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
     type Fruit = Box<dyn Fruit>;
 
-    fn collect(&mut self, doc: u32, score: f32) {
+    fn collect(&mut self, doc: u32, score: Score) {
         self.as_mut().collect(doc, score);
     }
 
@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
 }
 
 pub trait BoxableSegmentCollector {
-    fn collect(&mut self, doc: u32, score: f32);
+    fn collect(&mut self, doc: u32, score: Score);
     fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
 }
 
@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
 impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
     for SegmentCollectorWrapper<TSegmentCollector>
 {
-    fn collect(&mut self, doc: u32, score: f32) {
+    fn collect(&mut self, doc: u32, score: Score) {
         self.0.collect(doc, score);
     }
 
@@ -259,7 +259,7 @@ mod tests {
 
     let index = Index::create_in_ram(schema);
     {
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(text=>"abc"));
         index_writer.add_document(doc!(text=>"abc abc abc"));
         index_writer.add_document(doc!(text=>"abc abc"));
@@ -206,7 +206,7 @@ impl Collector for BytesFastFieldTestCollector {
 impl SegmentCollector for BytesFastFieldSegmentCollector {
     type Fruit = Vec<u8>;
 
-    fn collect(&mut self, doc: u32, _score: f32) {
+    fn collect(&mut self, doc: u32, _score: Score) {
         let data = self.reader.get_bytes(doc);
         self.vals.extend(data);
     }
@@ -38,7 +38,7 @@ use std::fmt;
 /// let schema = schema_builder.build();
 /// let index = Index::create_in_ram(schema);
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
 /// index_writer.add_document(doc!(title => "The Name of the Wind"));
 /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
 /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -52,8 +52,8 @@ use std::fmt;
 /// let query = query_parser.parse_query("diary").unwrap();
 /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
 ///
-/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
-/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
+/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
+/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
 /// ```
 pub struct TopDocs(TopCollector<Score>);
 
@@ -123,7 +123,7 @@ impl TopDocs {
 /// let schema = schema_builder.build();
 /// let index = Index::create_in_ram(schema);
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
 /// index_writer.add_document(doc!(title => "The Name of the Wind"));
 /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
 /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -139,8 +139,8 @@ impl TopDocs {
 /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
 ///
 /// assert_eq!(top_docs.len(), 2);
-/// assert_eq!(&top_docs[0], &(0.5204813, DocAddress(0, 4)));
-/// assert_eq!(&top_docs[1], &(0.4793185, DocAddress(0, 3)));
+/// assert_eq!(top_docs[0].1, DocAddress(0, 4));
+/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
 /// ```
 pub fn and_offset(self, offset: usize) -> TopDocs {
     TopDocs(self.0.and_offset(offset))
@@ -163,7 +163,7 @@ impl TopDocs {
 /// # let schema = schema_builder.build();
 /// #
 /// # let index = Index::create_in_ram(schema);
-/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
 /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
 /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
 /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
@@ -264,7 +264,7 @@ impl TopDocs {
 /// fn create_index() -> tantivy::Result<Index> {
 /// let schema = create_schema();
 /// let index = Index::create_in_ram(schema);
-/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
 /// let product_name = index.schema().get_field("product_name").unwrap();
 /// let popularity: Field = index.schema().get_field("popularity").unwrap();
 /// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
@@ -303,7 +303,7 @@ impl TopDocs {
 /// let popularity: u64 = popularity_reader.get(doc);
 /// // Well.. For the sake of the example we use a simple logarithm
 /// // function.
-/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
+/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
 /// popularity_boost_score * original_score
 /// }
 /// });
@@ -324,7 +324,7 @@ impl TopDocs {
     where
         TScore: 'static + Send + Sync + Clone + PartialOrd,
         TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
-        TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
+        TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
     {
         TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
     }
@@ -371,7 +371,7 @@ impl TopDocs {
 /// # fn main() -> tantivy::Result<()> {
 /// # let schema = create_schema();
 /// # let index = Index::create_in_ram(schema);
-/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
 /// # let product_name = index.schema().get_field("product_name").unwrap();
 /// #
 /// let popularity: Field = index.schema().get_field("popularity").unwrap();
@@ -438,7 +438,7 @@ impl TopDocs {
     where
         TScore: 'static + Send + Sync + Clone + PartialOrd,
         TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
-        TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
+        TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
     {
         CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
     }
@@ -479,7 +479,7 @@ impl Collector for TopDocs {
         let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
 
         if let Some(delete_bitset) = reader.delete_bitset() {
-            let mut threshold = f32::MIN;
+            let mut threshold = Score::MIN;
             weight.for_each_pruning(threshold, reader, &mut |doc, score| {
                 if delete_bitset.is_deleted(doc) {
                     return threshold;
@@ -491,16 +491,16 @@ impl Collector for TopDocs {
                 if heap.len() < heap_len {
                     heap.push(heap_item);
                     if heap.len() == heap_len {
-                        threshold = heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
+                        threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
                     }
                     return threshold;
                 }
                 *heap.peek_mut().unwrap() = heap_item;
-                threshold = heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN);
+                threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
                 threshold
             })?;
         } else {
-            weight.for_each_pruning(f32::MIN, reader, &mut |doc, score| {
+            weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
                 let heap_item = ComparableDoc {
                     feature: score,
                     doc,
@@ -509,13 +509,13 @@ impl Collector for TopDocs {
                 heap.push(heap_item);
                 // TODO the threshold is suboptimal for heap.len == heap_len
                 if heap.len() == heap_len {
-                    return heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
+                    return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
                 } else {
-                    return f32::MIN;
+                    return Score::MIN;
                 }
             }
             *heap.peek_mut().unwrap() = heap_item;
-            heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN)
+            heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
             })?;
         }
 
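Most edits in this file replace literal `f32` with the crate-wide `Score` alias. A minimal sketch of the pattern, assuming `Score` is a plain type alias (as it is in tantivy):

```rust
// With every signature written against the alias, a future change of score
// precision touches one line instead of every collector.
pub type Score = f32;

pub fn collect(doc: u32, score: Score) -> Score {
    let _ = doc;
    score
}
```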
@@ -561,7 +561,7 @@ mod tests {
     let index = Index::create_in_ram(schema);
     {
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
         index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
         index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
         index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -570,6 +570,13 @@ mod tests {
     index
 }
 
+fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
+    for (result, expected) in results.iter().zip(expected.iter()) {
+        assert_eq!(result.1, expected.1);
+        crate::assert_nearly_equals!(result.0, expected.0);
+    }
+}
+
 #[test]
 fn test_top_collector_not_at_capacity() {
     let index = make_index();
@@ -582,13 +589,13 @@ mod tests {
     .searcher()
     .search(&text_query, &TopDocs::with_limit(4))
     .unwrap();
-    assert_eq!(
-        score_docs,
-        vec![
+    assert_results_equals(
+        &score_docs,
+        &[
         (0.81221175, DocAddress(0u32, 1)),
         (0.5376842, DocAddress(0u32, 2)),
-        (0.48527452, DocAddress(0, 0))
-        ]
+        (0.48527452, DocAddress(0, 0)),
+        ],
     );
 }
 
@@ -604,7 +611,7 @@ mod tests {
     .searcher()
     .search(&text_query, &TopDocs::with_limit(4).and_offset(2))
     .unwrap();
-    assert_eq!(score_docs, vec![(0.48527452, DocAddress(0, 0))]);
+    assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
 }
 
 #[test]
@@ -619,12 +626,12 @@ mod tests {
     .searcher()
     .search(&text_query, &TopDocs::with_limit(2))
     .unwrap();
-    assert_eq!(
-        score_docs,
-        vec![
+    assert_results_equals(
+        &score_docs,
+        &[
         (0.81221175, DocAddress(0u32, 1)),
         (0.5376842, DocAddress(0u32, 2)),
-        ]
+        ],
     );
 }
 
@@ -640,12 +647,12 @@ mod tests {
     .searcher()
     .search(&text_query, &TopDocs::with_limit(2).and_offset(1))
     .unwrap();
-    assert_eq!(
-        score_docs,
-        vec![
+    assert_results_equals(
+        &score_docs[..],
+        &[
         (0.5376842, DocAddress(0u32, 2)),
-        (0.48527452, DocAddress(0, 0))
-        ]
+        (0.48527452, DocAddress(0, 0)),
+        ],
     );
 }
 
@@ -706,8 +713,8 @@ mod tests {
     let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
     let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
     assert_eq!(
-        top_docs,
-        vec![
+        &top_docs[..],
+        &[
         (64, DocAddress(0, 1)),
         (16, DocAddress(0, 2)),
         (12, DocAddress(0, 0))
@@ -814,7 +821,7 @@ mod tests {
 ) -> (Index, Box<dyn Query>) {
     let index = Index::create_in_ram(schema);
 
-    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+    let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
     doc_adder(&mut index_writer);
     index_writer.commit().unwrap();
     let query_parser = QueryParser::for_index(&index, vec![query_field]);
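The test hunks above stop comparing BM25 scores with `assert_eq!`: `assert_results_equals` checks the `DocAddress` exactly and the score only approximately. The relative-tolerance check it leans on has the same shape as `nearly_equals` in the query-grammar tests:

```rust
// Relative float comparison (sketch; mirrors `nearly_equals` from the
// query-grammar test hunk earlier in this compare).
fn nearly_equals(a: f32, b: f32) -> bool {
    (a - b).abs() < 0.0005 * (a + b).abs()
}

fn main() {
    assert!(nearly_equals(0.81221175, 0.8122118));
    assert!(!nearly_equals(0.81221175, 0.5376842));
}
```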
@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
 
 impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
 where
-    TScoreTweaker: ScoreTweaker<TScore>,
+    TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
     TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
     type Fruit = Vec<(TScore, DocAddress)>;
@@ -10,7 +10,9 @@ pub(crate) use self::bitset::TinySet;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::counting_writer::CountingWriter;
 pub use self::serialize::{BinarySerializable, FixedSize};
-pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
+pub use self::vint::{
+    read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
+};
 pub use byteorder::LittleEndian as Endianness;
 
 /// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -89,6 +89,19 @@ impl FixedSize for u64 {
     const SIZE_IN_BYTES: usize = 8;
 }
 
+impl BinarySerializable for f32 {
+    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+        writer.write_f32::<Endianness>(*self)
+    }
+    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
+        reader.read_f32::<Endianness>()
+    }
+}
+
+impl FixedSize for f32 {
+    const SIZE_IN_BYTES: usize = 4;
+}
+
 impl BinarySerializable for i64 {
     fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
         writer.write_i64::<Endianness>(*self)
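A round-trip sketch for the new `BinarySerializable for f32` impl above: four little-endian bytes, consistent with `FixedSize::SIZE_IN_BYTES = 4`. It uses `byteorder`, which the crate already depends on:

```rust
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::Cursor;

fn roundtrip(value: f32) -> std::io::Result<f32> {
    let mut buf = Vec::new();
    buf.write_f32::<LittleEndian>(value)?; // what `serialize` writes
    assert_eq!(buf.len(), 4); // FixedSize::SIZE_IN_BYTES
    Cursor::new(buf).read_f32::<LittleEndian>() // what `deserialize` reads
}
```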
@@ -5,12 +5,12 @@ use std::io::Read;
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
/// Wrapper over a `u64` that serializes as a variable int.
|
/// Wrapper over a `u64` that serializes as a variable int.
|
||||||
#[derive(Debug, Eq, PartialEq)]
|
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||||
pub struct VInt(pub u64);
|
pub struct VInt(pub u64);
|
||||||
|
|
||||||
const STOP_BIT: u8 = 128;
|
const STOP_BIT: u8 = 128;
|
||||||
|
|
||||||
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
|
||||||
const START_2: u64 = 1 << 7;
|
const START_2: u64 = 1 << 7;
|
||||||
const START_3: u64 = 1 << 14;
|
const START_3: u64 = 1 << 14;
|
||||||
const START_4: u64 = 1 << 21;
|
const START_4: u64 = 1 << 21;
|
||||||
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
|
|||||||
|
|
||||||
     let val = u64::from(val);
     const STOP_BIT: u64 = 128u64;
-    match val {
+    let (res, num_bytes) = match val {
         0..=STOP_1 => (val | STOP_BIT, 1),
         START_2..=STOP_2 => (
             (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,7 +56,9 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
             | (STOP_BIT << (8 * 4)),
             5,
         ),
-    }
+    };
+    LittleEndian::write_u64(&mut buf[..], res);
+    &buf[0..num_bytes]
 }

 /// Returns the number of bytes covered by a
@@ -85,23 +87,26 @@ fn vint_len(data: &[u8]) -> usize {
 /// If the buffer does not start by a valid
 /// vint payload
 pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
-    let vlen = vint_len(*data);
+    let (result, vlen) = read_u32_vint_no_advance(*data);
+    *data = &data[vlen..];
+    result
+}
+
+pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
+    let vlen = vint_len(data);
     let mut result = 0u32;
     let mut shift = 0u64;
     for &b in &data[..vlen] {
         result |= u32::from(b & 127u8) << shift;
         shift += 7;
     }
-    *data = &data[vlen..];
-    result
+    (result, vlen)
 }

 /// Write a `u32` as a vint payload.
 pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
-    let (val, num_bytes) = serialize_vint_u32(val);
-    let mut buffer = [0u8; 8];
-    LittleEndian::write_u64(&mut buffer, val);
-    writer.write_all(&buffer[..num_bytes])
+    let mut buf = [0u8; 8];
+    let data = serialize_vint_u32(val, &mut buf);
+    writer.write_all(&data)
 }

 impl VInt {
@@ -172,7 +177,6 @@ mod tests {
     use super::serialize_vint_u32;
     use super::VInt;
     use crate::common::BinarySerializable;
-    use byteorder::{ByteOrder, LittleEndian};

     fn aux_test_vint(val: u64) {
         let mut v = [14u8; 10];
@@ -208,12 +212,10 @@ mod tests {

     fn aux_test_serialize_vint_u32(val: u32) {
         let mut buffer = [0u8; 10];
-        let mut buffer2 = [0u8; 10];
+        let mut buffer2 = [0u8; 8];
         let len_vint = VInt(val as u64).serialize_into(&mut buffer);
-        let (vint, len) = serialize_vint_u32(val);
-        assert_eq!(len, len_vint, "len wrong for val {}", val);
-        LittleEndian::write_u64(&mut buffer2, vint);
-        assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
+        let res2 = serialize_vint_u32(val, &mut buffer2);
+        assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
     }

     #[test]
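For orientation, here is a self-contained sketch of the 7-bit vint layout these hunks manipulate: each byte carries 7 payload bits, and the high bit (`STOP_BIT` = 0x80) marks the final byte of a value. The names below are local to the sketch, not tantivy's API.

```rust
// Hypothetical standalone encoder/decoder for the stop-bit vint format.
fn encode_vint(mut val: u32, buf: &mut [u8; 8]) -> &[u8] {
    let mut i = 0;
    loop {
        let byte = (val & 127) as u8;
        val >>= 7;
        if val == 0 {
            buf[i] = byte | 128; // stop bit marks the last byte
            return &buf[..i + 1];
        }
        buf[i] = byte;
        i += 1;
    }
}

fn decode_vint(data: &[u8]) -> (u32, usize) {
    let mut result = 0u32;
    let mut shift = 0;
    for (i, &b) in data.iter().enumerate() {
        result |= u32::from(b & 127) << shift;
        if b & 128 != 0 {
            return (result, i + 1); // value plus number of bytes consumed
        }
        shift += 7;
    }
    panic!("buffer did not contain a terminated vint");
}
```

The new `serialize_vint_u32(val, &mut buf)` signature follows the same caller-supplies-the-buffer shape and returns the valid prefix as a slice, which is what lets `write_u32_vint` drop its byteorder round-trip.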
@@ -24,8 +24,10 @@ use crate::IndexWriter;
 use std::borrow::BorrowMut;
 use std::collections::HashSet;
 use std::fmt;

 #[cfg(feature = "mmap")]
-use std::path::{Path, PathBuf};
+use std::path::Path;
+use std::path::PathBuf;
 use std::sync::Arc;

 fn load_metas(
@@ -281,7 +283,7 @@ impl Index {
             TantivyError::LockFailure(
                 err,
                 Some(
-                    "Failed to acquire index lock. If you are using\
+                    "Failed to acquire index lock. If you are using \
                      a regular directory, this means there is already an \
                      `IndexWriter` working on this `Directory`, in this process \
                      or in a different process."
@@ -298,6 +300,15 @@ impl Index {
         )
     }

+    /// Helper to create an index writer for tests.
+    ///
+    /// That index writer has a single thread and a heap of 10 MB.
+    /// Using a single thread gives us a deterministic allocation of DocId.
+    #[cfg(test)]
+    pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
+        self.writer_with_num_threads(1, 10_000_000)
+    }
+
     /// Creates a multithreaded writer
     ///
     /// Tantivy will automatically define the number of threads to use.
@@ -500,7 +511,7 @@ mod tests {
         let schema = throw_away_schema();
         let field = schema.get_field("num_likes").unwrap();
         let mut index = Index::create_from_tempdir(schema).unwrap();
-        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut writer = index.writer_for_tests().unwrap();
         writer.commit().unwrap();
         let reader = index
             .reader_builder()
@@ -537,23 +548,33 @@ mod tests {
             test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
         }
     }

     fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
         let mut reader_index = reader.index();
         let (sender, receiver) = crossbeam::channel::unbounded();
         let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
             let _ = sender.send(());
         }));
-        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut writer = index.writer_for_tests().unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
         writer.add_document(doc!(field=>1u64));
         writer.commit().unwrap();
-        assert!(receiver.recv().is_ok());
-        assert_eq!(reader.searcher().num_docs(), 1);
+        // We need a loop here because it is possible for notify to send more than
+        // one modify event. It was observed on CI on MacOS.
+        loop {
+            assert!(receiver.recv().is_ok());
+            if reader.searcher().num_docs() == 1 {
+                break;
+            }
+        }
         writer.add_document(doc!(field=>2u64));
         writer.commit().unwrap();
-        assert!(receiver.recv().is_ok());
-        assert_eq!(reader.searcher().num_docs(), 2);
+        // ... Same as above
+        loop {
+            assert!(receiver.recv().is_ok());
+            if reader.searcher().num_docs() == 2 {
+                break;
+            }
+        }
     }

     // This test will not pass on windows, because windows
@@ -116,6 +116,7 @@ impl SegmentMeta {
             SegmentComponent::FASTFIELDS => ".fast".to_string(),
             SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
             SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
+            SegmentComponent::FIELDSTATS => ".fieldstats".to_string(),
         });
         PathBuf::from(path)
     }
@@ -213,7 +214,7 @@ pub struct IndexMeta {
     #[serde(skip_serializing_if = "Option::is_none")]
     /// Payload associated to the last commit.
     ///
-    /// Upon commit, clients can optionally add a small `Striing` payload to their commit
+    /// Upon commit, clients can optionally add a small `String` payload to their commit
     /// to help identify this commit.
     /// This payload is entirely unused by tantivy.
     pub payload: Option<String>,
@@ -1,9 +1,7 @@
-use crate::common::BinarySerializable;
 use crate::directory::ReadOnlySource;
 use crate::positions::PositionReader;
 use crate::postings::TermInfo;
 use crate::postings::{BlockSegmentPostings, SegmentPostings};
-use crate::schema::FieldType;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::termdict::TermDictionary;
@@ -37,14 +35,12 @@ impl InvertedIndexReader {
         postings_source: ReadOnlySource,
         positions_source: ReadOnlySource,
         positions_idx_source: ReadOnlySource,
+        total_num_tokens: u64,
         record_option: IndexRecordOption,
     ) -> InvertedIndexReader {
-        let total_num_tokens_data = postings_source.slice(0, 8);
-        let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
-        let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
         InvertedIndexReader {
             termdict,
-            postings_source: postings_source.slice_from(8),
+            postings_source,
             positions_source,
             positions_idx_source,
             record_option,
@@ -54,10 +50,7 @@ impl InvertedIndexReader {

     /// Creates an empty `InvertedIndexReader` object, which
     /// contains no terms at all.
-    pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
-        let record_option = field_type
-            .get_index_record_option()
-            .unwrap_or(IndexRecordOption::Basic);
+    pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
         InvertedIndexReader {
             termdict: TermDictionary::empty(),
             postings_source: ReadOnlySource::empty(),
@@ -93,7 +86,7 @@ impl InvertedIndexReader {
         term_info: &TermInfo,
         block_postings: &mut BlockSegmentPostings,
     ) {
-        let offset = term_info.postings_offset as usize;
+        let offset = term_info.postings_start_offset as usize;
         let end_source = self.postings_source.len();
         let postings_slice = self.postings_source.slice(offset, end_source);
         block_postings.reset(term_info.doc_freq, postings_slice);
@@ -121,8 +114,10 @@ impl InvertedIndexReader {
         term_info: &TermInfo,
         requested_option: IndexRecordOption,
     ) -> BlockSegmentPostings {
-        let offset = term_info.postings_offset as usize;
-        let postings_data = self.postings_source.slice_from(offset);
+        let postings_data = self.postings_source.slice(
+            term_info.postings_start_offset as usize,
+            term_info.postings_end_offset as usize,
+        );
         BlockSegmentPostings::from_data(
             term_info.doc_freq,
             postings_data,
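The last hunk above is the heart of this refactor: a term's `TermInfo` now records both a start and an end offset into the postings data, so the reader can hand out exactly the bytes belonging to one term instead of an open-ended tail. A schematic sketch with simplified stand-in types (not tantivy's actual definitions):

```rust
// Illustrative stand-ins; tantivy's TermInfo carries more fields.
struct TermInfo {
    doc_freq: u32,
    postings_start_offset: u64,
    postings_end_offset: u64,
}

// With an end offset available, slicing is exact instead of open-ended,
// which is also what allows InvertedIndexReader::new above to stop
// peeling an 8-byte header off the postings source.
fn postings_bytes<'a>(postings_data: &'a [u8], info: &TermInfo) -> &'a [u8] {
    &postings_data[info.postings_start_offset as usize..info.postings_end_offset as usize]
}
```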
@@ -24,14 +24,17 @@ pub enum SegmentComponent {
     /// Accessing a document from the store is relatively slow, as it
     /// requires to decompress the entire block it belongs to.
     STORE,

     /// Bitset describing which document of the segment is deleted.
     DELETE,

+    FIELDSTATS,
 }

 impl SegmentComponent {
     /// Iterates through the components.
     pub fn iterator() -> slice::Iter<'static, SegmentComponent> {
-        static SEGMENT_COMPONENTS: [SegmentComponent; 8] = [
+        static SEGMENT_COMPONENTS: [SegmentComponent; 9] = [
             SegmentComponent::POSTINGS,
             SegmentComponent::POSITIONS,
             SegmentComponent::POSITIONSSKIP,
@@ -40,6 +43,7 @@ impl SegmentComponent {
             SegmentComponent::TERMS,
             SegmentComponent::STORE,
             SegmentComponent::DELETE,
+            SegmentComponent::FIELDSTATS,
         ];
         SEGMENT_COMPONENTS.iter()
     }
@@ -1,4 +1,3 @@
-use crate::common::CompositeFile;
 use crate::common::HasLen;
 use crate::core::InvertedIndexReader;
 use crate::core::Segment;
@@ -8,14 +7,15 @@ use crate::directory::ReadOnlySource;
 use crate::fastfield::DeleteBitSet;
 use crate::fastfield::FacetReader;
 use crate::fastfield::FastFieldReaders;
-use crate::fieldnorm::FieldNormReader;
-use crate::schema::Field;
+use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
 use crate::schema::FieldType;
 use crate::schema::Schema;
+use crate::schema::{Field, IndexRecordOption};
 use crate::space_usage::SegmentSpaceUsage;
 use crate::store::StoreReader;
 use crate::termdict::TermDictionary;
 use crate::DocId;
+use crate::{common::CompositeFile, postings::FieldStats};
 use fail::fail_point;
 use std::collections::HashMap;
 use std::fmt;
@@ -48,7 +48,8 @@ pub struct SegmentReader {
     positions_composite: CompositeFile,
     positions_idx_composite: CompositeFile,
     fast_fields_readers: Arc<FastFieldReaders>,
-    fieldnorms_composite: CompositeFile,
+    fieldnorm_readers: FieldNormReaders,
+    field_stats: FieldStats,

     store_source: ReadOnlySource,
     delete_bitset_opt: Option<DeleteBitSet>,
@@ -125,17 +126,15 @@ impl SegmentReader {
     ///
     /// They are simply stored as a fast field, serialized in
     /// the `.fieldnorm` file of the segment.
-    pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
-        if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
-            FieldNormReader::open(fieldnorm_source)
-        } else {
+    pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
+        self.fieldnorm_readers.get_field(field).ok_or_else(|| {
             let field_name = self.schema.get_field_name(field);
             let err_msg = format!(
                 "Field norm not found for field {:?}. Was it marked as indexed during indexing?",
                 field_name
             );
-            panic!(err_msg);
-        }
+            crate::TantivyError::SchemaError(err_msg)
+        })
     }

     /// Accessor to the segment's `StoreReader`.
@@ -178,8 +177,11 @@ impl SegmentReader {
         let fast_field_readers =
             Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);

-        let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
-        let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
+        let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
+        let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
+
+        let field_stats_data = segment.open_read(SegmentComponent::FIELDSTATS)?;
+        let field_stats = FieldStats::from_source(field_stats_data.as_slice())?;

         let delete_bitset_opt = if segment.meta().has_deletes() {
             let delete_data = segment.open_read(SegmentComponent::DELETE)?;
@@ -195,7 +197,8 @@ impl SegmentReader {
             termdict_composite,
             postings_composite,
             fast_fields_readers: fast_field_readers,
-            fieldnorms_composite,
+            fieldnorm_readers,
+            field_stats,
             segment_id: segment.id(),
             store_source,
             delete_bitset_opt,
@@ -212,6 +215,11 @@ impl SegmentReader {
     /// The field reader is in charge of iterating through the
     /// term dictionary associated to a specific field,
     /// and opening the posting list associated to any term.
+    ///
+    /// If the field is not marked as indexed, a warning is logged and an empty
+    /// `InvertedIndexReader` is returned.
+    /// Similarly, if the field is marked as indexed but no term has been indexed
+    /// for it, an empty `InvertedIndexReader` is returned (but no warning is logged).
     pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
         if let Some(inv_idx_reader) = self
             .inv_idx_reader_cache
@@ -226,21 +234,21 @@ impl SegmentReader {
         let record_option_opt = field_type.get_index_record_option();

         if record_option_opt.is_none() {
-            panic!("Field {:?} does not seem indexed.", field_entry.name());
+            warn!("Field {:?} does not seem indexed.", field_entry.name());
         }

-        let record_option = record_option_opt.unwrap();
-
         let postings_source_opt = self.postings_composite.open_read(field);

-        if postings_source_opt.is_none() {
+        if postings_source_opt.is_none() || record_option_opt.is_none() {
             // no documents in the segment contained this field.
             // As a result, no data is associated to the inverted index.
             //
             // Returns an empty inverted index.
-            return Arc::new(InvertedIndexReader::empty(field_type));
+            let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
+            return Arc::new(InvertedIndexReader::empty(record_option));
         }

+        let record_option = record_option_opt.unwrap();
         let postings_source = postings_source_opt.unwrap();

         let termdict_source = self.termdict_composite.open_read(field).expect(
@@ -257,11 +265,17 @@ impl SegmentReader {
             .open_read(field)
             .expect("Index corrupted. Failed to open field positions in composite file.");

+        let total_num_tokens = self
+            .field_stats
+            .get(field)
+            .map(|field_stat| field_stat.num_tokens())
+            .unwrap_or(0u64);
         let inv_idx_reader = Arc::new(InvertedIndexReader::new(
             TermDictionary::from_source(&termdict_source),
             postings_source,
             positions_source,
             positions_idx_source,
+            total_num_tokens,
             record_option,
         ));

@@ -295,8 +309,8 @@ impl SegmentReader {
     }

     /// Returns an iterator that will iterate over the alive document ids
-    pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator<'_> {
-        SegmentReaderAliveDocsIterator::new(&self)
+    pub fn doc_ids_alive<'a>(&'a self) -> impl Iterator<Item = DocId> + 'a {
+        (0u32..self.max_doc).filter(move |doc| !self.is_deleted(*doc))
     }

     /// Summarize total space usage of this segment.
@@ -308,7 +322,7 @@ impl SegmentReader {
             self.positions_composite.space_usage(),
             self.positions_idx_composite.space_usage(),
             self.fast_fields_readers.space_usage(),
-            self.fieldnorms_composite.space_usage(),
+            self.fieldnorm_readers.space_usage(),
             self.get_store_reader().space_usage(),
             self.delete_bitset_opt
                 .as_ref()
@@ -324,52 +338,6 @@ impl fmt::Debug for SegmentReader {
     }
 }

-/// Implements the iterator trait to allow easy iteration
-/// over non-deleted ("alive") DocIds in a SegmentReader
-pub struct SegmentReaderAliveDocsIterator<'a> {
-    reader: &'a SegmentReader,
-    max_doc: DocId,
-    current: DocId,
-}
-
-impl<'a> SegmentReaderAliveDocsIterator<'a> {
-    pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
-        SegmentReaderAliveDocsIterator {
-            reader,
-            max_doc: reader.max_doc(),
-            current: 0,
-        }
-    }
-}
-
-impl<'a> Iterator for SegmentReaderAliveDocsIterator<'a> {
-    type Item = DocId;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // TODO: Use TinySet (like in BitSetDocSet) to speed this process up
-        if self.current >= self.max_doc {
-            return None;
-        }
-
-        // find the next alive doc id
-        while self.reader.is_deleted(self.current) {
-            self.current += 1;
-
-            if self.current >= self.max_doc {
-                return None;
-            }
-        }
-
-        // capture the current alive DocId
-        let result = Some(self.current);
-
-        // move down the chain
-        self.current += 1;
-
-        result
-    }
-}
-
 #[cfg(test)]
 mod test {
     use crate::core::Index;
@@ -385,7 +353,7 @@ mod test {
         let name = schema.get_field("name").unwrap();

         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(name => "tantivy"));
             index_writer.add_document(doc!(name => "horse"));
             index_writer.add_document(doc!(name => "jockey"));
@@ -94,12 +94,24 @@ impl Footer {
         match &self.versioned_footer {
             VersionedFooter::V1 {
                 crc32: _crc,
-                store_compression: compression,
+                store_compression,
             } => {
-                if &library_version.store_compression != compression {
+                if &library_version.store_compression != store_compression {
                     return Err(Incompatibility::CompressionMismatch {
                         library_compression_format: library_version.store_compression.to_string(),
-                        index_compression_format: compression.to_string(),
+                        index_compression_format: store_compression.to_string(),
+                    });
+                }
+                Ok(())
+            }
+            VersionedFooter::V2 {
+                crc32: _crc,
+                store_compression,
+            } => {
+                if &library_version.store_compression != store_compression {
+                    return Err(Incompatibility::CompressionMismatch {
+                        library_compression_format: library_version.store_compression.to_string(),
+                        index_compression_format: store_compression.to_string(),
                     });
                 }
                 Ok(())
@@ -120,24 +132,29 @@ pub enum VersionedFooter {
         crc32: CrcHashU32,
         store_compression: String,
     },
+    // Introduction of the Block WAND information.
+    V2 {
+        crc32: CrcHashU32,
+        store_compression: String,
+    },
 }

 impl BinarySerializable for VersionedFooter {
     fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
         let mut buf = Vec::new();
         match self {
-            VersionedFooter::V1 {
+            VersionedFooter::V2 {
                 crc32,
                 store_compression: compression,
             } => {
                 // Serializes a valid `VersionedFooter` or panics if the version is unknown
                 // [ version | crc_hash | compression_mode ]
                 // [ 0..4 | 4..8 | variable ]
-                BinarySerializable::serialize(&1u32, &mut buf)?;
+                BinarySerializable::serialize(&2u32, &mut buf)?;
                 BinarySerializable::serialize(crc32, &mut buf)?;
                 BinarySerializable::serialize(compression, &mut buf)?;
             }
-            VersionedFooter::UnknownVersion => {
+            VersionedFooter::V1 { .. } | VersionedFooter::UnknownVersion => {
                 return Err(io::Error::new(
                     io::ErrorKind::InvalidInput,
                     "Cannot serialize an unknown versioned footer ",
@@ -166,22 +183,30 @@ impl BinarySerializable for VersionedFooter {
         reader.read_exact(&mut buf[..])?;
         let mut cursor = &buf[..];
         let version = u32::deserialize(&mut cursor)?;
-        if version == 1 {
-            let crc32 = u32::deserialize(&mut cursor)?;
-            let compression = String::deserialize(&mut cursor)?;
-            Ok(VersionedFooter::V1 {
-                crc32,
-                store_compression: compression,
-            })
-        } else {
-            Ok(VersionedFooter::UnknownVersion)
+        if version != 1 && version != 2 {
+            return Ok(VersionedFooter::UnknownVersion);
         }
+        let crc32 = u32::deserialize(&mut cursor)?;
+        let store_compression = String::deserialize(&mut cursor)?;
+        Ok(if version == 1 {
+            VersionedFooter::V1 {
+                crc32,
+                store_compression,
+            }
+        } else {
+            assert_eq!(version, 2);
+            VersionedFooter::V2 {
+                crc32,
+                store_compression,
+            }
+        })
     }
 }

 impl VersionedFooter {
     pub fn crc(&self) -> Option<CrcHashU32> {
         match self {
+            VersionedFooter::V2 { crc32, .. } => Some(*crc32),
             VersionedFooter::V1 { crc32, .. } => Some(*crc32),
             VersionedFooter::UnknownVersion { .. } => None,
         }
@@ -219,7 +244,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
 impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
     fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
         let crc32 = self.hasher.take().unwrap().finalize();
-        let footer = Footer::new(VersionedFooter::V1 {
+        let footer = Footer::new(VersionedFooter::V2 {
             crc32,
             store_compression: crate::store::COMPRESSION.to_string(),
         });
@@ -246,17 +271,17 @@ mod tests {
         let mut vec = Vec::new();
         let footer_proxy = FooterProxy::new(&mut vec);
         assert!(footer_proxy.terminate().is_ok());
-        assert_eq!(vec.len(), 167);
-        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
-        if let VersionedFooter::V1 {
-            crc32: _,
-            store_compression,
-        } = footer.versioned_footer
-        {
-            assert_eq!(store_compression, crate::store::COMPRESSION);
-        } else {
-            panic!("Versioned footer should be V1.");
-        }
+        if crate::store::COMPRESSION == "lz4" {
+            assert_eq!(vec.len(), 158);
+        } else {
+            assert_eq!(vec.len(), 167);
+        }
+        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
+        assert!(matches!(
+            footer.versioned_footer,
+            VersionedFooter::V2 { store_compression, .. }
+            if store_compression == crate::store::COMPRESSION
+        ));
         assert_eq!(&footer.version, crate::version());
     }

@@ -264,7 +289,7 @@ mod tests {
     fn test_serialize_deserialize_footer() {
         let mut buffer = Vec::new();
         let crc32 = 123456u32;
-        let footer: Footer = Footer::new(VersionedFooter::V1 {
+        let footer: Footer = Footer::new(VersionedFooter::V2 {
             crc32,
             store_compression: "lz4".to_string(),
         });
@@ -276,7 +301,7 @@ mod tests {
     #[test]
     fn footer_length() {
         let crc32 = 1111111u32;
-        let versioned_footer = VersionedFooter::V1 {
+        let versioned_footer = VersionedFooter::V2 {
             crc32,
             store_compression: "lz4".to_string(),
         };
@@ -297,7 +322,7 @@ mod tests {
         // versioned footer length
         12 | 128,
         // index format version
-        1,
+        2,
         0,
         0,
         0,
@@ -316,7 +341,7 @@ mod tests {
         let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
         assert!(cursor.is_empty());
         let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
-        let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
+        let expected_versioned_footer: VersionedFooter = VersionedFooter::V2 {
             crc32: expected_crc,
             store_compression: "lz4".to_string(),
         };
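This footer change bumps the on-disk format to V2 (for the Block WAND skip information) while keeping V1 footers readable. The key pattern is writing the version tag first so an old reader can at least detect a footer it cannot parse. A condensed sketch of the same idea, with names local to the example:

```rust
use std::io::{self, Read, Write};

enum Footer {
    V1 { crc32: u32 },
    V2 { crc32: u32 },
    Unknown,
}

// Writing is only supported for the current version; old versions exist
// purely so deserialization can still name them.
fn write_footer(footer: &Footer, wrt: &mut impl Write) -> io::Result<()> {
    match footer {
        Footer::V2 { crc32 } => {
            wrt.write_all(&2u32.to_le_bytes())?; // version tag first
            wrt.write_all(&crc32.to_le_bytes())
        }
        _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "unsupported version")),
    }
}

fn read_footer(rdr: &mut impl Read) -> io::Result<Footer> {
    let mut word = [0u8; 4];
    rdr.read_exact(&mut word)?;
    let version = u32::from_le_bytes(word);
    if version != 1 && version != 2 {
        return Ok(Footer::Unknown); // newer than this reader understands
    }
    rdr.read_exact(&mut word)?;
    let crc32 = u32::from_le_bytes(word);
    Ok(if version == 1 { Footer::V1 { crc32 } } else { Footer::V2 { crc32 } })
}
```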
@@ -1,4 +1,4 @@
-use crate::core::MANAGED_FILEPATH;
+use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
 use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
 use crate::directory::footer::{Footer, FooterProxy};
 use crate::directory::DirectoryLock;
@@ -246,13 +246,15 @@ impl ManagedDirectory {
     /// List files for which checksum does not match content
     pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
         let mut hashset = HashSet::new();
-        let managed_paths = self
+        let mut managed_paths = self
             .meta_informations
             .read()
             .expect("Managed directory rlock poisoned in list damaged.")
             .managed_paths
             .clone();

+        managed_paths.remove(*META_FILEPATH);
+
         for path in managed_paths.into_iter() {
             if !self.validate_checksum(&path)? {
                 hashset.insert(path);
@@ -487,11 +487,13 @@ impl Directory for MmapDirectory {
         }
     }

-    fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
+    fn atomic_write(&mut self, path: &Path, content: &[u8]) -> io::Result<()> {
         debug!("Atomic Write {:?}", path);
+        let mut tempfile = tempfile::Builder::new().tempfile_in(&self.inner.root_path)?;
+        tempfile.write_all(content)?;
+        tempfile.flush()?;
         let full_path = self.resolve_path(path);
-        let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
-        meta_file.write(|f| f.write_all(data))?;
+        tempfile.into_temp_path().persist(full_path)?;
         Ok(())
     }

@@ -652,7 +654,7 @@ mod tests {
         {
             let index = Index::create(mmap_directory.clone(), schema).unwrap();

-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             let mut log_merge_policy = LogMergePolicy::default();
             log_merge_policy.set_min_merge_size(3);
             index_writer.set_merge_policy(Box::new(log_merge_policy));
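The rewrite above replaces the external atomic-write helper with the classic write-to-temp-then-rename pattern. A minimal standalone sketch of the same pattern using the tempfile crate (the `dir`/`dest` arguments are illustrative):

```rust
use std::io::Write;
use std::path::Path;

// Readers never observe a half-written file: data is fully written to a
// temporary file first, then atomically renamed over the destination.
// The temp file must live in the same directory (same filesystem) as the
// target, because rename is only atomic within one volume.
fn atomic_write(dir: &Path, dest: &Path, content: &[u8]) -> std::io::Result<()> {
    let mut tmp = tempfile::Builder::new().tempfile_in(dir)?;
    tmp.write_all(content)?;
    tmp.flush()?;
    tmp.into_temp_path().persist(dest)?; // rename over dest
    Ok(())
}
```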
@@ -211,19 +211,19 @@ fn test_watch(directory: &mut dyn Directory) {
         .unwrap();

     for i in 0..10 {
-        assert_eq!(i, counter.load(SeqCst));
+        assert!(i <= counter.load(SeqCst));
         assert!(directory
             .atomic_write(Path::new("meta.json"), b"random_test_data_2")
             .is_ok());
         assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
-        assert_eq!(i + 1, counter.load(SeqCst));
+        assert!(i + 1 <= counter.load(SeqCst)); // notify can trigger more than once.
     }
     mem::drop(watch_handle);
     assert!(directory
         .atomic_write(Path::new("meta.json"), b"random_test_data")
         .is_ok());
     assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
-    assert_eq!(10, counter.load(SeqCst));
+    assert!(10 <= counter.load(SeqCst));
 }

 fn test_lock_non_blocking(directory: &mut dyn Directory) {
@@ -5,7 +5,7 @@ use std::sync::RwLock;
 use std::sync::Weak;

 /// Type alias for callbacks registered when watching files of a `Directory`.
-pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
+pub type WatchCallback = Box<dyn Fn() + Sync + Send>;

 /// Helper struct to implement the watch method in `Directory` implementations.
 ///
@@ -32,7 +32,7 @@ impl WatchHandle {
 }

 impl WatchCallbackList {
-    /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
+    /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
     pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
         let watch_callback_arc = Arc::new(watch_callback);
         let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
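The `Fn() -> ()` spelling was just redundant sugar for `Fn()`. For context, here is a condensed sketch of the Arc/Weak scheme `subscribe` relies on, with illustrative names: the list only keeps a `Weak`, so dropping the returned handle unregisters the callback on the next broadcast.

```rust
use std::sync::{Arc, Mutex, Weak};

type Callback = Box<dyn Fn() + Sync + Send>;

struct CallbackList {
    callbacks: Mutex<Vec<Weak<Callback>>>,
}

impl CallbackList {
    fn subscribe(&self, cb: Callback) -> Arc<Callback> {
        let strong = Arc::new(cb);
        self.callbacks.lock().unwrap().push(Arc::downgrade(&strong));
        strong // the caller keeps this handle; dropping it unsubscribes
    }

    fn broadcast(&self) {
        let mut cbs = self.callbacks.lock().unwrap();
        cbs.retain(|weak| match weak.upgrade() {
            Some(cb) => {
                (*cb)(); // callback still alive: fire it
                true
            }
            None => false, // handle dropped: prune the entry
        });
    }
}
```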
@@ -38,6 +38,7 @@ pub trait DocSet {
     /// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
     fn seek(&mut self, target: DocId) -> DocId {
         let mut doc = self.doc();
+        debug_assert!(doc <= target);
         while doc < target {
             doc = self.advance();
         }
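A toy DocSet over a sorted vector makes the contract behind the new `debug_assert` concrete: `seek` may only move forward, so a caller seeking to a target behind the current doc is a bug. Illustrative only; tantivy's trait has more methods.

```rust
const TERMINATED: u32 = u32::MAX; // sentinel: the docset is exhausted

struct VecDocSet {
    docs: Vec<u32>, // sorted DocIds
    cursor: usize,
}

impl VecDocSet {
    fn doc(&self) -> u32 {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }

    fn advance(&mut self) -> u32 {
        self.cursor += 1;
        self.doc()
    }

    // The default linear seek from the trait, specialized to this type.
    fn seek(&mut self, target: u32) -> u32 {
        let mut doc = self.doc();
        debug_assert!(doc <= target); // seeking backwards is a caller bug
        while doc < target {
            doc = self.advance();
        }
        doc
    }
}
```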
@@ -15,7 +15,7 @@ mod tests {
         let field = schema_builder.add_bytes_field("bytesfield");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
         index_writer.add_document(doc!(field=>vec![]));
         index_writer.add_document(doc!(field=>vec![255u8]));
@@ -9,6 +9,8 @@ use std::io::Write;
 /// Write a delete `BitSet`
 ///
 /// where `delete_bitset` is the set of deleted `DocId`.
+/// Warning: this function does not call terminate. The caller is in charge of
+/// closing the writer properly.
 pub fn write_delete_bitset(
     delete_bitset: &BitSet,
     max_doc: u32,
@@ -42,6 +44,24 @@ pub struct DeleteBitSet {
 }

 impl DeleteBitSet {
+    #[cfg(test)]
+    pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
+        use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
+        use std::path::Path;
+        assert!(docs.iter().all(|&doc| doc < max_doc));
+        let mut bitset = BitSet::with_max_value(max_doc);
+        for &doc in docs {
+            bitset.insert(doc);
+        }
+        let mut directory = RAMDirectory::create();
+        let path = Path::new("dummydeletebitset");
+        let mut wrt = directory.open_write(path).unwrap();
+        write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
+        wrt.terminate().unwrap();
+        let source = directory.open_read(path).unwrap();
+        Self::open(source)
+    }
+
     /// Opens a delete bitset given its data source.
     pub fn open(data: ReadOnlySource) -> DeleteBitSet {
         let num_deleted: usize = data
@@ -83,42 +103,35 @@ impl HasLen for DeleteBitSet {

 #[cfg(test)]
 mod tests {
-    use super::*;
-    use crate::directory::*;
-    use std::path::PathBuf;
+    use super::DeleteBitSet;
+    use crate::common::HasLen;

-    fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
-        let test_path = PathBuf::from("test");
-        let mut directory = RAMDirectory::create();
-        {
-            let mut writer = directory.open_write(&*test_path).unwrap();
-            write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
-            writer.terminate().unwrap();
-        }
-        let source = directory.open_read(&test_path).unwrap();
-        let delete_bitset = DeleteBitSet::open(source);
-        for doc in 0..max_doc {
-            assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
-        }
-        assert_eq!(delete_bitset.len(), bitset.len());
+    #[test]
+    fn test_delete_bitset_empty() {
+        let delete_bitset = DeleteBitSet::for_test(&[], 10);
+        for doc in 0..10 {
+            assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
+        }
+        assert_eq!(delete_bitset.len(), 0);
     }

     #[test]
     fn test_delete_bitset() {
-        {
-            let mut bitset = BitSet::with_max_value(10);
-            bitset.insert(1);
-            bitset.insert(9);
-            test_delete_bitset_helper(&bitset, 10);
-        }
-        {
-            let mut bitset = BitSet::with_max_value(8);
-            bitset.insert(1);
-            bitset.insert(2);
-            bitset.insert(3);
-            bitset.insert(5);
-            bitset.insert(7);
-            test_delete_bitset_helper(&bitset, 8);
-        }
+        let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
+        assert!(delete_bitset.is_alive(0));
+        assert!(delete_bitset.is_deleted(1));
+        assert!(delete_bitset.is_alive(2));
+        assert!(delete_bitset.is_alive(3));
+        assert!(delete_bitset.is_alive(4));
+        assert!(delete_bitset.is_alive(5));
+        assert!(delete_bitset.is_alive(6));
+        assert!(delete_bitset.is_alive(6));
+        assert!(delete_bitset.is_alive(7));
+        assert!(delete_bitset.is_alive(8));
+        assert!(delete_bitset.is_deleted(9));
+        for doc in 0..10 {
+            assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
+        }
+        assert_eq!(delete_bitset.len(), 2);
     }
 }
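As a reminder of what the `.del` file encodes, here is a schematic in-memory version of the delete bitset: one bit per DocId, byte `i` covering docs `8*i..8*i+8`. Tantivy's real reader works over a `ReadOnlySource` rather than a `Vec`; this is only a sketch of the layout.

```rust
struct DeleteBitSet {
    data: Vec<u8>, // bit d of byte d/8 is set iff doc d is deleted
}

impl DeleteBitSet {
    fn from_deleted_docs(deleted: &[u32], max_doc: u32) -> DeleteBitSet {
        let mut data = vec![0u8; (max_doc as usize + 7) / 8];
        for &doc in deleted {
            data[(doc / 8) as usize] |= 1 << (doc % 8);
        }
        DeleteBitSet { data }
    }

    fn is_deleted(&self, doc: u32) -> bool {
        self.data[(doc / 8) as usize] & (1 << (doc % 8)) != 0
    }

    fn is_alive(&self, doc: u32) -> bool {
        !self.is_deleted(doc)
    }
}
```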
@@ -474,7 +474,7 @@ mod tests {
         let date_field = schema_builder.add_date_field("date", FAST);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
         index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
         index_writer.commit().unwrap();
@@ -511,7 +511,7 @@ mod tests {
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
         index_writer.add_document(doc!(
             date_field => crate::DateTime::from_u64(1i64.to_u64()),
@@ -25,7 +25,7 @@ mod tests {
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(field=>1u64, field=>3u64));
         index_writer.add_document(doc!());
         index_writer.add_document(doc!(field=>4u64));
@@ -64,7 +64,7 @@ mod tests {
         schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let first_time_stamp = chrono::Utc::now();
         index_writer.add_document(
             doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
@@ -186,7 +186,7 @@ mod tests {
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(field=> 1i64, field => 3i64));
         index_writer.add_document(doc!());
         index_writer.add_document(doc!(field=> -4i64));
@@ -221,7 +221,7 @@ mod tests {
         let field = schema_builder.add_facet_field("facetfield");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         for i in 0..100_000 {
             index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
         }
@@ -74,7 +74,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index
-            .writer_with_num_threads(1, 30_000_000)
+            .writer_for_tests()
             .expect("Failed to create index writer.");
         index_writer.add_document(doc!(
             facet_field => Facet::from("/category/cat2"),
@@ -21,7 +21,7 @@ mod reader;
 mod serializer;
 mod writer;

-pub use self::reader::FieldNormReader;
+pub use self::reader::{FieldNormReader, FieldNormReaders};
 pub use self::serializer::FieldNormsSerializer;
 pub use self::writer::FieldNormsWriter;
@@ -1,6 +1,41 @@
 use super::{fieldnorm_to_id, id_to_fieldnorm};
+use crate::common::CompositeFile;
 use crate::directory::ReadOnlySource;
+use crate::schema::Field;
+use crate::space_usage::PerFieldSpaceUsage;
 use crate::DocId;
+use std::sync::Arc;
+
+/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
+/// field) of all indexed fields in the index.
+///
+/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
+/// `fieldnorm_id`.
+/// The mapping from `fieldnorm` to `fieldnorm_id` is monotonic.
+#[derive(Clone)]
+pub struct FieldNormReaders {
+    data: Arc<CompositeFile>,
+}
+
+impl FieldNormReaders {
+    /// Creates a field norm reader.
+    pub fn open(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
+        let data = CompositeFile::open(&source)?;
+        Ok(FieldNormReaders {
+            data: Arc::new(data),
+        })
+    }
+
+    /// Returns the FieldNormReader for a specific field.
+    pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
+        self.data.open_read(field).map(FieldNormReader::open)
+    }
+
+    /// Returns a breakdown of the space usage per field.
+    pub fn space_usage(&self) -> PerFieldSpaceUsage {
+        self.data.space_usage()
+    }
+}
+
 /// Reads the fieldnorm associated to a document.
 /// The fieldnorm represents the length associated to
@@ -19,6 +54,7 @@ use crate::DocId;
 /// Apart from compression, this scale also makes it possible to
 /// precompute computationally expensive functions of the fieldnorm
 /// in a very short array.
+#[derive(Clone)]
 pub struct FieldNormReader {
     data: ReadOnlySource,
 }
@@ -29,6 +65,11 @@ impl FieldNormReader {
         FieldNormReader { data }
     }

+    /// Returns the number of documents in this segment.
+    pub fn num_docs(&self) -> u32 {
+        self.data.len() as u32
+    }
+
     /// Returns the `fieldnorm` associated to a doc id.
     /// The fieldnorm is a value approximating the number
     /// of tokens in a given field of the `doc_id`.
@@ -62,13 +103,12 @@ impl FieldNormReader {
     pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
         fieldnorm_to_id(fieldnorm)
     }
-}

     #[cfg(test)]
-impl From<Vec<u32>> for FieldNormReader {
-    fn from(field_norms: Vec<u32>) -> FieldNormReader {
+    pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
         let field_norms_id = field_norms
-            .into_iter()
+            .iter()
+            .cloned()
             .map(FieldNormReader::fieldnorm_to_id)
             .collect::<Vec<u8>>();
         let field_norms_data = ReadOnlySource::from(field_norms_id);
@@ -77,3 +117,20 @@ impl From<Vec<u32>> for FieldNormReader {
     }
 }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::fieldnorm::FieldNormReader;
+
+    #[test]
+    fn test_from_fieldnorms_array() {
+        let fieldnorms = &[1, 2, 3, 4, 1_000_000];
+        let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
+        assert_eq!(fieldnorm_reader.num_docs(), 5);
+        assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
+        assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
+        assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
+        assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
+        assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
+    }
+}
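The `983_064` in the new test reflects the one-byte quantization: large fieldnorms are rounded down to a bucket boundary. The sketch below shows the general shape of such a monotonic table — small values exact, larger ones bucketed — but the cut-offs are invented for the example and do not reproduce tantivy's actual table.

```rust
// Hypothetical id <-> fieldnorm mapping: exact below 16, then 8 buckets
// per doubling. Monotonicity is what scoring code relies on.
fn id_to_fieldnorm(id: u8) -> u32 {
    if id < 16 {
        u32::from(id)
    } else {
        let exp = (u64::from(id) - 16) / 8;
        let mantissa = (u64::from(id) - 16) % 8;
        // compute in u64 and saturate so the top ids cannot overflow u32
        ((8 + mantissa) << (exp + 1)).min(u64::from(u32::MAX)) as u32
    }
}

// Largest id whose decoded value does not exceed the fieldnorm,
// i.e. values are always rounded down to a representable bucket.
fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
    (0u8..=255)
        .rev()
        .find(|&id| id_to_fieldnorm(id) <= fieldnorm)
        .unwrap_or(0)
}
```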
@@ -78,11 +78,12 @@ impl FieldNormsWriter {
     }

     /// Serialize the seen fieldnorm values to the serializer for all fields.
-    pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
+    pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
         for &field in self.fields.iter() {
             let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
             fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
         }
+        fieldnorms_serializer.close()?;
         Ok(())
     }
 }
@@ -536,6 +536,7 @@ impl IndexWriter {
     /// when no documents are remaining.
     ///
     /// Returns the former segment_ready channel.
+    #[allow(unused_must_use)]
     fn recreate_document_channel(&mut self) -> OperationReceiver {
         let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
             channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
@@ -575,7 +576,7 @@ impl IndexWriter {
         //
         // This will drop the document queue, and the thread
         // should terminate.
-        mem::replace(self, new_index_writer);
+        *self = new_index_writer;
 
         // Drains the document receiver pipeline :
         // Workers don't need to index the pending documents.
@@ -799,7 +800,7 @@ mod tests {
         let mut schema_builder = schema::Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let index_writer = index.writer_for_tests().unwrap();
         let operations = vec![
             UserOperation::Add(doc!(text_field=>"a")),
             UserOperation::Add(doc!(text_field=>"b")),
@@ -814,7 +815,7 @@ mod tests {
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
 
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(text_field => "hello1"));
         index_writer.add_document(doc!(text_field => "hello2"));
         assert!(index_writer.commit().is_ok());
@@ -863,7 +864,7 @@ mod tests {
             .reload_policy(ReloadPolicy::Manual)
             .try_into()
             .unwrap();
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let a_term = Term::from_field_text(text_field, "a");
         let b_term = Term::from_field_text(text_field, "b");
         let operations = vec![
@@ -925,8 +926,8 @@ mod tests {
     fn test_lockfile_already_exists_error_msg() {
         let schema_builder = schema::Schema::builder();
         let index = Index::create_in_ram(schema_builder.build());
-        let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        match index.writer_with_num_threads(1, 3_000_000) {
+        let _index_writer = index.writer_for_tests().unwrap();
+        match index.writer_for_tests() {
             Err(err) => {
                 let err_msg = err.to_string();
                 assert!(err_msg.contains("already an `IndexWriter`"));
@@ -1260,7 +1261,7 @@ mod tests {
         let idfield = schema_builder.add_text_field("id", STRING);
         schema_builder.add_text_field("optfield", STRING);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(idfield=>"myid"));
         let commit = index_writer.commit();
         assert!(commit.is_ok());
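
The recurring test change in this file, and in the diffs below, replaces the hard-coded `writer_with_num_threads(1, 3_000_000)` with `writer_for_tests()`, so the single-thread, small-heap configuration used by the test suite lives in one helper. Usage stays a drop-in swap (assuming the helper keeps the old parameters):

    // Before: every test pinned the writer configuration by hand.
    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();

    // After: the test configuration is owned by a single helper,
    // so tuning it later is a one-line change.
    let mut index_writer = index.writer_for_tests().unwrap();
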
@@ -8,30 +8,31 @@ use crate::fastfield::DeleteBitSet;
 use crate::fastfield::FastFieldReader;
 use crate::fastfield::FastFieldSerializer;
 use crate::fastfield::MultiValueIntFastFieldReader;
-use crate::fieldnorm::FieldNormReader;
 use crate::fieldnorm::FieldNormsSerializer;
 use crate::fieldnorm::FieldNormsWriter;
+use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
 use crate::indexer::SegmentSerializer;
-use crate::postings::InvertedIndexSerializer;
 use crate::postings::Postings;
+use crate::postings::{InvertedIndexSerializer, SegmentPostings};
 use crate::schema::Cardinality;
 use crate::schema::FieldType;
 use crate::schema::{Field, Schema};
 use crate::store::StoreWriter;
 use crate::termdict::TermMerger;
 use crate::termdict::TermOrdinal;
-use crate::DocId;
+use crate::{DocId, InvertedIndexReader, SegmentComponent};
 use std::cmp;
 use std::collections::HashMap;
+use std::sync::Arc;
 
-fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
+fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
     let mut total_tokens = 0u64;
     let mut count: [usize; 256] = [0; 256];
     for reader in readers {
         if reader.has_deletes() {
             // if there are deletes, then we use an approximation
             // using the fieldnorm
-            let fieldnorms_reader = reader.get_fieldnorms_reader(field);
+            let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
             for doc in reader.doc_ids_alive() {
                 let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc);
                 count[fieldnorm_id as usize] += 1;
@@ -40,7 +41,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
             total_tokens += reader.inverted_index(field).total_num_tokens();
         }
     }
-    total_tokens
+    Ok(total_tokens
         + count
             .iter()
             .cloned()
@@ -48,7 +49,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
             .map(|(fieldnorm_ord, count)| {
                 count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
             })
-            .sum::<u64>()
+            .sum::<u64>())
 }
 
 pub struct IndexMerger {
@@ -167,14 +168,14 @@ impl IndexMerger {
 
     fn write_fieldnorms(
         &self,
-        fieldnorms_serializer: &mut FieldNormsSerializer,
+        mut fieldnorms_serializer: FieldNormsSerializer,
     ) -> crate::Result<()> {
         let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema);
         let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
         for field in fields {
             fieldnorms_data.clear();
             for reader in &self.readers {
-                let fieldnorms_reader = reader.get_fieldnorms_reader(field);
+                let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
                 for doc_id in reader.doc_ids_alive() {
                     let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc_id);
                     fieldnorms_data.push(fieldnorm_id);
@@ -182,6 +183,7 @@ impl IndexMerger {
             }
             fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
         }
+        fieldnorms_serializer.close()?;
         Ok(())
     }
 
@@ -492,10 +494,11 @@ impl IndexMerger {
         indexed_field: Field,
         field_type: &FieldType,
         serializer: &mut InvertedIndexSerializer,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> crate::Result<Option<TermOrdinalMapping>> {
         let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
         let mut delta_computer = DeltaComputer::new();
-        let field_readers = self
+        let field_readers: Vec<Arc<InvertedIndexReader>> = self
             .readers
             .iter()
             .map(|reader| reader.inverted_index(indexed_field))
@@ -538,7 +541,7 @@ impl IndexMerger {
         // The total number of tokens will only be exact when there has been no deletes.
         //
         // Otherwise, we approximate by removing deleted documents proportionally.
-        let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field);
+        let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field)?;
 
         // Create the total list of doc ids
         // by stacking the doc ids from the different segment.
@@ -550,7 +553,8 @@ impl IndexMerger {
         // - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc,
         //                               seg0.max_doc + seg1.max_doc + seg2.max_doc]
         // ...
-        let mut field_serializer = serializer.new_field(indexed_field, total_num_tokens)?;
+        let mut field_serializer =
+            serializer.new_field(indexed_field, total_num_tokens, fieldnorm_reader)?;
 
         let field_entry = self.schema.get_field_entry(indexed_field);
 
@@ -560,43 +564,45 @@ impl IndexMerger {
             indexed. Have you modified the schema?",
         );
 
+        let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
+
         while merged_terms.advance() {
+            segment_postings_containing_the_term.clear();
             let term_bytes: &[u8] = merged_terms.key();
 
+            let mut total_doc_freq = 0;
+
             // Let's compute the list of non-empty posting lists
-            let segment_postings: Vec<_> = merged_terms
-                .current_kvs()
-                .iter()
-                .flat_map(|heap_item| {
-                    let segment_ord = heap_item.segment_ord;
-                    let term_info = heap_item.streamer.value();
-                    let segment_reader = &self.readers[heap_item.segment_ord];
-                    let inverted_index = segment_reader.inverted_index(indexed_field);
-                    let mut segment_postings = inverted_index
-                        .read_postings_from_terminfo(term_info, segment_postings_option);
-                    let mut doc = segment_postings.doc();
-                    while doc != TERMINATED {
-                        if !segment_reader.is_deleted(doc) {
-                            return Some((segment_ord, segment_postings));
-                        }
-                        doc = segment_postings.advance();
+            for heap_item in merged_terms.current_kvs() {
+                let segment_ord = heap_item.segment_ord;
+                let term_info = heap_item.streamer.value();
+                let segment_reader = &self.readers[heap_item.segment_ord];
+                let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
+                let segment_postings =
+                    inverted_index.read_postings_from_terminfo(term_info, segment_postings_option);
+                let delete_bitset_opt = segment_reader.delete_bitset();
+                let doc_freq = if let Some(delete_bitset) = delete_bitset_opt {
+                    segment_postings.doc_freq_given_deletes(delete_bitset)
+                } else {
+                    segment_postings.doc_freq()
+                };
+                if doc_freq > 0u32 {
+                    total_doc_freq += doc_freq;
+                    segment_postings_containing_the_term.push((segment_ord, segment_postings));
                 }
-                    None
-                })
-                .collect();
+            }
 
             // At this point, `segment_postings` contains the posting list
-            // of all of the segments containing the given term.
+            // of all of the segments containing the given term (and that are non-empty)
             //
             // These segments are non-empty and advance has already been called.
-            if segment_postings.is_empty() {
+            if total_doc_freq == 0u32 {
+                // All docs that used to contain the term have been deleted. The `term` will be
+                // entirely removed.
                 continue;
             }
-            // If not, the `term` will be entirely removed.
 
-            // We know that there is at least one document containing
-            // the term, so we add it.
-            let to_term_ord = field_serializer.new_term(term_bytes)?;
+            let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?;
 
             if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
                 for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
@@ -606,7 +612,9 @@ impl IndexMerger {
 
             // We can now serialize this postings, by pushing each document to the
             // postings serializer.
-            for (segment_ord, mut segment_postings) in segment_postings {
+            for (segment_ord, mut segment_postings) in
+                segment_postings_containing_the_term.drain(..)
+            {
                 let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
 
                 let mut doc = segment_postings.doc();
@@ -636,13 +644,18 @@ impl IndexMerger {
     fn write_postings(
         &self,
         serializer: &mut InvertedIndexSerializer,
+        fieldnorm_readers: FieldNormReaders,
     ) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
         let mut term_ordinal_mappings = HashMap::new();
         for (field, field_entry) in self.schema.fields() {
+            let fieldnorm_reader = fieldnorm_readers.get_field(field);
             if field_entry.is_indexed() {
-                if let Some(term_ordinal_mapping) =
-                    self.write_postings_for_field(field, field_entry.field_type(), serializer)?
-                {
+                if let Some(term_ordinal_mapping) = self.write_postings_for_field(
+                    field,
+                    field_entry.field_type(),
+                    serializer,
+                    fieldnorm_reader,
+                )? {
                     term_ordinal_mappings.insert(field, term_ordinal_mapping);
                 }
             }
@@ -668,8 +681,15 @@ impl IndexMerger {
 
 impl SerializableSegment for IndexMerger {
     fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
-        let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
-        self.write_fieldnorms(serializer.get_fieldnorms_serializer())?;
+        if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
+            self.write_fieldnorms(fieldnorms_serializer)?;
+        }
+        let fieldnorm_data = serializer
+            .segment()
+            .open_read(SegmentComponent::FIELDNORMS)?;
+        let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
+        let term_ord_mappings =
+            self.write_postings(serializer.get_postings_serializer(), fieldnorm_readers)?;
         self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
         self.write_storable_fields(serializer.get_store_writer())?;
         serializer.close()?;
@@ -679,15 +699,15 @@ impl SerializableSegment for IndexMerger {
 
 #[cfg(test)]
 mod tests {
+    use crate::assert_nearly_equals;
     use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
     use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
     use crate::collector::{Count, FacetCollector};
     use crate::core::Index;
     use crate::query::AllQuery;
     use crate::query::BooleanQuery;
+    use crate::query::Scorer;
     use crate::query::TermQuery;
-    use crate::schema;
-    use crate::schema::Cardinality;
     use crate::schema::Document;
     use crate::schema::Facet;
     use crate::schema::IndexRecordOption;
@@ -695,9 +715,11 @@ mod tests {
     use crate::schema::Term;
    use crate::schema::TextFieldIndexing;
     use crate::schema::INDEXED;
+    use crate::schema::{Cardinality, TEXT};
     use crate::DocAddress;
     use crate::IndexWriter;
     use crate::Searcher;
+    use crate::{schema, DocSet, SegmentId};
     use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
     use futures::executor::block_on;
     use std::io::Cursor;
@@ -729,7 +751,7 @@ mod tests {
         };
 
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             {
                 // writing the segment
                 {
@@ -781,7 +803,7 @@ mod tests {
             let segment_ids = index
                 .searchable_segment_ids()
                 .expect("Searchable segments failed.");
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
            index_writer.wait_merging_threads().unwrap();
         }
@@ -882,7 +904,7 @@ mod tests {
         let score_field = schema_builder.add_u64_field("score", score_fieldtype);
         let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let reader = index.reader().unwrap();
         let search_term = |searcher: &Searcher, term: Term| {
             let collector = FastFieldTestCollector::for_field(score_field);
@@ -1189,7 +1211,7 @@ mod tests {
         let index = Index::create_in_ram(schema_builder.build());
         let reader = index.reader().unwrap();
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
                 let mut doc = Document::default();
                 for facet in doc_facets {
@@ -1254,7 +1276,7 @@ mod tests {
             let segment_ids = index
                 .searchable_segment_ids()
                 .expect("Searchable segments failed.");
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
             index_writer.wait_merging_threads().unwrap();
             reader.reload().unwrap();
@@ -1273,7 +1295,7 @@ mod tests {
 
         // Deleting one term
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
             let facet_term = Term::from_facet(facet_field, &facet);
             index_writer.delete_term(facet_term);
@@ -1298,7 +1320,7 @@ mod tests {
         let mut schema_builder = schema::Schema::builder();
         let int_field = schema_builder.add_u64_field("intvals", INDEXED);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(doc!(int_field => 1u64));
         index_writer.commit().expect("commit failed");
         index_writer.add_document(doc!(int_field => 1u64));
@@ -1327,7 +1349,7 @@ mod tests {
         let index = Index::create_in_ram(schema_builder.build());
         let reader = index.reader().unwrap();
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             let mut doc = Document::default();
             doc.add_u64(int_field, 1);
             index_writer.add_document(doc.clone());
@@ -1366,7 +1388,7 @@ mod tests {
         let index = Index::create_in_ram(schema_builder.build());
 
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
                 let mut doc = Document::default();
                 for &val in int_vals {
@@ -1440,7 +1462,7 @@ mod tests {
             let segment_ids = index
                 .searchable_segment_ids()
                 .expect("Searchable segments failed.");
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
             assert!(index_writer.wait_merging_threads().is_ok());
         }
@@ -1494,7 +1516,7 @@ mod tests {
 
         let index = Index::create_in_ram(builder.build());
 
-        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
+        let mut writer = index.writer_for_tests()?;
 
         // Make sure we'll attempt to merge every created segment
         let mut policy = crate::indexer::LogMergePolicy::default();
@@ -1504,12 +1526,9 @@ mod tests {
         for i in 0..100 {
             let mut doc = Document::new();
             doc.add_f64(field, 42.0);
-
             doc.add_f64(multi_field, 0.24);
             doc.add_f64(multi_field, 0.27);
-
             writer.add_document(doc);
-
             if i % 5 == 0 {
                 writer.commit()?;
             }
@@ -1521,6 +1540,72 @@ mod tests {
         // If a merging thread fails, we should end up with more
         // than one segment here
         assert_eq!(1, index.searchable_segments()?.len());
+        Ok(())
+    }
+
+    #[test]
+    fn test_merged_index_has_blockwand() -> crate::Result<()> {
+        let mut builder = schema::SchemaBuilder::new();
+        let text = builder.add_text_field("text", TEXT);
+        let index = Index::create_in_ram(builder.build());
+        let mut writer = index.writer_for_tests()?;
+        let happy_term = Term::from_field_text(text, "happy");
+        let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
+        for _ in 0..62 {
+            writer.add_document(doc!(text=>"hello happy tax payer"));
+        }
+        writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let mut term_scorer = term_query
+            .specialized_weight(&searcher, true)
+            .specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
+        assert_eq!(term_scorer.doc(), 0);
+        assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
+        assert_nearly_equals!(term_scorer.score(), 0.0079681855);
+        for _ in 0..81 {
+            writer.add_document(doc!(text=>"hello happy tax payer"));
+        }
+        writer.commit()?;
+        reader.reload()?;
+        let searcher = reader.searcher();
+
+        assert_eq!(searcher.segment_readers().len(), 2);
+        for segment_reader in searcher.segment_readers() {
+            let mut term_scorer = term_query
+                .specialized_weight(&searcher, true)
+                .specialized_scorer(segment_reader, 1.0)?;
+            // the difference compared to before is intrinsic to the bm25 formula. no worries there.
+            for doc in segment_reader.doc_ids_alive() {
+                assert_eq!(term_scorer.doc(), doc);
+                assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
+                assert_nearly_equals!(term_scorer.score(), 0.003478312);
+                term_scorer.advance();
+            }
+        }
+
+        let segment_ids: Vec<SegmentId> = searcher
+            .segment_readers()
+            .iter()
+            .map(|reader| reader.segment_id())
+            .collect();
+        block_on(writer.merge(&segment_ids[..]))?;
+
+        reader.reload()?;
+        let searcher = reader.searcher();
+        assert_eq!(searcher.segment_readers().len(), 1);
+
+        let segment_reader = searcher.segment_reader(0u32);
+        let mut term_scorer = term_query
+            .specialized_weight(&searcher, true)
+            .specialized_scorer(segment_reader, 1.0)?;
+        // the difference compared to before is intrinsic to the bm25 formula. no worries there.
+        for doc in segment_reader.doc_ids_alive() {
+            assert_eq!(term_scorer.doc(), doc);
+            assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
+            assert_nearly_equals!(term_scorer.score(), 0.003478312);
+            term_scorer.advance();
+        }
+
         Ok(())
     }
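
The heart of the merger rewrite above is the per-term bookkeeping: for every merged term, the loop asks each segment for a delete-aware document frequency (`doc_freq_given_deletes` when a delete bitset exists, plain `doc_freq` otherwise), keeps only the postings with at least one live document, and drops the term entirely when the total reaches zero. A condensed sketch of that decision, with simplified stand-in types:

    // Simplified stand-ins for tantivy's SegmentPostings bookkeeping.
    struct CandidatePostings {
        segment_ord: usize,
        live_doc_freq: u32, // already adjusted for deletes
    }

    // Returns None when every document containing the term was deleted,
    // mirroring the `continue` in the merge loop above.
    fn select_live_postings(
        candidates: Vec<CandidatePostings>,
    ) -> Option<(u32, Vec<CandidatePostings>)> {
        let mut total_doc_freq = 0u32;
        let mut kept = Vec::new();
        for postings in candidates {
            if postings.live_doc_freq > 0 {
                total_doc_freq += postings.live_doc_freq;
                kept.push(postings);
            }
        }
        if total_doc_freq == 0 {
            None
        } else {
            Some((total_doc_freq, kept))
        }
    }
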
@@ -29,8 +29,9 @@ pub use self::segment_writer::SegmentWriter;
 /// Alias for the default merge policy, which is the `LogMergePolicy`.
 pub type DefaultMergePolicy = LogMergePolicy;
 
+#[cfg(feature = "mmap")]
 #[cfg(test)]
-mod tests {
+mod tests_mmap {
     use crate::schema::{self, Schema};
     use crate::{Index, Term};
 
@@ -39,7 +40,7 @@ mod tests {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         // there must be one deleted document in the segment
         index_writer.add_document(doc!(text_field=>"b"));
         index_writer.delete_term(Term::from_field_text(text_field, "b"));
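
Renaming the module to `tests_mmap` and stacking `#[cfg(feature = "mmap")]` on top of `#[cfg(test)]` means these tests compile only when both the mmap feature and the test profile are active. The shape of the gating (test name hypothetical):

    #[cfg(feature = "mmap")]
    #[cfg(test)]
    mod tests_mmap {
        // Compiled only for `cargo test --features mmap`;
        // a no-mmap test run skips this module entirely.
        #[test]
        fn requires_tempdir_backed_index() {}
    }
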
@@ -8,15 +8,16 @@ use crate::store::StoreWriter;
 /// Segment serializer is in charge of laying out on disk
 /// the data accumulated and sorted by the `SegmentWriter`.
 pub struct SegmentSerializer {
+    segment: Segment,
     store_writer: StoreWriter,
     fast_field_serializer: FastFieldSerializer,
-    fieldnorms_serializer: FieldNormsSerializer,
+    fieldnorms_serializer: Option<FieldNormsSerializer>,
     postings_serializer: InvertedIndexSerializer,
 }
 
 impl SegmentSerializer {
     /// Creates a new `SegmentSerializer`.
-    pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
+    pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
         let store_write = segment.open_write(SegmentComponent::STORE)?;
 
         let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
@@ -25,15 +26,20 @@ impl SegmentSerializer {
         let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
         let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
 
-        let postings_serializer = InvertedIndexSerializer::open(segment)?;
+        let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
         Ok(SegmentSerializer {
+            segment,
             store_writer: StoreWriter::new(store_write),
             fast_field_serializer,
-            fieldnorms_serializer,
+            fieldnorms_serializer: Some(fieldnorms_serializer),
             postings_serializer,
         })
     }
 
+    pub fn segment(&self) -> &Segment {
+        &self.segment
+    }
+
     /// Accessor to the `PostingsSerializer`.
     pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
         &mut self.postings_serializer
@@ -44,9 +50,11 @@ impl SegmentSerializer {
         &mut self.fast_field_serializer
     }
 
-    /// Accessor to the field norm serializer.
-    pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer {
-        &mut self.fieldnorms_serializer
+    /// Extract the field norm serializer.
+    ///
+    /// Note the fieldnorms serializer can only be extracted once.
+    pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
+        self.fieldnorms_serializer.take()
     }
 
     /// Accessor to the `StoreWriter`.
@@ -55,11 +63,13 @@ impl SegmentSerializer {
     }
 
     /// Finalize the segment serialization.
-    pub fn close(self) -> crate::Result<()> {
+    pub fn close(mut self) -> crate::Result<()> {
+        if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
+            fieldnorms_serializer.close()?;
+        }
         self.fast_field_serializer.close()?;
         self.postings_serializer.close()?;
         self.store_writer.close()?;
-        self.fieldnorms_serializer.close()?;
         Ok(())
     }
 }
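
Holding the fieldnorms serializer in an `Option` and exposing it through `extract_fieldnorms_serializer`, which is backed by `Option::take`, is what lets the merger and the segment writer each consume it exactly once while `close()` still finishes the job when nobody extracted it. A minimal sketch of that take-once pattern:

    struct Holder {
        resource: Option<String>,
    }

    impl Holder {
        // First call yields the resource; every later call yields None,
        // mirroring extract_fieldnorms_serializer() above.
        fn extract(&mut self) -> Option<String> {
            self.resource.take()
        }
    }

    fn main() {
        let mut holder = Holder {
            resource: Some("fieldnorms".to_string()),
        };
        assert_eq!(holder.extract().as_deref(), Some("fieldnorms"));
        assert_eq!(holder.extract(), None);
    }
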
@@ -112,7 +112,7 @@ fn merge(
     target_opstamp: Opstamp,
 ) -> crate::Result<SegmentEntry> {
     // first we need to apply deletes to our segment.
-    let mut merged_segment = index.new_segment();
+    let merged_segment = index.new_segment();
 
     // First we apply all of the deletes to the merged segment, up to the target opstamp.
     for segment_entry in &mut segment_entries {
@@ -131,12 +131,13 @@ fn merge(
     let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
 
     // ... we just serialize this index merger in our new segment to merge the two segments.
-    let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
+    let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
 
     let num_docs = merger.write(segment_serializer)?;
 
-    let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
+    let merged_segment_id = merged_segment.id();
+
+    let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
     Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
 }
 
@@ -521,7 +522,7 @@ impl SegmentUpdater {
     ///
     /// Upon termination of the current merging threads,
     /// merge opportunity may appear.
-    //
+    ///
     /// We keep waiting until the merge policy judges that
     /// no opportunity is available.
     ///
@@ -554,7 +555,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
 
         {
@@ -607,7 +608,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
 
         {
             for _ in 0..100 {
@@ -678,7 +679,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
 
         {
             for _ in 0..100 {
@@ -2,7 +2,7 @@ use super::operation::AddOperation;
 use crate::core::Segment;
 use crate::core::SerializableSegment;
 use crate::fastfield::FastFieldsWriter;
-use crate::fieldnorm::FieldNormsWriter;
+use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
 use crate::indexer::segment_serializer::SegmentSerializer;
 use crate::postings::compute_table_size;
 use crate::postings::MultiFieldPostingsWriter;
@@ -14,10 +14,9 @@ use crate::schema::{Field, FieldEntry};
 use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
 use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
 use crate::tokenizer::{TokenStreamChain, Tokenizer};
-use crate::DocId;
 use crate::Opstamp;
+use crate::{DocId, SegmentComponent};
 use std::io;
-use std::str;
 
 /// Computes the initial size of the hash table.
 ///
@@ -48,6 +47,7 @@ pub struct SegmentWriter {
     fieldnorms_writer: FieldNormsWriter,
     doc_opstamps: Vec<Opstamp>,
     tokenizers: Vec<Option<TextAnalyzer>>,
+    term_buffer: Term,
 }
 
 impl SegmentWriter {
@@ -62,11 +62,12 @@ impl SegmentWriter {
     /// - schema
     pub fn for_segment(
         memory_budget: usize,
-        mut segment: Segment,
+        segment: Segment,
         schema: &Schema,
     ) -> crate::Result<SegmentWriter> {
+        let tokenizer_manager = segment.index().tokenizers().clone();
         let table_num_bits = initial_table_size(memory_budget)?;
-        let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
+        let segment_serializer = SegmentSerializer::for_segment(segment)?;
         let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
         let tokenizers = schema
             .fields()
@@ -76,7 +77,7 @@ impl SegmentWriter {
                     .get_indexing_options()
                     .and_then(|text_index_option| {
                         let tokenizer_name = &text_index_option.tokenizer();
-                        segment.index().tokenizers().get(tokenizer_name)
+                        tokenizer_manager.get(tokenizer_name)
                     }),
                 _ => None,
             },
@@ -90,6 +91,7 @@ impl SegmentWriter {
             fast_field_writers: FastFieldsWriter::from_schema(schema),
             doc_opstamps: Vec::with_capacity(1_000),
             tokenizers,
+            term_buffer: Term::new(),
         })
     }
 
@@ -127,24 +129,26 @@ impl SegmentWriter {
             if !field_options.is_indexed() {
                 continue;
             }
+            let (term_buffer, multifield_postings) =
+                (&mut self.term_buffer, &mut self.multifield_postings);
             match *field_options.field_type() {
                 FieldType::HierarchicalFacet => {
-                    let facets: Vec<&str> = field_values
-                        .iter()
-                        .flat_map(|field_value| match *field_value.value() {
-                            Value::Facet(ref facet) => Some(facet.encoded_str()),
-                            _ => {
-                                panic!("Expected hierarchical facet");
-                            }
-                        })
-                        .collect();
-                    let mut term = Term::for_field(field); // we set the Term
+                    term_buffer.set_field(field);
+                    let facets =
+                        field_values
+                            .iter()
+                            .flat_map(|field_value| match *field_value.value() {
+                                Value::Facet(ref facet) => Some(facet.encoded_str()),
+                                _ => {
+                                    panic!("Expected hierarchical facet");
+                                }
+                            });
                     for fake_str in facets {
                         let mut unordered_term_id_opt = None;
                         FacetTokenizer.token_stream(fake_str).process(&mut |token| {
-                            term.set_text(&token.text);
+                            term_buffer.set_text(&token.text);
                             let unordered_term_id =
-                                self.multifield_postings.subscribe(doc_id, &term);
+                                multifield_postings.subscribe(doc_id, &term_buffer);
                             unordered_term_id_opt = Some(unordered_term_id);
                         });
                         if let Some(unordered_term_id) = unordered_term_id_opt {
@@ -167,7 +171,6 @@ impl SegmentWriter {
                             if let Some(last_token) = tok_str.tokens.last() {
                                 total_offset += last_token.offset_to;
                             }
-
                             token_streams
                                 .push(PreTokenizedStream::from(tok_str.clone()).into());
                         }
@@ -177,7 +180,6 @@ impl SegmentWriter {
                         {
                             offsets.push(total_offset);
                             total_offset += text.len();
-
                             token_streams.push(tokenizer.token_stream(text));
                         }
                     }
@@ -189,8 +191,12 @@ impl SegmentWriter {
                     0
                 } else {
                     let mut token_stream = TokenStreamChain::new(offsets, token_streams);
-                    self.multifield_postings
-                        .index_text(doc_id, field, &mut token_stream)
+                    multifield_postings.index_text(
+                        doc_id,
+                        field,
+                        &mut token_stream,
+                        term_buffer,
+                    )
                 };
 
                 self.fieldnorms_writer.record(doc_id, field, num_tokens);
@@ -198,44 +204,36 @@ impl SegmentWriter {
             FieldType::U64(ref int_option) => {
                 if int_option.is_indexed() {
                     for field_value in field_values {
-                        let term = Term::from_field_u64(
-                            field_value.field(),
-                            field_value.value().u64_value(),
-                        );
-                        self.multifield_postings.subscribe(doc_id, &term);
+                        term_buffer.set_field(field_value.field());
+                        term_buffer.set_u64(field_value.value().u64_value());
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
             }
             FieldType::Date(ref int_option) => {
                 if int_option.is_indexed() {
                     for field_value in field_values {
-                        let term = Term::from_field_i64(
-                            field_value.field(),
-                            field_value.value().date_value().timestamp(),
-                        );
-                        self.multifield_postings.subscribe(doc_id, &term);
+                        term_buffer.set_field(field_value.field());
+                        term_buffer.set_i64(field_value.value().date_value().timestamp());
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
             }
             FieldType::I64(ref int_option) => {
                 if int_option.is_indexed() {
                     for field_value in field_values {
-                        let term = Term::from_field_i64(
-                            field_value.field(),
-                            field_value.value().i64_value(),
-                        );
-                        self.multifield_postings.subscribe(doc_id, &term);
+                        term_buffer.set_field(field_value.field());
+                        term_buffer.set_i64(field_value.value().i64_value());
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
             }
             FieldType::F64(ref int_option) => {
                 if int_option.is_indexed() {
                     for field_value in field_values {
-                        let term = Term::from_field_f64(
-                            field_value.field(),
-                            field_value.value().f64_value(),
-                        );
-                        self.multifield_postings.subscribe(doc_id, &term);
+                        term_buffer.set_field(field_value.field());
+                        term_buffer.set_f64(field_value.value().f64_value());
+                        multifield_postings.subscribe(doc_id, &term_buffer);
                     }
                 }
             }
@@ -280,9 +278,16 @@ fn write(
     fieldnorms_writer: &FieldNormsWriter,
     mut serializer: SegmentSerializer,
 ) -> crate::Result<()> {
-    let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
+    if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
+        fieldnorms_writer.serialize(fieldnorms_serializer)?;
+    }
+    let fieldnorm_data = serializer
+        .segment()
+        .open_read(SegmentComponent::FIELDNORMS)?;
+    let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
+    let term_ord_map =
+        multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
     fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
-    fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
    serializer.close()?;
     Ok(())
 }
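
The `SegmentWriter` diff above threads a single reusable `term_buffer` through the indexing loops: instead of building a fresh `Term` with `Term::from_field_u64` and friends for every value, the buffer is re-pointed via `set_field`/`set_u64`/`set_i64`/`set_f64`, saving one allocation per indexed value. A rough sketch of the buffer-reuse idea, with a simplified stand-in for `Term`:

    // Simplified stand-in for tantivy's Term: one growable byte buffer,
    // cleared and refilled per value instead of allocated anew.
    struct TermBuffer {
        bytes: Vec<u8>,
    }

    impl TermBuffer {
        fn set_field(&mut self, field_id: u32) {
            self.bytes.clear();
            self.bytes.extend_from_slice(&field_id.to_be_bytes());
        }
        fn set_u64(&mut self, value: u64) {
            self.bytes.truncate(4); // keep the field prefix
            self.bytes.extend_from_slice(&value.to_be_bytes());
        }
    }

    fn main() {
        let mut term_buffer = TermBuffer { bytes: Vec::new() };
        for value in 0u64..3 {
            term_buffer.set_field(0);
            term_buffer.set_u64(value);
            // in the real code: multifield_postings.subscribe(doc_id, &term_buffer)
        }
    }
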
409
src/lib.rs
409
src/lib.rs
@@ -173,7 +173,7 @@ use once_cell::sync::Lazy;
|
|||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// Index format version.
|
/// Index format version.
|
||||||
const INDEX_FORMAT_VERSION: u32 = 1;
|
const INDEX_FORMAT_VERSION: u32 = 2;
|
||||||
|
|
||||||
/// Structure version for the index.
|
/// Structure version for the index.
|
||||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
@@ -245,11 +245,10 @@ pub type DocId = u32;
|
|||||||
/// with opstamp `n+1`.
|
/// with opstamp `n+1`.
|
||||||
pub type Opstamp = u64;
|
pub type Opstamp = u64;
|
||||||
|
|
||||||
/// A f32 that represents the relevance of the document to the query
|
/// A Score that represents the relevance of the document to the query
|
||||||
///
|
///
|
||||||
/// This is modelled internally as a `f32`. The
|
/// This is modelled internally as a `f32`. The larger the number, the more relevant
|
||||||
/// larger the number, the more relevant the document
|
/// the document to the search query.
|
||||||
/// to the search
|
|
||||||
pub type Score = f32;
|
pub type Score = f32;
|
||||||
|
|
||||||
/// A `SegmentLocalId` identifies a segment.
|
/// A `SegmentLocalId` identifies a segment.
|
||||||
@@ -282,7 +281,6 @@ pub struct DocAddress(pub SegmentLocalId, pub DocId);
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
||||||
use crate::core::SegmentReader;
|
use crate::core::SegmentReader;
|
||||||
use crate::docset::{DocSet, TERMINATED};
|
use crate::docset::{DocSet, TERMINATED};
|
||||||
@@ -290,7 +288,6 @@ mod tests {
|
|||||||
use crate::schema::*;
|
use crate::schema::*;
|
||||||
use crate::DocAddress;
|
use crate::DocAddress;
|
||||||
use crate::Index;
|
use crate::Index;
|
||||||
use crate::IndexWriter;
|
|
||||||
use crate::Postings;
|
use crate::Postings;
|
||||||
use crate::ReloadPolicy;
|
use crate::ReloadPolicy;
|
||||||
use rand::distributions::Bernoulli;
|
use rand::distributions::Bernoulli;
|
||||||
@@ -298,17 +295,26 @@ mod tests {
|
|||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use rand::{Rng, SeedableRng};
|
use rand::{Rng, SeedableRng};
|
||||||
|
|
||||||
pub fn assert_nearly_equals(expected: f32, val: f32) {
|
/// Checks if left and right are close one to each other.
|
||||||
assert!(
|
/// Panics if the two values are more than 0.5% apart.
|
||||||
nearly_equals(val, expected),
|
#[macro_export]
|
||||||
"Got {}, expected {}.",
|
macro_rules! assert_nearly_equals {
|
||||||
val,
|
($left:expr, $right:expr) => {{
|
||||||
expected
|
match (&$left, &$right) {
|
||||||
);
|
(left_val, right_val) => {
|
||||||
}
|
let diff = (left_val - right_val).abs();
|
||||||
|
let add = left_val.abs() + right_val.abs();
|
||||||
pub fn nearly_equals(a: f32, b: f32) -> bool {
|
if diff > 0.0005 * add {
|
||||||
(a - b).abs() < 0.0005 * (a + b).abs()
|
panic!(
|
||||||
|
r#"assertion failed: `(left ~= right)`
|
||||||
|
left: `{:?}`,
|
||||||
|
right: `{:?}`"#,
|
||||||
|
&*left_val, &*right_val
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
|
||||||
@@ -346,14 +352,14 @@ mod tests {
 
     #[test]
     #[cfg(feature = "mmap")]
-    fn test_indexing() {
+    fn test_indexing() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_from_tempdir(schema).unwrap();
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             {
                 let doc = doc!(text_field=>"af b");
                 index_writer.add_document(doc);
@@ -368,29 +374,30 @@ mod tests {
             }
             assert!(index_writer.commit().is_ok());
         }
+        Ok(())
     }
 
     #[test]
-    fn test_docfreq1() {
+    fn test_docfreq1() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         {
             index_writer.add_document(doc!(text_field=>"a b c"));
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }
         {
             index_writer.add_document(doc!(text_field=>"a"));
             index_writer.add_document(doc!(text_field=>"a a"));
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }
         {
             index_writer.add_document(doc!(text_field=>"c"));
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }
         {
-            let reader = index.reader().unwrap();
+            let reader = index.reader()?;
             let searcher = reader.searcher();
             let term_a = Term::from_field_text(text_field, "a");
             assert_eq!(searcher.doc_freq(&term_a), 3);
@@ -401,67 +408,50 @@ mod tests {
             let term_d = Term::from_field_text(text_field, "d");
             assert_eq!(searcher.doc_freq(&term_d), 0);
         }
+        Ok(())
     }
 
     #[test]
-    fn test_fieldnorm_no_docs_with_field() {
+    fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let title_field = schema_builder.add_text_field("title", TEXT);
         let text_field = schema_builder.add_text_field("text", TEXT);
         let index = Index::create_in_ram(schema_builder.build());
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"a b c"));
+        index_writer.commit()?;
+        let index_reader = index.reader()?;
+        let searcher = index_reader.searcher();
+        let reader = searcher.segment_reader(0);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            {
-                let doc = doc!(text_field=>"a b c");
-                index_writer.add_document(doc);
-            }
-            index_writer.commit().unwrap();
+            let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?;
+            assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
         }
         {
-            let index_reader = index.reader().unwrap();
-            let searcher = index_reader.searcher();
-            let reader = searcher.segment_reader(0);
-            {
-                let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
-                assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
-            }
-            {
-                let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
-                assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
-            }
+            let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?;
+            assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
         }
+        Ok(())
     }
 
     #[test]
-    fn test_fieldnorm() {
+    fn test_fieldnorm() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            {
-                let doc = doc!(text_field=>"a b c");
-                index_writer.add_document(doc);
-            }
-            {
-                let doc = doc!();
-                index_writer.add_document(doc);
-            }
-            {
-                let doc = doc!(text_field=>"a b");
-                index_writer.add_document(doc);
-            }
-            index_writer.commit().unwrap();
-        }
-        {
-            let reader = index.reader().unwrap();
-            let searcher = reader.searcher();
-            let segment_reader: &SegmentReader = searcher.segment_reader(0);
-            let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
-            assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
-            assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
-            assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
-        }
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"a b c"));
+        index_writer.add_document(doc!());
+        index_writer.add_document(doc!(text_field=>"a b"));
+        index_writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let segment_reader: &SegmentReader = searcher.segment_reader(0);
+        let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?;
+        assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
+        assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
+        assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
+        Ok(())
     }
 
     fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
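For context (illustration, not part of the patch): a fieldnorm is the number of tokens a document holds in a given field at indexing time; it is later quantized into a one-byte `fieldnorm_id` used by BM25. A rough standalone sketch of the values the test above asserts, assuming a hypothetical whitespace tokenizer:

    // Illustrative only: a doc with text "a b c" gets fieldnorm 3;
    // a doc missing the field gets 0.
    fn fieldnorm(field_text: Option<&str>) -> u32 {
        field_text
            .map(|text| text.split_whitespace().count() as u32)
            .unwrap_or(0)
    }

    fn main() {
        assert_eq!(fieldnorm(Some("a b c")), 3);
        assert_eq!(fieldnorm(None), 0);
        assert_eq!(fieldnorm(Some("a b")), 2);
    }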
@@ -476,7 +466,7 @@ mod tests {
     }
 
     #[test]
-    fn test_delete_postings1() {
+    fn test_delete_postings1() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let term_abcd = Term::from_field_text(text_field, "abcd");
@@ -492,7 +482,7 @@ mod tests {
             .unwrap();
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             // 0
             index_writer.add_document(doc!(text_field=>"a b"));
             // 1
@@ -508,10 +498,10 @@ mod tests {
             index_writer.add_document(doc!(text_field=>" b c"));
             // 5
             index_writer.add_document(doc!(text_field=>" a"));
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }
         {
-            reader.reload().unwrap();
+            reader.reload()?;
             let searcher = reader.searcher();
             let segment_reader = searcher.segment_reader(0);
             let inverted_index = segment_reader.inverted_index(text_field);
@@ -539,15 +529,15 @@ mod tests {
         }
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             // 0
             index_writer.add_document(doc!(text_field=>"a b"));
             // 1
             index_writer.delete_term(Term::from_field_text(text_field, "c"));
-            index_writer.rollback().unwrap();
+            index_writer.rollback()?;
         }
         {
-            reader.reload().unwrap();
+            reader.reload()?;
             let searcher = reader.searcher();
             let seg_reader = searcher.segment_reader(0);
             let inverted_index = seg_reader.inverted_index(term_abcd.field());
@@ -576,15 +566,15 @@ mod tests {
         }
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests()?;
             index_writer.add_document(doc!(text_field=>"a b"));
             index_writer.delete_term(Term::from_field_text(text_field, "c"));
-            index_writer.rollback().unwrap();
+            index_writer.rollback()?;
             index_writer.delete_term(Term::from_field_text(text_field, "a"));
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }
         {
-            reader.reload().unwrap();
+            reader.reload()?;
             let searcher = reader.searcher();
             let segment_reader = searcher.segment_reader(0);
             let inverted_index = segment_reader.inverted_index(term_abcd.field());
@@ -616,19 +606,20 @@ mod tests {
                 assert!(!advance_undeleted(&mut postings, segment_reader));
             }
         }
+        Ok(())
     }
 
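A note on the semantics the test above exercises (a mock sketch under stated assumptions, not part of the patch): `delete_term` enqueues a delete that only becomes visible at `commit()`, and `rollback()` drops everything queued since the last commit. A toy model of that batching:

    // Minimal mock of the commit/rollback batching the test relies on.
    #[derive(Default)]
    struct MockWriter {
        committed: Vec<String>,
        pending: Vec<String>,
    }

    impl MockWriter {
        fn add(&mut self, op: &str) {
            self.pending.push(op.to_string());
        }
        fn rollback(&mut self) {
            // Drops every operation queued since the last commit.
            self.pending.clear();
        }
        fn commit(&mut self) {
            self.committed.append(&mut self.pending);
        }
    }

    fn main() {
        let mut writer = MockWriter::default();
        writer.add("add doc");
        writer.add("delete term c");
        writer.rollback(); // neither operation survives
        writer.add("delete term a");
        writer.commit(); // only the delete of "a" becomes visible
        assert_eq!(writer.committed, vec!["delete term a"]);
    }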
     #[test]
-    fn test_indexed_u64() {
+    fn test_indexed_u64() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let field = schema_builder.add_u64_field("value", INDEXED);
         let schema = schema_builder.build();
 
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         index_writer.add_document(doc!(field=>1u64));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
         let searcher = reader.searcher();
         let term = Term::from_field_u64(field, 1u64);
         let mut postings = searcher
@@ -638,20 +629,21 @@ mod tests {
             .unwrap();
         assert_eq!(postings.doc(), 0);
         assert_eq!(postings.advance(), TERMINATED);
+        Ok(())
     }
 
     #[test]
-    fn test_indexed_i64() {
+    fn test_indexed_i64() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let value_field = schema_builder.add_i64_field("value", INDEXED);
         let schema = schema_builder.build();
 
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         let negative_val = -1i64;
         index_writer.add_document(doc!(value_field => negative_val));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
         let searcher = reader.searcher();
         let term = Term::from_field_i64(value_field, negative_val);
         let mut postings = searcher
@@ -661,20 +653,21 @@ mod tests {
             .unwrap();
         assert_eq!(postings.doc(), 0);
         assert_eq!(postings.advance(), TERMINATED);
+        Ok(())
     }
 
     #[test]
-    fn test_indexed_f64() {
+    fn test_indexed_f64() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let value_field = schema_builder.add_f64_field("value", INDEXED);
         let schema = schema_builder.build();
 
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         let val = std::f64::consts::PI;
         index_writer.add_document(doc!(value_field => val));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
+        index_writer.commit()?;
+        let reader = index.reader()?;
         let searcher = reader.searcher();
         let term = Term::from_field_f64(value_field, val);
         let mut postings = searcher
@@ -684,26 +677,29 @@ mod tests {
             .unwrap();
         assert_eq!(postings.doc(), 0);
         assert_eq!(postings.advance(), TERMINATED);
+        Ok(())
     }
 
     #[test]
-    fn test_indexedfield_not_in_documents() {
+    fn test_indexedfield_not_in_documents() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let absent_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         index_writer.add_document(doc!(text_field=>"a"));
         assert!(index_writer.commit().is_ok());
-        let reader = index.reader().unwrap();
+        let reader = index.reader()?;
         let searcher = reader.searcher();
         let segment_reader = searcher.segment_reader(0);
-        segment_reader.inverted_index(absent_field); //< should not panic
+        let inverted_index = segment_reader.inverted_index(absent_field); //< should not panic
+        assert_eq!(inverted_index.terms().num_terms(), 0);
+        Ok(())
     }
 
     #[test]
-    fn test_delete_postings2() {
+    fn test_delete_postings2() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -711,53 +707,40 @@ mod tests {
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::Manual)
-            .try_into()
-            .unwrap();
+            .try_into()?;
 
         // writing the segment
-        let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
-
-        let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
-            let doc = doc!(text_field=>val);
-            index_writer.add_document(doc);
-        };
-
-        let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
-            let delterm = Term::from_field_text(text_field, val);
-            index_writer.delete_term(delterm);
-        };
-
-        add_document(&mut index_writer, "63");
-        add_document(&mut index_writer, "70");
-        add_document(&mut index_writer, "34");
-        add_document(&mut index_writer, "1");
-        add_document(&mut index_writer, "38");
-        add_document(&mut index_writer, "33");
-        add_document(&mut index_writer, "40");
-        add_document(&mut index_writer, "17");
-        remove_document(&mut index_writer, "38");
-        remove_document(&mut index_writer, "34");
-        index_writer.commit().unwrap();
-        reader.reload().unwrap();
-        let searcher = reader.searcher();
-        assert_eq!(searcher.num_docs(), 6);
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"63"));
+        index_writer.add_document(doc!(text_field=>"70"));
+        index_writer.add_document(doc!(text_field=>"34"));
+        index_writer.add_document(doc!(text_field=>"1"));
+        index_writer.add_document(doc!(text_field=>"38"));
+        index_writer.add_document(doc!(text_field=>"33"));
+        index_writer.add_document(doc!(text_field=>"40"));
+        index_writer.add_document(doc!(text_field=>"17"));
+        index_writer.delete_term(Term::from_field_text(text_field, "38"));
+        index_writer.delete_term(Term::from_field_text(text_field, "34"));
+        index_writer.commit()?;
+        reader.reload()?;
+        assert_eq!(reader.searcher().num_docs(), 6);
+        Ok(())
    }
 
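With `ReloadPolicy::Manual` (used above), a searcher keeps serving the old view until `reload()` is called explicitly. A hedged sketch of that pattern, assuming `index` and `text_field` are set up as in the test:

    // Sketch only: the searcher lags behind commits until reload() is called.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .try_into()?;
    assert_eq!(reader.searcher().num_docs(), 0);
    let mut index_writer = index.writer_for_tests()?;
    index_writer.add_document(doc!(text_field => "hello"));
    index_writer.commit()?;
    // Without this explicit reload, num_docs() would still report 0.
    reader.reload()?;
    assert_eq!(reader.searcher().num_docs(), 1);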
     #[test]
-    fn test_termfreq() {
+    fn test_termfreq() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            let doc = doc!(text_field=>"af af af bc bc");
-            index_writer.add_document(doc);
-            index_writer.commit().unwrap();
+            let mut index_writer = index.writer_for_tests()?;
+            index_writer.add_document(doc!(text_field=>"af af af bc bc"));
+            index_writer.commit()?;
         }
         {
-            let index_reader = index.reader().unwrap();
+            let index_reader = index.reader()?;
             let searcher = index_reader.searcher();
             let reader = searcher.segment_reader(0);
             let inverted_index = reader.inverted_index(text_field);
@@ -773,63 +756,63 @@ mod tests {
             assert_eq!(postings.term_freq(), 3);
             assert_eq!(postings.advance(), TERMINATED);
         }
+        Ok(())
     }
 
     #[test]
-    fn test_searcher_1() {
+    fn test_searcher_1() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let reader = index.reader().unwrap();
-        {
-            // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            index_writer.add_document(doc!(text_field=>"af af af b"));
-            index_writer.add_document(doc!(text_field=>"a b c"));
-            index_writer.add_document(doc!(text_field=>"a b c d"));
-            index_writer.commit().unwrap();
-        }
-        {
-            reader.reload().unwrap();
-            let searcher = reader.searcher();
-            let get_doc_ids = |terms: Vec<Term>| {
-                let query = BooleanQuery::new_multiterms_query(terms);
-                let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
-                topdocs.docs().to_vec()
-            };
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
-                vec![DocAddress(0, 1), DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
-                vec![DocAddress(0, 0)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
-                vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
-                vec![DocAddress(0, 1), DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
-                vec![DocAddress(0, 2)]
-            );
-            assert_eq!(
-                get_doc_ids(vec![
-                    Term::from_field_text(text_field, "b"),
-                    Term::from_field_text(text_field, "a"),
-                ]),
-                vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
-            );
-        }
+        let reader = index.reader()?;
+        // writing the segment
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"af af af b"));
+        index_writer.add_document(doc!(text_field=>"a b c"));
+        index_writer.add_document(doc!(text_field=>"a b c d"));
+        index_writer.commit()?;
+        reader.reload()?;
+        let searcher = reader.searcher();
+        let get_doc_ids = |terms: Vec<Term>| {
+            let query = BooleanQuery::new_multiterms_query(terms);
+            searcher
+                .search(&query, &TEST_COLLECTOR_WITH_SCORE)
+                .map(|topdocs| topdocs.docs().to_vec())
+        };
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
+            vec![DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
+            vec![DocAddress(0, 0)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
+            vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "c")])?,
+            vec![DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![Term::from_field_text(text_field, "d")])?,
+            vec![DocAddress(0, 2)]
+        );
+        assert_eq!(
+            get_doc_ids(vec![
+                Term::from_field_text(text_field, "b"),
+                Term::from_field_text(text_field, "a"),
+            ])?,
+            vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
+        );
+        Ok(())
     }
 
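`BooleanQuery::new_multiterms_query` above builds a disjunction: a document matches if it contains at least one of the given terms, which is why the query for ["b", "a"] returns the union of the two posting lists. A toy illustration of that set semantics (not part of the patch):

    use std::collections::BTreeSet;

    // Illustrative only: multiterm query results behave like a set union.
    fn main() {
        let docs_for_a: BTreeSet<u32> = vec![1, 2].into_iter().collect();
        let docs_for_b: BTreeSet<u32> = vec![0, 1, 2].into_iter().collect();
        let union: BTreeSet<u32> = docs_for_a.union(&docs_for_b).cloned().collect();
        let expected: BTreeSet<u32> = vec![0, 1, 2].into_iter().collect();
        assert_eq!(union, expected);
    }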
     #[test]
-    fn test_searcher_2() {
+    fn test_searcher_2() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
@@ -837,19 +820,17 @@ mod tests {
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::Manual)
-            .try_into()
-            .unwrap();
+            .try_into()?;
         assert_eq!(reader.searcher().num_docs(), 0u64);
-        {
-            // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            index_writer.add_document(doc!(text_field=>"af b"));
-            index_writer.add_document(doc!(text_field=>"a b c"));
-            index_writer.add_document(doc!(text_field=>"a b c d"));
-            index_writer.commit().unwrap();
-        }
-        reader.reload().unwrap();
+        // writing the segment
+        let mut index_writer = index.writer_for_tests()?;
+        index_writer.add_document(doc!(text_field=>"af b"));
+        index_writer.add_document(doc!(text_field=>"a b c"));
+        index_writer.add_document(doc!(text_field=>"a b c d"));
+        index_writer.commit()?;
+        reader.reload()?;
         assert_eq!(reader.searcher().num_docs(), 3u64);
+        Ok(())
     }
 
     #[test]
@@ -871,7 +852,7 @@ mod tests {
     }
 
     #[test]
-    fn test_wrong_fast_field_type() {
+    fn test_wrong_fast_field_type() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
         let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
@@ -881,14 +862,14 @@ mod tests {
         let schema = schema_builder.build();
 
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         {
             let document =
                 doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
             index_writer.add_document(document);
-            index_writer.commit().unwrap();
+            index_writer.commit()?;
         }
-        let reader = index.reader().unwrap();
+        let reader = index.reader()?;
         let searcher = reader.searcher();
         let segment_reader: &SegmentReader = searcher.segment_reader(0);
         {
@@ -927,11 +908,12 @@ mod tests {
             let fast_field_reader = fast_field_reader_opt.unwrap();
             assert_eq!(fast_field_reader.get(0), 4f64)
         }
+        Ok(())
     }
 
     // motivated by #729
     #[test]
-    fn test_update_via_delete_insert() {
+    fn test_update_via_delete_insert() -> crate::Result<()> {
         use crate::collector::Count;
         use crate::indexer::NoMergePolicy;
         use crate::query::AllQuery;
@@ -945,17 +927,17 @@ mod tests {
         let schema = schema_builder.build();
 
         let index = Index::create_in_ram(schema.clone());
-        let index_reader = index.reader().unwrap();
+        let index_reader = index.reader()?;
 
-        let mut index_writer = index.writer(3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
 
         for doc_id in 0u64..DOC_COUNT {
             index_writer.add_document(doc!(id => doc_id));
         }
-        index_writer.commit().unwrap();
+        index_writer.commit()?;
 
-        index_reader.reload().unwrap();
+        index_reader.reload()?;
         let searcher = index_reader.searcher();
 
         assert_eq!(
@@ -966,12 +948,11 @@ mod tests {
         // update the 10 elements by deleting and re-adding
         for doc_id in 0u64..DOC_COUNT {
             index_writer.delete_term(Term::from_field_u64(id, doc_id));
-            index_writer.commit().unwrap();
-            index_reader.reload().unwrap();
-            let doc = doc!(id => doc_id);
-            index_writer.add_document(doc);
-            index_writer.commit().unwrap();
-            index_reader.reload().unwrap();
+            index_writer.commit()?;
+            index_reader.reload()?;
+            index_writer.add_document(doc!(id => doc_id));
+            index_writer.commit()?;
+            index_reader.reload()?;
             let searcher = index_reader.searcher();
             // The number of documents should be stable.
             assert_eq!(
@@ -980,7 +961,7 @@ mod tests {
             );
         }
 
-        index_reader.reload().unwrap();
+        index_reader.reload()?;
         let searcher = index_reader.searcher();
         let segment_ids: Vec<SegmentId> = searcher
             .segment_readers()
@@ -989,12 +970,18 @@ mod tests {
             .collect();
         block_on(index_writer.merge(&segment_ids)).unwrap();
 
-        index_reader.reload().unwrap();
+        index_reader.reload()?;
         let searcher = index_reader.searcher();
+        assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
+        Ok(())
+    }
 
-        assert_eq!(
-            searcher.search(&AllQuery, &Count).unwrap(),
-            DOC_COUNT as usize
-        );
+    #[test]
+    fn test_validate_checksum() -> crate::Result<()> {
+        let index_path = tempfile::tempdir().expect("dir");
+        let schema = Schema::builder().build();
+        let index = Index::create_in_dir(&index_path, schema)?;
+        assert!(index.validate_checksum()?.is_empty());
+        Ok(())
     }
 }
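The update test above encodes tantivy's update idiom: there is no in-place update, so a document is "updated" by deleting its primary-key term and re-adding a fresh document. A hedged helper sketch of that idiom (the `id_field` argument and the commit-per-update policy are assumptions mirroring the test, not a library API):

    use tantivy::schema::Field;
    use tantivy::{Document, IndexWriter, Term};

    // Sketch only: update = delete by primary-key term, then re-add.
    fn update_doc(
        writer: &mut IndexWriter,
        id_field: Field,
        id_value: u64,
        new_doc: Document,
    ) -> tantivy::Result<()> {
        writer.delete_term(Term::from_field_u64(id_field, id_value));
        writer.add_document(new_doc);
        // The delete and the re-add become visible atomically at commit time.
        writer.commit()?;
        Ok(())
    }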
@@ -78,6 +78,7 @@ impl Positions {
     }
 }
 
+#[derive(Clone)]
 pub struct PositionReader {
     skip_read: OwnedRead,
     position_read: OwnedRead,
@@ -1,11 +1,21 @@
 use crate::common::{BinarySerializable, VInt};
 use crate::directory::ReadOnlySource;
+use crate::fieldnorm::FieldNormReader;
 use crate::postings::compression::{
     AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
 };
 use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
+use crate::query::BM25Weight;
 use crate::schema::IndexRecordOption;
-use crate::{DocId, TERMINATED};
+use crate::{DocId, Score, TERMINATED};
+
+fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
+    if let Some(first) = it.next() {
+        Some(it.fold(first, Score::max))
+    } else {
+        None
+    }
+}
 
 /// `BlockSegmentPostings` is a cursor iterating over blocks
 /// of documents.
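Why the `max_score` helper above rather than `Iterator::max()`? `Score` is an `f32`, which is only `PartialOrd` (NaN breaks total ordering), so `max()` does not compile; folding with `f32::max` is the standard workaround. Standalone illustration, not part of the patch:

    // `it.max()` would not compile for f32 (no Ord impl); fold with f32::max.
    fn max_score(mut it: impl Iterator<Item = f32>) -> Option<f32> {
        let first = it.next()?;
        Some(it.fold(first, f32::max))
    }

    fn main() {
        assert_eq!(max_score(vec![1.0, 3.5, 2.0].into_iter()), Some(3.5));
        assert_eq!(max_score(std::iter::empty()), None);
    }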
@@ -14,16 +24,18 @@ use crate::{DocId, TERMINATED};
 ///
 /// While it is useful for some very specific high-performance
 /// use cases, you should prefer using `SegmentPostings` for most usage.
+#[derive(Clone)]
 pub struct BlockSegmentPostings {
     pub(crate) doc_decoder: BlockDecoder,
     loaded_offset: usize,
     freq_decoder: BlockDecoder,
     freq_reading_option: FreqReadingOption,
+    block_max_score_cache: Option<Score>,
 
-    doc_freq: usize,
+    doc_freq: u32,
 
     data: ReadOnlySource,
-    skip_reader: SkipReader,
+    pub(crate) skip_reader: SkipReader,
 }
 
 fn decode_bitpacked_block(
@@ -47,10 +59,14 @@ fn decode_vint_block(
     doc_offset: DocId,
     num_vint_docs: usize,
 ) {
-    doc_decoder.clear();
-    let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
+    let num_consumed_bytes =
+        doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
     if let Some(freq_decoder) = freq_decoder_opt {
-        freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
+        freq_decoder.uncompress_vint_unsorted(
+            &data[num_consumed_bytes..],
+            num_vint_docs,
+            TERMINATED,
+        );
     }
 }
 
@@ -89,20 +105,63 @@ impl BlockSegmentPostings {
             None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
         };
 
-        let doc_freq = doc_freq as usize;
         let mut block_segment_postings = BlockSegmentPostings {
             doc_decoder: BlockDecoder::with_val(TERMINATED),
             loaded_offset: std::usize::MAX,
             freq_decoder: BlockDecoder::with_val(1),
             freq_reading_option,
+            block_max_score_cache: None,
             doc_freq,
             data: postings_data,
             skip_reader,
         };
-        block_segment_postings.advance();
+        block_segment_postings.load_block();
         block_segment_postings
     }
 
+    /// Returns the block_max_score for the current block.
+    /// It does not require the block to be loaded. For instance, it is ok to call this method
+    /// after having called `.shallow_seek(..)`.
+    ///
+    /// See `TermScorer::block_max_score(..)` for more information.
+    pub fn block_max_score(
+        &mut self,
+        fieldnorm_reader: &FieldNormReader,
+        bm25_weight: &BM25Weight,
+    ) -> Score {
+        if let Some(score) = self.block_max_score_cache {
+            return score;
+        }
+        if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
+            // If we are on a full block, the skip reader should have the block max
+            // information for us.
+            self.block_max_score_cache = Some(skip_reader_max_score);
+            return skip_reader_max_score;
+        }
+        // This is the last block of the segment posting list.
+        // If it is actually loaded, we can compute the block max manually.
+        if self.block_is_loaded() {
+            let docs = self.doc_decoder.output_array().iter().cloned();
+            let freqs = self.freq_decoder.output_array().iter().cloned();
+            let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
+                let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
+                bm25_weight.score(fieldnorm_id, term_freq)
+            });
+            let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
+            self.block_max_score_cache = Some(block_max_score);
+            return block_max_score;
+        }
+        // We do not have access to any good block max value. We return bm25_weight.max_score()
+        // as it is a valid upper bound.
+        //
+        // We do not cache it, however, so that it gets recomputed once the block is loaded.
+        bm25_weight.max_score()
+    }
+
+    pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
+        self.freq_reading_option
+    }
+
     // Resets the block segment postings on another position
     // in the postings file.
     //
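Context for the method above (a sketch of the pruning it enables, not the actual `TermScorer` code): Block-WAND compares each block's score upper bound against the current top-k threshold and skips whole blocks that cannot possibly contribute. The `Block` type below is a made-up stand-in:

    // Illustrative pruning loop. `Block` and `threshold` are assumptions;
    // the real logic lives in the term scorer.
    struct Block {
        max_score: f32,         // upper bound for any doc in this block
        docs: Vec<(u32, f32)>,  // (doc id, actual score)
    }

    fn collect_above_threshold(blocks: &[Block], threshold: f32) -> Vec<u32> {
        let mut hits = Vec::new();
        for block in blocks {
            // Key idea: if even the best doc of the block cannot beat the
            // threshold, skip the block without decoding it.
            if block.max_score <= threshold {
                continue;
            }
            for &(doc, score) in &block.docs {
                if score > threshold {
                    hits.push(doc);
                }
            }
        }
        hits
    }

    fn main() {
        let blocks = vec![
            Block { max_score: 0.5, docs: vec![(0, 0.4), (1, 0.5)] },
            Block { max_score: 2.0, docs: vec![(2, 1.9), (3, 0.1)] },
        ];
        assert_eq!(collect_above_threshold(&blocks, 1.0), vec![2]);
    }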
@@ -116,21 +175,23 @@ impl BlockSegmentPostings {
     pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
         let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
         self.data = ReadOnlySource::new(postings_data);
+        self.block_max_score_cache = None;
         self.loaded_offset = std::usize::MAX;
         if let Some(skip_data) = skip_data_opt {
             self.skip_reader.reset(skip_data, doc_freq);
         } else {
             self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
         }
-        self.doc_freq = doc_freq as usize;
+        self.doc_freq = doc_freq;
+        self.load_block();
     }
 
-    /// Returns the document frequency associated to this block postings.
+    /// Returns the overall number of documents in the block postings.
+    /// It does not take into account whether documents are deleted or not.
     ///
     /// This `doc_freq` is simply the sum of the lengths of all of the blocks,
     /// and it does not take deleted documents into account.
-    pub fn doc_freq(&self) -> usize {
+    pub fn doc_freq(&self) -> u32 {
         self.doc_freq
     }
 
@@ -140,11 +201,20 @@ impl BlockSegmentPostings {
     /// returned by `.docs()` is empty.
     #[inline]
     pub fn docs(&self) -> &[DocId] {
+        debug_assert!(self.block_is_loaded());
         self.doc_decoder.output_array()
     }
 
+    /// Returns a full block, regardless of whether the block is complete or incomplete
+    /// (as it happens for the last block of the posting list).
+    ///
+    /// In the latter case, the block is guaranteed to be padded with the sentinel value:
+    /// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
+    ///
+    /// This method is useful to run SSE2 linear search.
     #[inline(always)]
     pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
+        debug_assert!(self.block_is_loaded());
         self.doc_decoder.output_aligned()
     }
 
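The padding guarantee documented on `docs_aligned` is what makes a branch-light linear search over a fixed 128-slot block safe: the tail of an incomplete block holds the `TERMINATED` sentinel (`u32::MAX`), so a search for any real doc id always resolves inside the block instead of reading stale data. A scalar stand-in for the SSE2 search (illustration only):

    const COMPRESSION_BLOCK_SIZE: usize = 128;
    const TERMINATED: u32 = u32::MAX;

    // Scalar stand-in for the SSE2 search: find the first slot >= target.
    // Padding with TERMINATED means a too-large target resolves to a
    // sentinel slot instead of a leftover value from a previous block.
    fn linear_search(block: &[u32; COMPRESSION_BLOCK_SIZE], target: u32) -> usize {
        block
            .iter()
            .position(|&doc| doc >= target)
            .unwrap_or(COMPRESSION_BLOCK_SIZE - 1)
    }

    fn main() {
        let mut block = [TERMINATED; COMPRESSION_BLOCK_SIZE];
        block[..4].copy_from_slice(&[3, 7, 9, 12]); // incomplete last block
        assert_eq!(linear_search(&block, 8), 2); // lands on 9
        assert_eq!(block[linear_search(&block, 100)], TERMINATED);
    }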
@@ -157,12 +227,14 @@ impl BlockSegmentPostings {
     /// Return the array of `term freq` in the block.
     #[inline]
     pub fn freqs(&self) -> &[u32] {
+        debug_assert!(self.block_is_loaded());
         self.freq_decoder.output_array()
     }
 
     /// Return the frequency at index `idx` of the block.
     #[inline]
     pub fn freq(&self, idx: usize) -> u32 {
+        debug_assert!(self.block_is_loaded());
         self.freq_decoder.output(idx)
     }
 
@@ -173,23 +245,40 @@ impl BlockSegmentPostings {
     /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
     #[inline]
     pub fn block_len(&self) -> usize {
+        debug_assert!(self.block_is_loaded());
         self.doc_decoder.output_len
     }
 
-    pub(crate) fn position_offset(&self) -> u64 {
-        self.skip_reader.position_offset()
-    }
-
     /// Position on a block that may contain `target_doc`.
     ///
     /// If all docs are smaller than target, the block loaded may be empty,
     /// or be the last, incomplete VInt block.
     pub fn seek(&mut self, target_doc: DocId) {
-        self.skip_reader.seek(target_doc);
+        self.shallow_seek(target_doc);
         self.load_block();
     }
 
-    fn load_block(&mut self) {
+    pub(crate) fn position_offset(&self) -> u64 {
+        self.skip_reader.position_offset()
+    }
+
+    /// Dangerous API! This calls seek on the skip list,
+    /// but does not `.load_block()` afterwards.
+    ///
+    /// `.load_block()` needs to be called manually afterwards.
+    /// If all docs are smaller than target, the block loaded may be empty,
+    /// or be the last, incomplete VInt block.
+    pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
+        if self.skip_reader.seek(target_doc) {
+            self.block_max_score_cache = None;
+        }
+    }
+
+    pub(crate) fn block_is_loaded(&self) -> bool {
+        self.loaded_offset == self.skip_reader.byte_offset()
+    }
+
+    pub(crate) fn load_block(&mut self) {
         let offset = self.skip_reader.byte_offset();
         if self.loaded_offset == offset {
             return;
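A hedged sketch of the call pattern the `shallow_seek` / `load_block` split is designed for (bindings such as `postings`, `target`, `score_threshold`, `fieldnorm_reader`, and `bm25_weight` are assumptions; the real consumer is the term scorer): walk the skip data cheaply, and only decompress a block once its upper bound survives pruning.

    // Sketch only: metadata-first traversal, decode on demand.
    postings.shallow_seek(target); // moves the skip reader, no decode
    let upper_bound = postings.block_max_score(&fieldnorm_reader, &bm25_weight);
    if upper_bound > score_threshold {
        postings.load_block(); // pay for decompression only here
        debug_assert!(postings.block_is_loaded());
        for &doc in postings.docs() {
            // score `doc` ...
        }
    }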
@@ -214,7 +303,14 @@ impl BlockSegmentPostings {
                     tf_num_bits,
                 );
             }
-            BlockInfo::VInt(num_vint_docs) => {
+            BlockInfo::VInt { num_docs } => {
+                let data = {
+                    if num_docs == 0 {
+                        &[]
+                    } else {
+                        &self.data.as_slice()[offset..]
+                    }
+                };
                 decode_vint_block(
                     &mut self.doc_decoder,
                     if let FreqReadingOption::ReadFreq = self.freq_reading_option {
@@ -222,9 +318,9 @@ impl BlockSegmentPostings {
                     } else {
                         None
                     },
-                    &self.data.as_slice()[offset..],
+                    data,
                     self.skip_reader.last_doc_in_previous_block,
-                    num_vint_docs as usize,
+                    num_docs as usize,
                 );
             }
         }
@@ -233,21 +329,20 @@ impl BlockSegmentPostings {
     /// Advance to the next block.
     ///
     /// Returns false iff there were no remaining blocks.
-    pub fn advance(&mut self) -> bool {
-        if !self.skip_reader.advance() {
-            return false;
-        }
+    pub fn advance(&mut self) {
+        self.skip_reader.advance();
+        self.block_max_score_cache = None;
         self.load_block();
-        true
     }
 
     /// Returns an empty segment postings object
     pub fn empty() -> BlockSegmentPostings {
         BlockSegmentPostings {
             doc_decoder: BlockDecoder::with_val(TERMINATED),
-            loaded_offset: std::usize::MAX,
+            loaded_offset: 0,
             freq_decoder: BlockDecoder::with_val(1),
             freq_reading_option: FreqReadingOption::NoFreq,
+            block_max_score_cache: None,
             doc_freq: 0,
             data: ReadOnlySource::new(vec![]),
             skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
@@ -273,8 +368,10 @@ mod tests {
     #[test]
     fn test_empty_segment_postings() {
         let mut postings = SegmentPostings::empty();
+        assert_eq!(postings.doc(), TERMINATED);
         assert_eq!(postings.advance(), TERMINATED);
         assert_eq!(postings.advance(), TERMINATED);
+        assert_eq!(postings.doc_freq(), 0);
         assert_eq!(postings.len(), 0);
     }
 
@@ -294,7 +391,10 @@ mod tests {
     #[test]
     fn test_empty_block_segment_postings() {
         let mut postings = BlockSegmentPostings::empty();
-        assert!(!postings.advance());
+        assert!(postings.docs().is_empty());
+        assert_eq!(postings.doc_freq(), 0);
+        postings.advance();
+        assert!(postings.docs().is_empty());
         assert_eq!(postings.doc_freq(), 0);
     }
 
@@ -306,13 +406,14 @@ mod tests {
         assert_eq!(block_segments.doc_freq(), 100_000);
         loop {
            let block = block_segments.docs();
+            if block.is_empty() {
+                break;
+            }
             for (i, doc) in block.iter().cloned().enumerate() {
                 assert_eq!(offset + (i as u32), doc);
             }
             offset += block.len() as u32;
-            if block_segments.advance() {
-                break;
-            }
+            block_segments.advance();
         }
     }
 
@@ -354,7 +455,7 @@ mod tests {
         let int_field = schema_builder.add_u64_field("id", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         let mut last_doc = 0u32;
         for &doc in docs {
             for _ in last_doc..doc {
@@ -373,7 +474,7 @@ mod tests {
     }
 
     #[test]
-    fn test_block_segment_postings_skip2() {
+    fn test_block_segment_postings_seek() {
         let mut docs = vec![0];
         for i in 0..1300 {
             docs.push((i * i / 100) + i);
@@ -395,7 +496,7 @@ mod tests {
         let int_field = schema_builder.add_u64_field("id", INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         // create two postings lists, one containing even numbers,
         // the other containing odd numbers.
         for i in 0..6 {
@@ -421,7 +522,6 @@ mod tests {
                 let term_info = inverted_index.get_term_info(&term).unwrap();
                 inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
             }
-            assert!(block_segments.advance());
             assert_eq!(block_segments.docs(), &[1, 3, 5]);
         }
     }
 }
@@ -1,5 +1,4 @@
 use crate::common::FixedSize;
-use crate::docset::TERMINATED;
 use bitpacking::{BitPacker, BitPacker4x};
 
 pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
@@ -53,8 +52,10 @@ impl BlockEncoder {
 /// We ensure that the OutputBuffer is aligned on 128 bits
 /// in order to run SSE2 linear search on it.
 #[repr(align(128))]
+#[derive(Clone)]
 pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
 
+#[derive(Clone)]
 pub struct BlockDecoder {
     bitpacker: BitPacker4x,
     output: AlignedBuffer,
@@ -107,10 +108,6 @@ impl BlockDecoder {
     pub fn output(&self, idx: usize) -> u32 {
         self.output.0[idx]
     }
-
-    pub fn clear(&mut self) {
-        self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
-    }
 }
 
 pub trait VIntEncoder {
@@ -147,11 +144,14 @@ pub trait VIntDecoder {
     /// For instance, if the delta-encoded values are `1, 3, 9`, and the
     /// `offset` is 5, then the output will be:
     /// `5 + 1 = 6, 6 + 3 = 9, 9 + 9 = 18`
+    ///
+    /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
     fn uncompress_vint_sorted(
         &mut self,
         compressed_data: &[u8],
         offset: u32,
         num_els: usize,
+        padding: u32,
     ) -> usize;
 
     /// Uncompress an array of `u32s`, compressed using variable
@@ -159,7 +159,14 @@ pub trait VIntDecoder {
     ///
     /// The method takes the number of ints to decompress, and returns
     /// the number of bytes that were read to decompress them.
-    fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize;
+    ///
+    /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
+    fn uncompress_vint_unsorted(
+        &mut self,
+        compressed_data: &[u8],
+        num_els: usize,
+        padding: u32,
+    ) -> usize;
 }
 
 impl VIntEncoder for BlockEncoder {
@@ -178,13 +185,21 @@ impl VIntDecoder for BlockDecoder {
         compressed_data: &[u8],
         offset: u32,
         num_els: usize,
+        padding: u32,
     ) -> usize {
         self.output_len = num_els;
+        self.output.0.iter_mut().for_each(|el| *el = padding);
         vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
     }
 
-    fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
+    fn uncompress_vint_unsorted(
+        &mut self,
+        compressed_data: &[u8],
+        num_els: usize,
+        padding: u32,
+    ) -> usize {
         self.output_len = num_els;
+        self.output.0.iter_mut().for_each(|el| *el = padding);
         vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
     }
 }
@@ -193,6 +208,7 @@ impl VIntDecoder for BlockDecoder {
|
|||||||
pub mod tests {
|
pub mod tests {
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use crate::TERMINATED;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_encode_sorted_block() {
|
fn test_encode_sorted_block() {
|
||||||
@@ -271,18 +287,20 @@ pub mod tests {
|
|||||||
}
|
}
|
||||||
#[test]
|
#[test]
|
||||||
fn test_encode_vint() {
|
fn test_encode_vint() {
|
||||||
{
|
const PADDING_VALUE: u32 = 234_234_345u32;
|
||||||
let expected_length = 154;
|
let expected_length = 154;
|
||||||
let mut encoder = BlockEncoder::new();
|
let mut encoder = BlockEncoder::new();
|
||||||
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
|
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
|
||||||
for offset in &[0u32, 1u32, 2u32] {
|
for offset in &[0u32, 1u32, 2u32] {
|
||||||
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
|
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
|
||||||
assert!(encoded_data.len() <= expected_length);
|
assert!(encoded_data.len() <= expected_length);
|
||||||
let mut decoder = BlockDecoder::default();
|
let mut decoder = BlockDecoder::default();
|
||||||
let consumed_num_bytes =
|
let consumed_num_bytes =
|
||||||
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
|
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
|
||||||
assert_eq!(consumed_num_bytes, encoded_data.len());
|
assert_eq!(consumed_num_bytes, encoded_data.len());
|
||||||
assert_eq!(input, decoder.output_array());
|
assert_eq!(input, decoder.output_array());
|
||||||
|
for i in input.len()..COMPRESSION_BLOCK_SIZE {
|
||||||
|
assert_eq!(decoder.output(i), PADDING_VALUE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -292,6 +310,7 @@ pub mod tests {
|
|||||||
mod bench {
|
mod bench {
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use crate::TERMINATED;
|
||||||
use rand::rngs::StdRng;
|
use rand::rngs::StdRng;
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use rand::SeedableRng;
|
use rand::SeedableRng;
|
||||||
@@ -322,7 +341,7 @@ mod bench {
|
|||||||
let mut encoder = BlockEncoder::new();
|
let mut encoder = BlockEncoder::new();
|
||||||
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
|
||||||
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
|
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
|
||||||
let mut decoder = BlockDecoder::new();
|
let mut decoder = BlockDecoder::default();
|
||||||
b.iter(|| {
|
b.iter(|| {
|
||||||
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
|
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
|
||||||
});
|
});
|
||||||
@@ -357,9 +376,9 @@ mod bench {
|
|||||||
let mut encoder = BlockEncoder::new();
|
let mut encoder = BlockEncoder::new();
|
||||||
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
|
||||||
let compressed = encoder.compress_vint_sorted(&data, 0u32);
|
let compressed = encoder.compress_vint_sorted(&data, 0u32);
|
||||||
let mut decoder = BlockDecoder::new();
|
let mut decoder = BlockDecoder::default();
|
||||||
b.iter(|| {
|
b.iter(|| {
|
||||||
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
|
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
src/postings/mod.rs

@@ -5,6 +5,7 @@ Postings module (also called inverted index)
 mod block_search;
 mod block_segment_postings;
 pub(crate) mod compression;
+mod field_stats;
 mod postings;
 mod postings_writer;
 mod recorder;
@@ -15,6 +16,7 @@ mod stacker;
 mod term_info;

 pub(crate) use self::block_search::BlockSearcher;
+pub(crate) use self::field_stats::{FieldStat, FieldStats};

 pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
 pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
@@ -65,45 +67,42 @@ pub mod tests {
     use std::iter;

     #[test]
-    pub fn test_position_write() {
+    pub fn test_position_write() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut segment = index.new_segment();
-        let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
-        {
-            let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
-            field_serializer.new_term("abc".as_bytes()).unwrap();
-            for doc_id in 0u32..120u32 {
-                let delta_positions = vec![1, 2, 3, 2];
-                field_serializer
-                    .write_doc(doc_id, 4, &delta_positions)
-                    .unwrap();
-            }
-            field_serializer.close_term().unwrap();
+        let mut posting_serializer = InvertedIndexSerializer::open(&mut segment)?;
+        let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4, None)?;
+        field_serializer.new_term("abc".as_bytes(), 12u32)?;
+        for doc_id in 0u32..120u32 {
+            let delta_positions = vec![1, 2, 3, 2];
+            field_serializer.write_doc(doc_id, 4, &delta_positions)?;
         }
-        posting_serializer.close().unwrap();
-        let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
+        field_serializer.close_term()?;
+        posting_serializer.close()?;
+        let read = segment.open_read(SegmentComponent::POSITIONS)?;
         assert!(read.len() <= 140);
+        Ok(())
     }

     #[test]
-    pub fn test_skip_positions() {
+    pub fn test_skip_positions() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();
         let title = schema_builder.add_text_field("title", TEXT);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 30_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests()?;
         index_writer.add_document(doc!(title => r#"abc abc abc"#));
         index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
         for _ in 0..1_000 {
             index_writer.add_document(doc!(title => r#"abc abc abc"#));
         }
         index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
-        index_writer.commit().unwrap();
+        index_writer.commit()?;

-        let searcher = index.reader().unwrap().searcher();
+        let searcher = index.reader()?.searcher();
         let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
         let term = Term::from_field_text(title, "abc");
         let mut positions = Vec::new();
@@ -158,6 +157,7 @@ pub mod tests {
             postings.positions(&mut positions);
             assert_eq!(&[0, 5], &positions[..]);
         }
+        Ok(())
     }

     #[test]
@@ -178,7 +178,7 @@ pub mod tests {
             .tokenizers()
             .register("simple_no_truncation", SimpleTokenizer);
         let reader = index.reader().unwrap();
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.set_merge_policy(Box::new(NoMergePolicy));
         {
             index_writer.add_document(doc!(text_field=>exceeding_token_text));
@@ -207,7 +207,7 @@ pub mod tests {
     }

     #[test]
-    pub fn test_position_and_fieldnorm1() {
+    pub fn test_position_and_fieldnorm1() -> crate::Result<()> {
         let mut positions = Vec::new();
         let mut schema_builder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
@@ -219,42 +219,38 @@ pub mod tests {
         let mut segment_writer =
             SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
         {
-            let mut doc = Document::default();
             // checking that position works if the field has two values
-            doc.add_text(text_field, "a b a c a d a a.");
-            doc.add_text(text_field, "d d d d a");
             let op = AddOperation {
                 opstamp: 0u64,
-                document: doc,
+                document: doc!(
+                    text_field => "a b a c a d a a.",
+                    text_field => "d d d d a"
+                ),
             };
-            segment_writer.add_document(op, &schema).unwrap();
+            segment_writer.add_document(op, &schema)?;
         }
         {
-            let mut doc = Document::default();
-            doc.add_text(text_field, "b a");
             let op = AddOperation {
                 opstamp: 1u64,
-                document: doc,
+                document: doc!(text_field => "b a"),
             };
             segment_writer.add_document(op, &schema).unwrap();
         }
         for i in 2..1000 {
-            let mut doc = Document::default();
-            let mut text = iter::repeat("e ").take(i).collect::<String>();
+            let mut text: String = iter::repeat("e ").take(i).collect();
             text.push_str(" a");
-            doc.add_text(text_field, &text);
             let op = AddOperation {
                 opstamp: 2u64,
-                document: doc,
+                document: doc!(text_field => text),
             };
             segment_writer.add_document(op, &schema).unwrap();
         }
-        segment_writer.finalize().unwrap();
+        segment_writer.finalize()?;
     }
     {
-        let segment_reader = SegmentReader::open(&segment).unwrap();
+        let segment_reader = SegmentReader::open(&segment)?;
         {
-            let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field);
+            let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
             assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
             assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
             for i in 2..1000 {
@@ -314,6 +310,7 @@ pub mod tests {
                 assert_eq!(postings_e.doc(), TERMINATED);
             }
         }
+        Ok(())
     }

     #[test]
@@ -324,7 +321,7 @@ pub mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field => "g b b d c g c"));
             index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
             assert!(index_writer.commit().is_ok());
@@ -356,7 +353,7 @@ pub mod tests {

         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             for i in 0u64..num_docs as u64 {
                 let doc = doc!(value_field => 2u64, value_field => i % 2u64);
                 index_writer.add_document(doc);
@@ -427,7 +424,7 @@ pub mod tests {

         // delete some of the documents
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.delete_term(term_0);
             assert!(index_writer.commit().is_ok());
         }
@@ -481,7 +478,7 @@ pub mod tests {

         // delete everything else
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.delete_term(term_1);
             assert!(index_writer.commit().is_ok());
         }
@@ -524,7 +521,7 @@ pub mod tests {
         let index = Index::create_in_ram(schema);
         let posting_list_size = 1_000_000;
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             for _ in 0..posting_list_size {
                 let mut doc = Document::default();
                 if rng.gen_bool(1f64 / 15f64) {
@@ -582,6 +579,9 @@ pub mod tests {
     ) {
         for target in targets {
             let mut postings_opt = postings_factory();
+            if target < postings_opt.doc() {
+                continue;
+            }
             let mut postings_unopt = UnoptimizedDocSet::wrap(postings_factory());
             let skip_result_opt = postings_opt.seek(target);
             let skip_result_unopt = postings_unopt.seek(target);
@@ -729,7 +729,7 @@ mod bench {
         let mut s = 0u32;
         while segment_postings.doc() != TERMINATED {
             s += (segment_postings.doc() & n) % 1024;
-            segment_postings.advance()
+            segment_postings.advance();
         }
         s
     });
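Several tests in this file move from `.unwrap()` chains to returning `crate::Result<()>`, letting `?` propagate failures. This relies on a plain Rust feature, sketched below with a standard-library error type rather than tantivy's:

```rust
use std::num::ParseIntError;

// A #[test] may return any Result whose error type implements Debug;
// a failing `?` then reports the error instead of unwinding through unwrap().
#[test]
fn parses_doc_count() -> Result<(), ParseIntError> {
    let num_docs: u32 = "120".parse()?; // was: "120".parse().unwrap()
    assert_eq!(num_docs, 120);
    Ok(())
}
```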
src/postings/postings_writer.rs

@@ -1,5 +1,6 @@
 use super::stacker::{Addr, MemoryArena, TermHashMap};

+use crate::fieldnorm::FieldNormReaders;
 use crate::postings::recorder::{
     BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
 };
@@ -104,6 +105,7 @@ impl MultiFieldPostingsWriter {
         doc: DocId,
         field: Field,
         token_stream: &mut dyn TokenStream,
+        term_buffer: &mut Term,
     ) -> u32 {
         let postings_writer =
             self.per_field_postings_writers[field.field_id() as usize].deref_mut();
@@ -113,6 +115,7 @@ impl MultiFieldPostingsWriter {
             field,
             token_stream,
             &mut self.heap,
+            term_buffer,
         )
     }

@@ -128,6 +131,7 @@ impl MultiFieldPostingsWriter {
     pub fn serialize(
         &self,
         serializer: &mut InvertedIndexSerializer,
+        fieldnorm_readers: FieldNormReaders,
     ) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
         let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
             self.term_index.iter().collect();
@@ -161,8 +165,12 @@ impl MultiFieldPostingsWriter {
             }

             let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
-            let mut field_serializer =
-                serializer.new_field(field, postings_writer.total_num_tokens())?;
+            let fieldnorm_reader = fieldnorm_readers.get_field(field);
+            let mut field_serializer = serializer.new_field(
+                field,
+                postings_writer.total_num_tokens(),
+                fieldnorm_reader,
+            )?;
             postings_writer.serialize(
                 &term_offsets[start..stop],
                 &mut field_serializer,
@@ -214,13 +222,20 @@ pub trait PostingsWriter {
         field: Field,
         token_stream: &mut dyn TokenStream,
         heap: &mut MemoryArena,
+        term_buffer: &mut Term,
     ) -> u32 {
-        let mut term = Term::for_field(field);
+        term_buffer.set_field(field);
         let mut sink = |token: &Token| {
             // We skip all tokens with a len greater than u16.
             if token.text.len() <= MAX_TOKEN_LEN {
-                term.set_text(token.text.as_str());
-                self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
+                term_buffer.set_text(token.text.as_str());
+                self.subscribe(
+                    term_index,
+                    doc_id,
+                    token.position as u32,
+                    &term_buffer,
+                    heap,
+                );
             } else {
                 info!(
                     "A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
@@ -297,7 +312,8 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
         let mut buffer_lender = BufferLender::default();
         for &(term_bytes, addr, _) in term_addrs {
             let recorder: Rec = termdict_heap.read(addr);
-            serializer.new_term(&term_bytes[4..])?;
+            let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
+            serializer.new_term(&term_bytes[4..], term_doc_freq)?;
             recorder.serialize(&mut buffer_lender, serializer, heap)?;
             serializer.close_term()?;
         }
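Threading `term_buffer: &mut Term` through `index_text` replaces the per-call `Term::for_field` allocation with a single buffer reused across all tokens and documents. The sketch below imitates the layout implied by `&term_bytes[4..]` above — a 4-byte field id prefix followed by the term text — but the `TermBuffer` type itself is invented for illustration:

```rust
/// Illustration only: a reusable term buffer holding a 4-byte field id
/// prefix followed by the token bytes, so tokenization never reallocates.
struct TermBuffer(Vec<u8>);

impl TermBuffer {
    fn set_field(&mut self, field_id: u32) {
        self.0.clear();
        self.0.extend_from_slice(&field_id.to_be_bytes());
    }

    fn set_text(&mut self, text: &str) {
        self.0.truncate(4); // keep the field prefix, drop the previous token
        self.0.extend_from_slice(text.as_bytes());
    }

    fn term_bytes(&self) -> &[u8] {
        &self.0[4..]
    }
}

fn main() {
    let mut term_buffer = TermBuffer(Vec::new());
    term_buffer.set_field(1);
    for token in ["abc", "be", "abc"] {
        term_buffer.set_text(token); // one allocation reused for every token
        assert_eq!(term_buffer.term_bytes(), token.as_bytes());
    }
}
```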
src/postings/recorder.rs

@@ -75,6 +75,10 @@ pub(crate) trait Recorder: Copy + 'static {
         serializer: &mut FieldSerializer<'_>,
         heap: &MemoryArena,
     ) -> io::Result<()>;
+    /// Returns the number of documents containing this term.
+    ///
+    /// Returns `None` if not available.
+    fn term_doc_freq(&self) -> Option<u32>;
 }

 /// Only records the doc ids
@@ -113,11 +117,16 @@ impl Recorder for NothingRecorder {
     ) -> io::Result<()> {
         let buffer = buffer_lender.lend_u8();
         self.stack.read_to_end(heap, buffer);
+        // TODO avoid reading twice.
         for doc in VInt32Reader::new(&buffer[..]) {
             serializer.write_doc(doc as u32, 0u32, &[][..])?;
         }
         Ok(())
     }

+    fn term_doc_freq(&self) -> Option<u32> {
+        None
+    }
 }

 /// Recorder encoding document ids, and term frequencies
@@ -126,6 +135,7 @@ pub struct TermFrequencyRecorder {
     stack: ExpUnrolledLinkedList,
     current_doc: DocId,
     current_tf: u32,
+    term_doc_freq: u32,
 }

 impl Recorder for TermFrequencyRecorder {
@@ -134,6 +144,7 @@ impl Recorder for TermFrequencyRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::max_value(),
             current_tf: 0u32,
+            term_doc_freq: 0u32,
         }
     }

@@ -142,6 +153,7 @@ impl Recorder for TermFrequencyRecorder {
     }

     fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
+        self.term_doc_freq += 1;
         self.current_doc = doc;
         let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
     }
@@ -172,6 +184,10 @@ impl Recorder for TermFrequencyRecorder {

         Ok(())
     }

+    fn term_doc_freq(&self) -> Option<u32> {
+        Some(self.term_doc_freq)
+    }
 }

 /// Recorder encoding term frequencies as well as positions.
@@ -179,12 +195,14 @@ impl Recorder for TermFrequencyRecorder {
 pub struct TFAndPositionRecorder {
     stack: ExpUnrolledLinkedList,
     current_doc: DocId,
+    term_doc_freq: u32,
 }
 impl Recorder for TFAndPositionRecorder {
     fn new() -> Self {
         TFAndPositionRecorder {
             stack: ExpUnrolledLinkedList::new(),
             current_doc: u32::max_value(),
+            term_doc_freq: 0u32,
         }
     }

@@ -194,6 +212,7 @@ impl Recorder for TFAndPositionRecorder {

     fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
         self.current_doc = doc;
+        self.term_doc_freq += 1u32;
         let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
     }

@@ -233,6 +252,10 @@ impl Recorder for TFAndPositionRecorder {
         }
         Ok(())
     }

+    fn term_doc_freq(&self) -> Option<u32> {
+        Some(self.term_doc_freq)
+    }
 }

 #[cfg(test)]
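Each recorder now tracks `term_doc_freq`, incremented once per `new_doc`, so serialization can hand the serializer the term's document frequency before any posting block is written — which is exactly what the BM25 precomputation further down needs. A reduced sketch of the counting logic, detached from tantivy's memory arena:

```rust
/// Reduced sketch: a recorder that counts the documents it has seen for
/// one term. The real recorders also write doc ids / positions into a
/// memory arena; only the counting is shown here.
#[derive(Default)]
struct CountingRecorder {
    current_doc: u32,
    term_doc_freq: u32,
}

impl CountingRecorder {
    fn new_doc(&mut self, doc: u32) {
        self.term_doc_freq += 1; // once per document, regardless of term frequency
        self.current_doc = doc;
    }

    fn term_doc_freq(&self) -> Option<u32> {
        Some(self.term_doc_freq)
    }
}

fn main() {
    let mut recorder = CountingRecorder::default();
    for doc in [0u32, 3, 7] {
        recorder.new_doc(doc);
    }
    assert_eq!(recorder.term_doc_freq(), Some(3));
}
```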
src/postings/segment_postings.rs

@@ -10,9 +10,10 @@ use crate::postings::BlockSearcher;
 use crate::postings::Postings;

 use crate::schema::IndexRecordOption;
-use crate::DocId;
+use crate::{DocId, TERMINATED};

 use crate::directory::ReadOnlySource;
+use crate::fastfield::DeleteBitSet;
 use crate::postings::BlockSegmentPostings;

 /// `SegmentPostings` represents the inverted list or postings associated to
@@ -20,8 +21,9 @@ use crate::postings::BlockSegmentPostings;
 ///
 /// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
 /// Positions on the other hand, are optionally entirely decoded upfront.
+#[derive(Clone)]
 pub struct SegmentPostings {
-    block_cursor: BlockSegmentPostings,
+    pub(crate) block_cursor: BlockSegmentPostings,
     cur: usize,
     position_reader: Option<PositionReader>,
     block_searcher: BlockSearcher,
@@ -38,6 +40,31 @@ impl SegmentPostings {
         }
     }

+    /// Compute the number of non-deleted documents.
+    ///
+    /// This method will clone and scan through the posting lists.
+    /// (this is a rather expensive operation).
+    pub fn doc_freq_given_deletes(&self, delete_bitset: &DeleteBitSet) -> u32 {
+        let mut docset = self.clone();
+        let mut doc_freq = 0;
+        loop {
+            let doc = docset.doc();
+            if doc == TERMINATED {
+                return doc_freq;
+            }
+            if delete_bitset.is_alive(doc) {
+                doc_freq += 1u32;
+            }
+            docset.advance();
+        }
+    }
+
+    /// Returns the overall number of documents in the block postings.
+    /// It does not take into account whether documents are deleted or not.
+    pub fn doc_freq(&self) -> u32 {
+        self.block_cursor.doc_freq()
+    }
+
     /// Creates a segment postings object with the given documents
     /// and no frequency encoded.
     ///
@@ -49,7 +76,9 @@ impl SegmentPostings {
     pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
         let mut buffer = Vec::new();
         {
-            let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false);
+            let mut postings_serializer =
+                PostingsSerializer::new(&mut buffer, 0.0, false, false, None);
+            postings_serializer.new_term(docs.len() as u32);
             for &doc in docs {
                 postings_serializer.write_doc(doc, 1u32);
             }
@@ -66,6 +95,51 @@ impl SegmentPostings {
         SegmentPostings::from_block_postings(block_segment_postings, None)
     }

+    /// Helper function to create `SegmentPostings` for tests.
+    #[cfg(test)]
+    pub fn create_from_docs_and_tfs(
+        doc_and_tfs: &[(u32, u32)],
+        fieldnorms: Option<&[u32]>,
+    ) -> SegmentPostings {
+        use crate::fieldnorm::FieldNormReader;
+        use crate::Score;
+        let mut buffer: Vec<u8> = Vec::new();
+        let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
+        let average_field_norm = fieldnorms
+            .map(|fieldnorms| {
+                if fieldnorms.len() == 0 {
+                    return 0.0;
+                }
+                let total_num_tokens: u64 = fieldnorms
+                    .iter()
+                    .map(|&fieldnorm| fieldnorm as u64)
+                    .sum::<u64>();
+                total_num_tokens as Score / fieldnorms.len() as Score
+            })
+            .unwrap_or(0.0);
+        let mut postings_serializer = PostingsSerializer::new(
+            &mut buffer,
+            average_field_norm,
+            true,
+            false,
+            fieldnorm_reader,
+        );
+        postings_serializer.new_term(doc_and_tfs.len() as u32);
+        for &(doc, tf) in doc_and_tfs {
+            postings_serializer.write_doc(doc, tf);
+        }
+        postings_serializer
+            .close_term(doc_and_tfs.len() as u32)
+            .unwrap();
+        let block_segment_postings = BlockSegmentPostings::from_data(
+            doc_and_tfs.len() as u32,
+            ReadOnlySource::from(buffer),
+            IndexRecordOption::WithFreqs,
+            IndexRecordOption::WithFreqs,
+        );
+        SegmentPostings::from_block_postings(block_segment_postings, None)
+    }
+
     /// Reads a Segment postings from an &[u8]
     ///
     /// * `len` - number of document in the posting lists.
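`doc_freq_given_deletes` above is deliberately simple: clone the cursor, walk it to `TERMINATED`, and count the documents the delete bitset still reports as alive. A self-contained sketch of the same loop shape, with a closure standing in for `DeleteBitSet::is_alive`:

```rust
const TERMINATED: u32 = u32::MAX;

/// Stand-in for a postings cursor over a sorted doc id list.
struct Cursor<'a> {
    docs: &'a [u32],
    pos: usize,
}

impl<'a> Cursor<'a> {
    fn doc(&self) -> u32 {
        self.docs.get(self.pos).copied().unwrap_or(TERMINATED)
    }
    fn advance(&mut self) {
        self.pos += 1;
    }
}

/// Count non-deleted docs by scanning to TERMINATED, mirroring the shape of
/// `doc_freq_given_deletes` (a closure replaces the DeleteBitSet here).
fn doc_freq_given_deletes(mut cursor: Cursor, is_alive: impl Fn(u32) -> bool) -> u32 {
    let mut doc_freq = 0;
    loop {
        let doc = cursor.doc();
        if doc == TERMINATED {
            return doc_freq;
        }
        if is_alive(doc) {
            doc_freq += 1;
        }
        cursor.advance();
    }
}

fn main() {
    // Same values as the test_doc_freq test below: doc 2 deleted leaves 2 alive.
    let cursor = Cursor { docs: &[0, 2, 10], pos: 0 };
    assert_eq!(doc_freq_given_deletes(cursor, |doc| doc != 2), 2);
}
```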
@@ -90,6 +164,7 @@ impl DocSet for SegmentPostings {
     // next needs to be called a first time to point to the correct element.
     #[inline]
     fn advance(&mut self) -> DocId {
+        debug_assert!(self.block_cursor.block_is_loaded());
         if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
             self.cur = 0;
             self.block_cursor.advance();
@@ -100,14 +175,15 @@ impl DocSet for SegmentPostings {
     }

     fn seek(&mut self, target: DocId) -> DocId {
-        if self.doc() == target {
-            return target;
+        debug_assert!(self.doc() <= target);
+        if self.doc() >= target {
+            return self.doc();
         }

         self.block_cursor.seek(target);

         // At this point we are on the block, that might contain our document.
         let output = self.block_cursor.docs_aligned();

         self.cur = self.block_searcher.search_in_block(&output, target);

         // The last block is not full and padded with the value TERMINATED,
@@ -123,6 +199,7 @@ impl DocSet for SegmentPostings {
         // After the search, the cursor should point to the first value of TERMINATED.
         let doc = output.0[self.cur];
         debug_assert!(doc >= target);
+        debug_assert_eq!(doc, self.doc());
         doc
     }

@@ -139,7 +216,7 @@ impl DocSet for SegmentPostings {

 impl HasLen for SegmentPostings {
     fn len(&self) -> usize {
-        self.block_cursor.doc_freq()
+        self.block_cursor.doc_freq() as usize
     }
 }

@@ -194,6 +271,7 @@ mod tests {
     use crate::common::HasLen;

     use crate::docset::{DocSet, TERMINATED};
+    use crate::fastfield::DeleteBitSet;
     use crate::postings::postings::Postings;

     #[test]
@@ -216,4 +294,14 @@ mod tests {
         let postings = SegmentPostings::empty();
         assert_eq!(postings.term_freq(), 1);
     }
+
+    #[test]
+    fn test_doc_freq() {
+        let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
+        assert_eq!(docs.doc_freq(), 3);
+        let delete_bitset = DeleteBitSet::for_test(&[2], 12);
+        assert_eq!(docs.doc_freq_given_deletes(&delete_bitset), 2);
+        let all_deleted = DeleteBitSet::for_test(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
+        assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
+    }
 }
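The reworked `seek` in the hunks above tightens the DocSet contract: targets must be non-decreasing (`debug_assert!(self.doc() <= target)`), seeking at or behind the current doc is a no-op, and the return value is the first doc at or past the target. A small sketch of that contract over a plain sorted slice:

```rust
// Sketch of the seek contract the new debug_asserts enforce: targets may only
// move forward, and seek lands on the first doc >= target.
fn seek_sorted(docs: &[u32], start: usize, target: u32) -> usize {
    debug_assert!(docs.get(start).map_or(true, |&doc| doc <= target));
    start + docs[start..].partition_point(|&doc| doc < target)
}

fn main() {
    let docs = [1u32, 5, 9, 12];
    let pos = seek_sorted(&docs, 0, 6);
    assert_eq!(docs[pos], 9); // first doc >= 6
    // a second seek may only move forward:
    let pos2 = seek_sorted(&docs, pos, 12);
    assert_eq!(docs[pos2], 12);
}
```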
src/postings/serializer.rs

@@ -1,15 +1,21 @@
-use super::TermInfo;
-use crate::common::{BinarySerializable, VInt};
+use super::{FieldStat, FieldStats, TermInfo};
 use crate::common::{CompositeWrite, CountingWriter};
 use crate::core::Segment;
 use crate::directory::WritePtr;
+use crate::fieldnorm::FieldNormReader;
 use crate::positions::PositionSerializer;
 use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
 use crate::postings::skip::SkipSerializer;
+use crate::query::BM25Weight;
 use crate::schema::Schema;
 use crate::schema::{Field, FieldEntry, FieldType};
 use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
-use crate::DocId;
+use crate::{
+    common::{BinarySerializable, VInt},
+    directory::TerminatingWrite,
+};
+use crate::{DocId, Score};
+use std::cmp::Ordering;
 use std::io::{self, Write};

 /// `InvertedIndexSerializer` is in charge of serializing
@@ -48,6 +54,8 @@ pub struct InvertedIndexSerializer {
     postings_write: CompositeWrite<WritePtr>,
     positions_write: CompositeWrite<WritePtr>,
     positionsidx_write: CompositeWrite<WritePtr>,
+    field_stats: FieldStats,
+    field_stats_write: WritePtr,
     schema: Schema,
 }

@@ -58,6 +66,7 @@ impl InvertedIndexSerializer {
         postings_write: CompositeWrite<WritePtr>,
         positions_write: CompositeWrite<WritePtr>,
         positionsidx_write: CompositeWrite<WritePtr>,
+        field_stats_write: WritePtr,
         schema: Schema,
     ) -> crate::Result<InvertedIndexSerializer> {
         Ok(InvertedIndexSerializer {
@@ -65,18 +74,21 @@ impl InvertedIndexSerializer {
             postings_write,
             positions_write,
             positionsidx_write,
+            field_stats: FieldStats::default(),
+            field_stats_write,
             schema,
         })
     }

     /// Open a new `PostingsSerializer` for the given segment
     pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
-        use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
+        use crate::SegmentComponent::{FIELDSTATS, POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
         InvertedIndexSerializer::create(
             CompositeWrite::wrap(segment.open_write(TERMS)?),
             CompositeWrite::wrap(segment.open_write(POSTINGS)?),
             CompositeWrite::wrap(segment.open_write(POSITIONS)?),
             CompositeWrite::wrap(segment.open_write(POSITIONSSKIP)?),
+            segment.open_write(FIELDSTATS)?,
             segment.schema(),
         )
     }
@@ -89,25 +101,32 @@ impl InvertedIndexSerializer {
         &mut self,
         field: Field,
         total_num_tokens: u64,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> io::Result<FieldSerializer<'_>> {
+        self.field_stats
+            .insert(field, FieldStat::new(total_num_tokens));
         let field_entry: &FieldEntry = self.schema.get_field_entry(field);
         let term_dictionary_write = self.terms_write.for_field(field);
         let postings_write = self.postings_write.for_field(field);
-        total_num_tokens.serialize(postings_write)?;
         let positions_write = self.positions_write.for_field(field);
         let positionsidx_write = self.positionsidx_write.for_field(field);
         let field_type: FieldType = (*field_entry.field_type()).clone();
         FieldSerializer::create(
             &field_type,
+            total_num_tokens,
             term_dictionary_write,
             postings_write,
             positions_write,
             positionsidx_write,
+            fieldnorm_reader,
         )
     }

     /// Closes the serializer.
-    pub fn close(self) -> io::Result<()> {
+    pub fn close(mut self) -> io::Result<()> {
+        self.field_stats
+            .serialize(self.field_stats_write.get_mut())?;
+        self.field_stats_write.terminate()?;
         self.terms_write.close()?;
         self.postings_write.close()?;
         self.positions_write.close()?;
@@ -130,10 +149,12 @@ pub struct FieldSerializer<'a> {
 impl<'a> FieldSerializer<'a> {
     fn create(
         field_type: &FieldType,
+        total_num_tokens: u64,
         term_dictionary_write: &'a mut CountingWriter<WritePtr>,
         postings_write: &'a mut CountingWriter<WritePtr>,
         positions_write: &'a mut CountingWriter<WritePtr>,
         positionsidx_write: &'a mut CountingWriter<WritePtr>,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> io::Result<FieldSerializer<'a>> {
         let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
             FieldType::Str(ref text_options) => {
@@ -147,8 +168,17 @@ impl<'a> FieldSerializer<'a> {
             _ => (false, false),
         };
         let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
-        let postings_serializer =
-            PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
+        let average_fieldnorm = fieldnorm_reader
+            .as_ref()
+            .map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
+            .unwrap_or(0.0);
+        let postings_serializer = PostingsSerializer::new(
+            postings_write,
+            average_fieldnorm,
+            term_freq_enabled,
+            position_enabled,
+            fieldnorm_reader,
+        );
         let positions_serializer_opt = if position_enabled {
             Some(PositionSerializer::new(positions_write, positionsidx_write))
         } else {
@@ -173,7 +203,8 @@ impl<'a> FieldSerializer<'a> {
             .unwrap_or(0u64);
         TermInfo {
             doc_freq: 0,
-            postings_offset: self.postings_serializer.addr(),
+            postings_start_offset: self.postings_serializer.addr(),
+            postings_end_offset: 0u64,
             positions_idx,
         }
     }
@@ -181,18 +212,20 @@ impl<'a> FieldSerializer<'a> {
     /// Starts the postings for a new term.
     /// * term - the term. It needs to come after the previous term according
     ///   to the lexicographical order.
-    /// * doc_freq - return the number of document containing the term.
-    pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> {
+    /// * term_doc_freq - return the number of documents containing the term.
+    pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
         assert!(
             !self.term_open,
             "Called new_term, while the previous term was not closed."
         );

         self.term_open = true;
         self.postings_serializer.clear();
         self.current_term_info = self.current_term_info();
         self.term_dictionary_builder.insert_key(term)?;
         let term_ordinal = self.num_terms;
         self.num_terms += 1;
+        self.postings_serializer.new_term(term_doc_freq);
         Ok(term_ordinal)
     }

@@ -225,10 +258,12 @@ impl<'a> FieldSerializer<'a> {
     /// using `VInt` encoding.
     pub fn close_term(&mut self) -> io::Result<()> {
         if self.term_open {
-            self.term_dictionary_builder
-                .insert_value(&self.current_term_info)?;
             self.postings_serializer
                 .close_term(self.current_term_info.doc_freq)?;
+            let end_offset = self.postings_serializer.addr();
+            self.current_term_info.postings_end_offset = end_offset;
+            self.term_dictionary_builder
+                .insert_value(&self.current_term_info)?;
             self.term_open = false;
         }
         Ok(())
@@ -306,14 +341,27 @@ pub struct PostingsSerializer<W: Write> {

     termfreq_enabled: bool,
     termfreq_sum_enabled: bool,
+    fieldnorm_reader: Option<FieldNormReader>,
+
+    bm25_weight: Option<BM25Weight>,
+
+    num_docs: u32, // Number of docs in the segment
+    avg_fieldnorm: Score, // Average number of terms in the field for that segment.
+    // this value is used to compute the block wand information.
 }

 impl<W: Write> PostingsSerializer<W> {
     pub fn new(
         write: W,
+        avg_fieldnorm: Score,
         termfreq_enabled: bool,
         termfreq_sum_enabled: bool,
+        fieldnorm_reader: Option<FieldNormReader>,
     ) -> PostingsSerializer<W> {
+        let num_docs = fieldnorm_reader
+            .as_ref()
+            .map(|fieldnorm_reader| fieldnorm_reader.num_docs())
+            .unwrap_or(0u32);
         PostingsSerializer {
             output_write: CountingWriter::wrap(write),

@@ -326,6 +374,23 @@ impl<W: Write> PostingsSerializer<W> {
             last_doc_id_encoded: 0u32,
             termfreq_enabled,
             termfreq_sum_enabled,
+
+            fieldnorm_reader,
+            bm25_weight: None,
+
+            num_docs,
+            avg_fieldnorm,
+        }
+    }
+
+    pub fn new_term(&mut self, term_doc_freq: u32) {
+        if self.termfreq_enabled && self.num_docs > 0 {
+            let bm25_weight = BM25Weight::for_one_term(
+                term_doc_freq as u64,
+                self.num_docs as u64,
+                self.avg_fieldnorm,
+            );
+            self.bm25_weight = Some(bm25_weight);
         }
     }

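With the doc frequency available at `new_term` time, the serializer precomputes one `BM25Weight` per term; every block flushed afterwards only has to evaluate the cheap per-block factor. The precomputation follows roughly the classic BM25 idf shape sketched below (tantivy's exact constants live in `BM25Weight::for_one_term` and may differ):

```rust
/// Standard BM25 idf, precomputed once per term; the per-block work later
/// only needs the term-frequency / fieldnorm factor.
fn bm25_idf(term_doc_freq: u64, num_docs: u64) -> f32 {
    let (df, n) = (term_doc_freq as f32, num_docs as f32);
    (1.0 + (n - df + 0.5) / (df + 0.5)).ln()
}

fn main() {
    // A rare term weighs more than a common one.
    assert!(bm25_idf(3, 1_000) > bm25_idf(500, 1_000));
}
```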
@@ -342,7 +407,6 @@ impl<W: Write> PostingsSerializer<W> {
             self.postings_write.extend(block_encoded);
         }
         if self.termfreq_enabled {
-            // encode the term_freqs
             let (num_bits, block_encoded): (u8, &[u8]) = self
                 .block_encoder
                 .compress_block_unsorted(&self.block.term_freqs());
@@ -352,6 +416,31 @@ impl<W: Write> PostingsSerializer<W> {
                 let sum_freq = self.block.term_freqs().iter().cloned().sum();
                 self.skip_write.write_total_term_freq(sum_freq);
             }
+            let mut blockwand_params = (0u8, 0u32);
+            if let Some(bm25_weight) = self.bm25_weight.as_ref() {
+                if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
+                    let docs = self.block.doc_ids().iter().cloned();
+                    let term_freqs = self.block.term_freqs().iter().cloned();
+                    let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
+                    blockwand_params = fieldnorms
+                        .zip(term_freqs)
+                        .max_by(
+                            |(left_fieldnorm_id, left_term_freq),
+                             (right_fieldnorm_id, right_term_freq)| {
+                                let left_score =
+                                    bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
+                                let right_score =
+                                    bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
+                                left_score
+                                    .partial_cmp(&right_score)
+                                    .unwrap_or(Ordering::Equal)
+                            },
+                        )
+                        .unwrap();
+                }
+            }
+            let (fieldnorm_id, term_freq) = blockwand_params;
+            self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
         }
         self.block.clear();
     }
@@ -400,6 +489,7 @@ impl<W: Write> PostingsSerializer<W> {
         }
         self.skip_write.clear();
         self.postings_write.clear();
+        self.bm25_weight = None;
         Ok(())
     }

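The `max_by` above scans a block's (fieldnorm id, term frequency) pairs and keeps the pair with the highest BM25 term-frequency factor; only that one pair is persisted per block. A standalone sketch of the same selection, with a toy `tf_factor` standing in for `BM25Weight::tf_factor`:

```rust
use std::cmp::Ordering;

/// Pick the (fieldnorm_id, term_freq) pair with the highest tf factor in a
/// block, mirroring the max_by above. `tf_factor` is a toy saturation-style
/// stand-in here, not tantivy's actual scoring function.
fn blockwand_params(
    pairs: &[(u8, u32)],
    tf_factor: impl Fn(u8, u32) -> f32,
) -> (u8, u32) {
    pairs
        .iter()
        .copied()
        .max_by(|&(lf, ltf), &(rf, rtf)| {
            tf_factor(lf, ltf)
                .partial_cmp(&tf_factor(rf, rtf))
                .unwrap_or(Ordering::Equal)
        })
        .unwrap_or((0u8, 0u32))
}

fn main() {
    // Higher term frequency and a shorter field (smaller fieldnorm) win.
    let tf_factor =
        |fieldnorm_id: u8, tf: u32| tf as f32 / (tf as f32 + 1.0 + fieldnorm_id as f32);
    let best = blockwand_params(&[(10, 1), (2, 3), (30, 8)], tf_factor);
    assert_eq!(best, (2, 3));
}
```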
src/postings/skip.rs

@@ -1,8 +1,9 @@
-use crate::common::BinarySerializable;
+use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, VInt};
 use crate::directory::ReadOnlySource;
 use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
+use crate::query::BM25Weight;
 use crate::schema::IndexRecordOption;
-use crate::{DocId, TERMINATED};
+use crate::{DocId, Score, TERMINATED};
 use owned_read::OwnedRead;

 pub struct SkipSerializer {
@@ -40,6 +41,13 @@ impl SkipSerializer {
             .expect("Should never fail");
     }

+    pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
+        self.buffer.push(fieldnorm_id);
+        let mut buf = [0u8; 8];
+        let bytes = serialize_vint_u32(term_freq, &mut buf);
+        self.buffer.extend_from_slice(bytes);
+    }
+
     pub fn data(&self) -> &[u8] {
         &self.buffer[..]
     }
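`write_blockwand_max` costs one byte for the fieldnorm id plus a variable-length integer for the term frequency, so each full block's skip entry grows by only a few bytes. The sketch below uses a plain LEB128-style vint to show the shape of the encoding; tantivy's `serialize_vint_u32` may differ in its exact byte layout:

```rust
/// Minimal LEB128-style u32 vint: one fieldnorm byte followed by 7-bit
/// groups of the term frequency, low bits first, high bit as continuation.
fn write_blockwand_max(buffer: &mut Vec<u8>, fieldnorm_id: u8, mut term_freq: u32) {
    buffer.push(fieldnorm_id);
    loop {
        let byte = (term_freq & 0x7f) as u8;
        term_freq >>= 7;
        if term_freq == 0 {
            buffer.push(byte);
            return;
        }
        buffer.push(byte | 0x80); // continuation bit
    }
}

fn main() {
    let mut buf = Vec::new();
    write_blockwand_max(&mut buf, 3, 300);
    // 300 = 0b1_0010_1100 -> low 7 bits with continuation, then the high bits.
    assert_eq!(buf, vec![3, 0b1010_1100, 0b0000_0010]);
}
```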
@@ -50,6 +58,7 @@ impl SkipSerializer {
     }
 }

+#[derive(Clone)]
 pub(crate) struct SkipReader {
     last_doc_in_block: DocId,
     pub(crate) last_doc_in_previous_block: DocId,
@@ -69,41 +78,74 @@ pub(crate) enum BlockInfo {
         doc_num_bits: u8,
         tf_num_bits: u8,
         tf_sum: u32,
+        block_wand_fieldnorm_id: u8,
+        block_wand_term_freq: u32,
+    },
+    VInt {
+        num_docs: u32,
     },
-    VInt(u32),
 }

 impl Default for BlockInfo {
     fn default() -> Self {
-        BlockInfo::VInt(0)
+        BlockInfo::VInt { num_docs: 0u32 }
     }
 }

 impl SkipReader {
     pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
-        SkipReader {
-            last_doc_in_block: 0u32,
+        let mut skip_reader = SkipReader {
+            last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+                0
+            } else {
+                TERMINATED
+            },
             last_doc_in_previous_block: 0u32,
             owned_read: OwnedRead::new(data),
             skip_info,
-            block_info: BlockInfo::default(),
+            block_info: BlockInfo::VInt { num_docs: doc_freq },
             byte_offset: 0,
             remaining_docs: doc_freq,
             position_offset: 0u64,
+        };
+        if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+            skip_reader.read_block_info();
         }
+        skip_reader
     }

     pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
-        self.last_doc_in_block = 0u32;
+        self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+            0
+        } else {
+            TERMINATED
+        };
         self.last_doc_in_previous_block = 0u32;
         self.owned_read = OwnedRead::new(data);
-        self.block_info = BlockInfo::default();
+        self.block_info = BlockInfo::VInt { num_docs: doc_freq };
         self.byte_offset = 0;
         self.remaining_docs = doc_freq;
+        self.position_offset = 0u64;
+        if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
+            self.read_block_info();
+        }
     }

+    // Returns the block max score for this block if available.
+    //
+    // The block max score is available for all full bitpacked blocks,
+    // but not available for the last VInt encoded incomplete block.
+    pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
+        match self.block_info {
+            BlockInfo::BitPacked {
+                block_wand_fieldnorm_id,
+                block_wand_term_freq,
+                ..
+            } => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
+            BlockInfo::VInt { .. } => None,
+        }
+    }

-    #[cfg(test)]
-    #[inline(always)]
     pub(crate) fn last_doc_in_block(&self) -> DocId {
         self.last_doc_in_block
     }
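`block_max_score` turns the stored (fieldnorm id, term frequency) pair back into an upper bound on BM25 for the whole block; a Block-WAND loop can then discard blocks whose bound cannot beat the current threshold without decompressing a single document. A schematic sketch of that pruning decision (the types here are invented for illustration):

```rust
/// Schematic Block-WAND pruning step: skip data the cursor has already
/// passed, then take the first block whose score upper bound is competitive.
struct Block {
    last_doc: u32,
    max_score: f32,
}

fn next_competitive_block(blocks: &[Block], threshold: f32, target: u32) -> Option<&Block> {
    blocks
        .iter()
        .filter(|block| block.last_doc >= target) // already consumed otherwise
        .find(|block| block.max_score > threshold) // prune non-competitive blocks
}

fn main() {
    let blocks = [
        Block { last_doc: 127, max_score: 0.4 },
        Block { last_doc: 255, max_score: 1.9 },
        Block { last_doc: 383, max_score: 0.7 },
    ];
    let block = next_competitive_block(&blocks, 1.0, 130).unwrap();
    assert_eq!(block.last_doc, 255); // the only block that can beat 1.0
}
```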
@@ -127,25 +169,38 @@ impl SkipReader {
                     doc_num_bits,
                     tf_num_bits: 0,
                     tf_sum: 0,
+                    block_wand_fieldnorm_id: 0,
+                    block_wand_term_freq: 0,
                 };
             }
             IndexRecordOption::WithFreqs => {
                 let tf_num_bits = self.owned_read.get(1);
+                let block_wand_fieldnorm_id = self.owned_read.get(2);
+                let data = &self.owned_read.as_ref()[3..];
+                let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(data);
+                self.owned_read.advance(3 + num_bytes);
                 self.block_info = BlockInfo::BitPacked {
                     doc_num_bits,
                     tf_num_bits,
                     tf_sum: 0,
+                    block_wand_fieldnorm_id,
+                    block_wand_term_freq,
                 };
-                self.owned_read.advance(2);
             }
             IndexRecordOption::WithFreqsAndPositions => {
                 let tf_num_bits = self.owned_read.get(1);
                 self.owned_read.advance(2);
                 let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
+                let block_wand_fieldnorm_id = self.owned_read.get(0);
+                self.owned_read.advance(1);
+                let block_wand_term_freq =
+                    VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
                 self.block_info = BlockInfo::BitPacked {
                     doc_num_bits,
                     tf_num_bits,
                     tf_sum,
+                    block_wand_fieldnorm_id,
+                    block_wand_term_freq,
                 };
             }
         }
@@ -159,35 +214,44 @@ impl SkipReader {
     ///
     /// If the target is larger than all documents, the skip_reader
     /// then advances to the last VInt block.
-    pub fn seek(&mut self, target: DocId) {
-        while self.last_doc_in_block < target {
+    pub fn seek(&mut self, target: DocId) -> bool {
+        if self.last_doc_in_block() >= target {
+            return false;
+        }
+        loop {
             self.advance();
+            if self.last_doc_in_block() >= target {
+                return true;
+            }
         }
     }

-    pub fn advance(&mut self) -> bool {
+    pub fn advance(&mut self) {
         match self.block_info {
             BlockInfo::BitPacked {
                 doc_num_bits,
                 tf_num_bits,
                 tf_sum,
+                ..
             } => {
                 self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
                 self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
                 self.position_offset += tf_sum as u64;
             }
-            BlockInfo::VInt(num_vint_docs) => {
-                self.remaining_docs -= num_vint_docs;
+            BlockInfo::VInt { num_docs } => {
+                debug_assert_eq!(num_docs, self.remaining_docs);
+                self.remaining_docs = 0;
+                self.byte_offset = std::usize::MAX;
             }
         }
         self.last_doc_in_previous_block = self.last_doc_in_block;
         if self.remaining_docs >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||||
self.read_block_info();
|
self.read_block_info();
|
||||||
true
|
|
||||||
} else {
|
} else {
|
||||||
self.last_doc_in_block = TERMINATED;
|
self.last_doc_in_block = TERMINATED;
|
||||||
self.block_info = BlockInfo::VInt(self.remaining_docs);
|
self.block_info = BlockInfo::VInt {
|
||||||
self.remaining_docs > 0
|
num_docs: self.remaining_docs,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -207,8 +271,10 @@ mod tests {
|
|||||||
let mut skip_serializer = SkipSerializer::new();
|
let mut skip_serializer = SkipSerializer::new();
|
||||||
skip_serializer.write_doc(1u32, 2u8);
|
skip_serializer.write_doc(1u32, 2u8);
|
||||||
skip_serializer.write_term_freq(3u8);
|
skip_serializer.write_term_freq(3u8);
|
||||||
|
skip_serializer.write_blockwand_max(13u8, 3u32);
|
||||||
skip_serializer.write_doc(5u32, 5u8);
|
skip_serializer.write_doc(5u32, 5u8);
|
||||||
skip_serializer.write_term_freq(2u8);
|
skip_serializer.write_term_freq(2u8);
|
||||||
|
skip_serializer.write_blockwand_max(8u8, 2u32);
|
||||||
skip_serializer.data().to_owned()
|
skip_serializer.data().to_owned()
|
||||||
};
|
};
|
||||||
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
|
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
|
||||||
@@ -217,29 +283,35 @@ mod tests {
|
|||||||
doc_freq,
|
doc_freq,
|
||||||
IndexRecordOption::WithFreqs,
|
IndexRecordOption::WithFreqs,
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
|
||||||
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
skip_reader.block_info(),
|
skip_reader.block_info,
|
||||||
BlockInfo::BitPacked {
|
BlockInfo::BitPacked {
|
||||||
doc_num_bits: 2u8,
|
doc_num_bits: 2u8,
|
||||||
tf_num_bits: 3u8,
|
tf_num_bits: 3u8,
|
||||||
tf_sum: 0
|
tf_sum: 0,
|
||||||
|
block_wand_fieldnorm_id: 13,
|
||||||
|
block_wand_term_freq: 3
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
skip_reader.advance();
|
||||||
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
|
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
skip_reader.block_info(),
|
skip_reader.block_info(),
|
||||||
BlockInfo::BitPacked {
|
BlockInfo::BitPacked {
|
||||||
doc_num_bits: 5u8,
|
doc_num_bits: 5u8,
|
||||||
tf_num_bits: 2u8,
|
tf_num_bits: 2u8,
|
||||||
tf_sum: 0
|
tf_sum: 0,
|
||||||
|
block_wand_fieldnorm_id: 8,
|
||||||
|
block_wand_term_freq: 2
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
skip_reader.advance();
|
||||||
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
|
||||||
assert!(!skip_reader.advance());
|
skip_reader.advance();
|
||||||
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
|
||||||
|
skip_reader.advance();
|
||||||
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -256,29 +328,35 @@ mod tests {
|
|||||||
doc_freq,
|
doc_freq,
|
||||||
IndexRecordOption::Basic,
|
IndexRecordOption::Basic,
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
|
||||||
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
skip_reader.block_info(),
|
skip_reader.block_info(),
|
||||||
BlockInfo::BitPacked {
|
BlockInfo::BitPacked {
|
||||||
doc_num_bits: 2u8,
|
doc_num_bits: 2u8,
|
||||||
tf_num_bits: 0,
|
tf_num_bits: 0,
|
||||||
tf_sum: 0u32
|
tf_sum: 0u32,
|
||||||
|
block_wand_fieldnorm_id: 0,
|
||||||
|
block_wand_term_freq: 0
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
skip_reader.advance();
|
||||||
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
|
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
skip_reader.block_info(),
|
skip_reader.block_info(),
|
||||||
BlockInfo::BitPacked {
|
BlockInfo::BitPacked {
|
||||||
doc_num_bits: 5u8,
|
doc_num_bits: 5u8,
|
||||||
tf_num_bits: 0,
|
tf_num_bits: 0,
|
||||||
tf_sum: 0u32
|
tf_sum: 0u32,
|
||||||
|
block_wand_fieldnorm_id: 0,
|
||||||
|
block_wand_term_freq: 0
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
skip_reader.advance();
|
||||||
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
|
||||||
assert!(!skip_reader.advance());
|
skip_reader.advance();
|
||||||
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
|
||||||
|
skip_reader.advance();
|
||||||
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -294,16 +372,18 @@ mod tests {
|
|||||||
doc_freq,
|
doc_freq,
|
||||||
IndexRecordOption::Basic,
|
IndexRecordOption::Basic,
|
||||||
);
|
);
|
||||||
assert!(skip_reader.advance());
|
|
||||||
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
skip_reader.block_info(),
|
skip_reader.block_info(),
|
||||||
BlockInfo::BitPacked {
|
BlockInfo::BitPacked {
|
||||||
doc_num_bits: 2u8,
|
doc_num_bits: 2u8,
|
||||||
tf_num_bits: 0,
|
tf_num_bits: 0,
|
||||||
tf_sum: 0u32
|
tf_sum: 0u32,
|
||||||
|
block_wand_fieldnorm_id: 0,
|
||||||
|
block_wand_term_freq: 0
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
assert!(!skip_reader.advance());
|
skip_reader.advance();
|
||||||
|
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
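The hunks above rework the skip-list reader (`SkipReader`): every full bit-packed block now records the `(fieldnorm_id, term_freq)` pair of its best-scoring document, so `block_max_score` can hand Block-WAND a per-block upper bound without decoding the block. A minimal, self-contained sketch of that idea — toy types, not tantivy's; the constants and the direct use of a float fieldnorm are assumptions made for illustration:

```rust
// Toy illustration: two small numbers stored per block are enough to compute
// a cheap BM25 upper bound, so skipping a block costs a few arithmetic ops.
#[derive(Clone, Copy)]
struct BlockMeta {
    last_doc: u32,
    block_wand_fieldnorm: f32,
    block_wand_term_freq: u32,
}

// Same shape as the BM25 score: weight * tf / (tf + norm).
fn block_upper_bound(weight: f32, k1: f32, b: f32, avg_fieldnorm: f32, meta: BlockMeta) -> f32 {
    let norm = k1 * (1.0 - b + b * meta.block_wand_fieldnorm / avg_fieldnorm);
    let tf = meta.block_wand_term_freq as f32;
    weight * tf / (tf + norm)
}

fn main() {
    let blocks = [
        BlockMeta { last_doc: 127, block_wand_fieldnorm: 10.0, block_wand_term_freq: 3 },
        BlockMeta { last_doc: 255, block_wand_fieldnorm: 4.0, block_wand_term_freq: 40 },
    ];
    let threshold = 1.0f32;
    for block in &blocks {
        if block_upper_bound(1.2, 1.2, 0.75, 12.0, *block) <= threshold {
            // No document in this block can beat the current top-k:
            // the whole block may be skipped without decoding it.
            println!("skip block ending at doc {}", block.last_doc);
        } else {
            println!("decode block ending at doc {}", block.last_doc);
        }
    }
}
```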
```diff
@@ -7,35 +7,49 @@ use std::io;
 pub struct TermInfo {
     /// Number of documents in the segment containing the term
     pub doc_freq: u32,
-    /// Start offset within the postings (`.idx`) file.
-    pub postings_offset: u64,
+    /// Start offset of the posting list within the postings (`.idx`) file.
+    pub postings_start_offset: u64,
+    /// End offset of the posting list within the postings (`.idx`) file.
+    pub postings_end_offset: u64,
     /// Start offset of the first block within the position (`.pos`) file.
     pub positions_idx: u64,
 }

+impl TermInfo {
+    pub(crate) fn posting_num_bytes(&self) -> u32 {
+        let num_bytes = self.postings_end_offset - self.postings_start_offset;
+        assert!(num_bytes <= std::u32::MAX as u64);
+        num_bytes as u32
+    }
+}
+
 impl FixedSize for TermInfo {
     /// Size required for the binary serialization of a `TermInfo` object.
     /// This is large, but in practise, `TermInfo` are encoded in blocks and
     /// only the first `TermInfo` of a block is serialized uncompressed.
     /// The subsequent `TermInfo` are delta encoded and bitpacked.
-    const SIZE_IN_BYTES: usize = u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES;
+    const SIZE_IN_BYTES: usize = 2 * u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES;
 }

 impl BinarySerializable for TermInfo {
     fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
         self.doc_freq.serialize(writer)?;
-        self.postings_offset.serialize(writer)?;
+        self.postings_start_offset.serialize(writer)?;
+        self.posting_num_bytes().serialize(writer)?;
         self.positions_idx.serialize(writer)?;
         Ok(())
     }

     fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
         let doc_freq = u32::deserialize(reader)?;
-        let postings_offset = u64::deserialize(reader)?;
+        let postings_start_offset = u64::deserialize(reader)?;
+        let postings_num_bytes = u32::deserialize(reader)?;
+        let postings_end_offset = postings_start_offset + u64::from(postings_num_bytes);
         let positions_idx = u64::deserialize(reader)?;
         Ok(TermInfo {
             doc_freq,
-            postings_offset,
+            postings_start_offset,
+            postings_end_offset,
             positions_idx,
         })
     }
```
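The `TermInfo` change above trades a second absolute `u64` offset for a `u32` byte length on disk: `postings_end_offset` is rebuilt as `postings_start_offset + num_bytes` at deserialization time, which is why `SIZE_IN_BYTES` grows by one `u32` rather than one `u64`. A hedged round-trip sketch of that layout — plain `std::io`, with little-endian chosen arbitrarily (the real code goes through tantivy's `BinarySerializable`):

```rust
use std::io::{self, Read, Write};

// Sketch of the wire layout: doc_freq (u32), start offset (u64),
// posting length (u32), positions index (u64).
fn serialize<W: Write>(w: &mut W, doc_freq: u32, start: u64, end: u64, positions: u64) -> io::Result<()> {
    let num_bytes = (end - start) as u32; // asserted to fit in a u32 in the real code
    w.write_all(&doc_freq.to_le_bytes())?;
    w.write_all(&start.to_le_bytes())?;
    w.write_all(&num_bytes.to_le_bytes())?;
    w.write_all(&positions.to_le_bytes())
}

fn deserialize<R: Read>(r: &mut R) -> io::Result<(u32, u64, u64, u64)> {
    let mut b4 = [0u8; 4];
    let mut b8 = [0u8; 8];
    r.read_exact(&mut b4)?;
    let doc_freq = u32::from_le_bytes(b4);
    r.read_exact(&mut b8)?;
    let start = u64::from_le_bytes(b8);
    r.read_exact(&mut b4)?;
    // The end offset is derived, never stored.
    let end = start + u64::from(u32::from_le_bytes(b4));
    r.read_exact(&mut b8)?;
    let positions = u64::from_le_bytes(b8);
    Ok((doc_freq, start, end, positions))
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    serialize(&mut buf, 42, 1024, 1536, 99)?;
    assert_eq!(buf.len(), 4 + 8 + 4 + 8); // matches the new SIZE_IN_BYTES
    assert_eq!(deserialize(&mut buf.as_slice())?, (42, 1024, 1536, 99));
    Ok(())
}
```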
```diff
@@ -9,7 +9,7 @@ use crate::Score;

 /// Query that matches all of the documents.
 ///
-/// All of the document get the score 1f32.
+/// All of the document get the score 1.0.
 #[derive(Clone, Debug)]
 pub struct AllQuery;

@@ -23,7 +23,7 @@ impl Query for AllQuery {
 pub struct AllWeight;

 impl Weight for AllWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         let all_scorer = AllScorer {
             doc: 0u32,
             max_doc: reader.max_doc(),
@@ -35,7 +35,7 @@ impl Weight for AllWeight {
         if doc >= reader.max_doc() {
             return Err(does_not_match(doc));
         }
-        Ok(Explanation::new("AllQuery", 1f32))
+        Ok(Explanation::new("AllQuery", 1.0))
     }
 }

@@ -66,7 +66,7 @@ impl DocSet for AllScorer {

 impl Scorer for AllScorer {
     fn score(&mut self) -> Score {
-        1f32
+        1.0
     }
 }

@@ -83,7 +83,7 @@ mod tests {
        let field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.add_document(doc!(field=>"aaa"));
        index_writer.add_document(doc!(field=>"bbb"));
        index_writer.commit().unwrap();
@@ -100,7 +100,7 @@ mod tests {
        let weight = AllQuery.weight(&searcher, false).unwrap();
        {
            let reader = searcher.segment_reader(0);
-            let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+            let mut scorer = weight.scorer(reader, 1.0).unwrap();
            assert_eq!(scorer.doc(), 0u32);
            assert_eq!(scorer.advance(), 1u32);
            assert_eq!(scorer.doc(), 1u32);
@@ -108,7 +108,7 @@ mod tests {
        }
        {
            let reader = searcher.segment_reader(1);
-            let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
+            let mut scorer = weight.scorer(reader, 1.0).unwrap();
            assert_eq!(scorer.doc(), 0u32);
            assert_eq!(scorer.advance(), TERMINATED);
        }
@@ -122,14 +122,14 @@ mod tests {
        let weight = AllQuery.weight(&searcher, false).unwrap();
        let reader = searcher.segment_reader(0);
        {
-            let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
+            let mut scorer = weight.scorer(reader, 2.0).unwrap();
            assert_eq!(scorer.doc(), 0u32);
-            assert_eq!(scorer.score(), 2.0f32);
+            assert_eq!(scorer.score(), 2.0);
        }
        {
-            let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
+            let mut scorer = weight.scorer(reader, 1.5).unwrap();
            assert_eq!(scorer.doc(), 0u32);
-            assert_eq!(scorer.score(), 1.5f32);
+            assert_eq!(scorer.score(), 1.5);
        }
    }
 }
```
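The `AllQuery` hunks are mostly literal cleanups (`1f32` becomes `1.0`, `f32` becomes the `Score` alias), but the tests pin down the contract: every document matches, and the scorer returns exactly the boost it was built with. A usage sketch written against the 0.13-era public API (method names and signatures may differ in other versions, so treat this as an approximation):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(10_000_000)?;
    writer.add_document(doc!(text => "aaa"));
    writer.add_document(doc!(text => "bbb"));
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // Every document matches, all with the same constant score.
    let top = searcher.search(&AllQuery, &TopDocs::with_limit(10))?;
    assert_eq!(top.len(), 2);
    Ok(())
}
```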
```diff
@@ -5,9 +5,8 @@ use crate::query::{BitSetDocSet, Explanation};
 use crate::query::{Scorer, Weight};
 use crate::schema::{Field, IndexRecordOption};
 use crate::termdict::{TermDictionary, TermStreamer};
-use crate::DocId;
-use crate::Result;
 use crate::TantivyError;
+use crate::{DocId, Score};
 use std::sync::Arc;
 use tantivy_fst::Automaton;

@@ -40,10 +39,9 @@ impl<A> Weight for AutomatonWeight<A>
 where
     A: Automaton + Send + Sync + 'static,
 {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         let max_doc = reader.max_doc();
         let mut doc_bitset = BitSet::with_max_value(max_doc);
-
         let inverted_index = reader.inverted_index(self.field);
         let term_dict = inverted_index.terms();
         let mut term_stream = self.automaton_stream(term_dict);
@@ -52,12 +50,14 @@ where
             let mut block_segment_postings = inverted_index
                 .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
             loop {
-                for &doc in block_segment_postings.docs() {
-                    doc_bitset.insert(doc);
-                }
-                if !block_segment_postings.advance() {
+                let docs = block_segment_postings.docs();
+                if docs.is_empty() {
                     break;
                 }
+                for &doc in docs {
+                    doc_bitset.insert(doc);
+                }
+                block_segment_postings.advance();
             }
         }
         let doc_bitset = BitSetDocSet::from(doc_bitset);
@@ -65,10 +65,10 @@ where
         Ok(Box::new(const_scorer))
     }

-    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+        let mut scorer = self.scorer(reader, 1.0)?;
         if scorer.seek(doc) == doc {
-            Ok(Explanation::new("AutomatonScorer", 1.0f32))
+            Ok(Explanation::new("AutomatonScorer", 1.0))
         } else {
             Err(TantivyError::InvalidArgument(
                 "Document does not exist".to_string(),
@@ -90,7 +90,7 @@ mod tests {
        let mut schema = Schema::builder();
        let title = schema.add_text_field("title", STRING);
        let index = Index::create_in_ram(schema.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
        index_writer.add_document(doc!(title=>"abc"));
        index_writer.add_document(doc!(title=>"bcd"));
        index_writer.add_document(doc!(title=>"abcd"));
@@ -143,13 +143,13 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let mut scorer = automaton_weight
-            .scorer(searcher.segment_reader(0u32), 1.0f32)
+            .scorer(searcher.segment_reader(0u32), 1.0)
            .unwrap();
        assert_eq!(scorer.doc(), 0u32);
-        assert_eq!(scorer.score(), 1.0f32);
+        assert_eq!(scorer.score(), 1.0);
        assert_eq!(scorer.advance(), 2u32);
        assert_eq!(scorer.doc(), 2u32);
-        assert_eq!(scorer.score(), 1.0f32);
+        assert_eq!(scorer.score(), 1.0);
        assert_eq!(scorer.advance(), TERMINATED);
    }

@@ -161,9 +161,9 @@ mod tests {
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        let mut scorer = automaton_weight
-            .scorer(searcher.segment_reader(0u32), 1.32f32)
+            .scorer(searcher.segment_reader(0u32), 1.32)
            .unwrap();
        assert_eq!(scorer.doc(), 0u32);
-        assert_eq!(scorer.score(), 1.32f32);
+        assert_eq!(scorer.score(), 1.32);
    }
 }
```
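The rewritten loop above flips the iteration contract of the block postings cursor: instead of consuming a block and testing `advance()`'s return value, it checks for an empty block first, consumes it, then advances unconditionally — which matches the new `advance()` that no longer returns `bool`. A toy model of that check-consume-advance shape (a stand-in type, not tantivy's `BlockSegmentPostings`):

```rust
struct ToyBlockPostings {
    blocks: Vec<Vec<u32>>,
    cursor: usize,
}

impl ToyBlockPostings {
    // An empty slice signals exhaustion, before anything is consumed.
    fn docs(&self) -> &[u32] {
        self.blocks.get(self.cursor).map(Vec::as_slice).unwrap_or(&[])
    }
    fn advance(&mut self) {
        self.cursor += 1;
    }
}

fn main() {
    let mut postings = ToyBlockPostings {
        blocks: vec![vec![1, 4, 7], vec![9, 12]],
        cursor: 0,
    };
    let mut collected = Vec::new();
    loop {
        let docs = postings.docs();
        if docs.is_empty() {
            break; // exhausted: nothing left to insert into the bitset
        }
        collected.extend_from_slice(docs);
        postings.advance();
    }
    assert_eq!(collected, vec![1, 4, 7, 9, 12]);
}
```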
```diff
@@ -61,21 +61,23 @@ impl DocSet for BitSetDocSet {
     }

     fn seek(&mut self, target: DocId) -> DocId {
+        if target >= self.docs.max_value() {
+            self.doc = TERMINATED;
+            return TERMINATED;
+        }
         let target_bucket = target / 64u32;

-        // Mask for all of the bits greater or equal
-        // to our target document.
         if target_bucket > self.cursor_bucket {
             self.go_to_bucket(target_bucket);
             let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
             self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
-            self.advance();
+            self.advance()
+        } else {
+            let mut doc = self.doc();
+            while doc < target {
+                doc = self.advance();
+            }
+            doc
         }
-        let mut doc = self.doc();
-        while doc < target {
-            doc = self.advance();
-        }
-        doc
     }

     /// Returns the current document
@@ -114,6 +116,13 @@ mod tests {
        assert_eq!(empty.advance(), TERMINATED)
    }

+    #[test]
+    fn test_seek_terminated() {
+        let bitset = BitSet::with_max_value(1000);
+        let mut empty = BitSetDocSet::from(bitset);
+        assert_eq!(empty.seek(TERMINATED), TERMINATED)
+    }
+
    fn test_go_through_sequential(docs: &[DocId]) {
        let mut docset = create_docbitset(docs, 1_000u32);
        for &doc in docs {
```
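`BitSetDocSet::seek` above gains an early `TERMINATED` exit (exercised by the new `test_seek_terminated`) and a slow path for targets inside the current bucket. The fast path relies on one masking trick: jump to the 64-bit word containing the target, clear every bit below the target, and let the next advance land on the first surviving bit. A standalone sketch over a plain `Vec<u64>` (tantivy uses its `TinySet` type instead):

```rust
fn seek_in_words(words: &[u64], target: u32) -> Option<u32> {
    let mut bucket = (target / 64) as usize;
    if bucket >= words.len() {
        return None; // mirrors the new early TERMINATED return
    }
    // Keep only the bits >= target within this bucket,
    // i.e. the TinySet::range_greater_or_equal intersection.
    let mut word = words[bucket] & (!0u64 << (target % 64));
    loop {
        if word != 0 {
            return Some(bucket as u32 * 64 + word.trailing_zeros());
        }
        bucket += 1;
        if bucket == words.len() {
            return None;
        }
        word = words[bucket];
    }
}

fn main() {
    let mut words = vec![0u64; 4];
    for &doc in &[3u32, 70, 130] {
        words[(doc / 64) as usize] |= 1 << (doc % 64);
    }
    assert_eq!(seek_in_words(&words, 0), Some(3));
    assert_eq!(seek_in_words(&words, 4), Some(70));
    assert_eq!(seek_in_words(&words, 131), None);
}
```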
```diff
@@ -3,21 +3,24 @@ use crate::query::Explanation;
 use crate::Score;
 use crate::Searcher;
 use crate::Term;
+use serde::Deserialize;
+use serde::Serialize;

-const K1: f32 = 1.2;
-const B: f32 = 0.75;
+const K1: Score = 1.2;
+const B: Score = 0.75;

-fn idf(doc_freq: u64, doc_count: u64) -> f32 {
-    let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
-    (1f32 + x).ln()
+fn idf(doc_freq: u64, doc_count: u64) -> Score {
+    assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq);
+    let x = ((doc_count - doc_freq) as Score + 0.5) / (doc_freq as Score + 0.5);
+    (1.0 + x).ln()
 }

-fn cached_tf_component(fieldnorm: u32, average_fieldnorm: f32) -> f32 {
-    K1 * (1f32 - B + B * fieldnorm as f32 / average_fieldnorm)
+fn cached_tf_component(fieldnorm: u32, average_fieldnorm: Score) -> Score {
+    K1 * (1.0 - B + B * fieldnorm as Score / average_fieldnorm)
 }

-fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
-    let mut cache = [0f32; 256];
+fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
+    let mut cache: [Score; 256] = [0.0; 256];
     for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() {
         let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8);
         *cache_mut = cached_tf_component(fieldnorm, average_fieldnorm);
@@ -25,15 +28,22 @@ fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
     cache
 }

+#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
+pub struct BM25Params {
+    pub idf: Score,
+    pub avg_fieldnorm: Score,
+}
+
+#[derive(Clone)]
 pub struct BM25Weight {
     idf_explain: Explanation,
-    weight: f32,
-    cache: [f32; 256],
-    average_fieldnorm: f32,
+    weight: Score,
+    cache: [Score; 256],
+    average_fieldnorm: Score,
 }

 impl BM25Weight {
-    pub fn boost_by(&self, boost: f32) -> BM25Weight {
+    pub fn boost_by(&self, boost: Score) -> BM25Weight {
         BM25Weight {
             idf_explain: self.idf_explain.clone(),
             weight: self.weight * boost,
@@ -60,19 +70,11 @@ impl BM25Weight {
             total_num_tokens += inverted_index.total_num_tokens();
             total_num_docs += u64::from(segment_reader.max_doc());
         }
-        let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
+        let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score;

-        let mut idf_explain: Explanation;
         if terms.len() == 1 {
             let term_doc_freq = searcher.doc_freq(&terms[0]);
-            let idf = idf(term_doc_freq, total_num_docs);
-            idf_explain =
-                Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
-            idf_explain.add_const(
-                "n, number of docs containing this term",
-                term_doc_freq as f32,
-            );
-            idf_explain.add_const("N, total number of docs", total_num_docs as f32);
+            BM25Weight::for_one_term(term_doc_freq, total_num_docs, average_fieldnorm)
         } else {
             let idf = terms
                 .iter()
@@ -80,14 +82,30 @@ impl BM25Weight {
                     let term_doc_freq = searcher.doc_freq(term);
                     idf(term_doc_freq, total_num_docs)
                 })
-                .sum::<f32>();
-            idf_explain = Explanation::new("idf", idf);
+                .sum::<Score>();
+            let idf_explain = Explanation::new("idf", idf);
+            BM25Weight::new(idf_explain, average_fieldnorm)
         }
-        BM25Weight::new(idf_explain, average_fieldnorm)
     }

-    fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
-        let weight = idf_explain.value() * (1f32 + K1);
+    pub fn for_one_term(
+        term_doc_freq: u64,
+        total_num_docs: u64,
+        avg_fieldnorm: Score,
+    ) -> BM25Weight {
+        let idf = idf(term_doc_freq, total_num_docs);
+        let mut idf_explain =
+            Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
+        idf_explain.add_const(
+            "n, number of docs containing this term",
+            term_doc_freq as Score,
+        );
+        idf_explain.add_const("N, total number of docs", total_num_docs as Score);
+        BM25Weight::new(idf_explain, avg_fieldnorm)
+    }
+
+    fn new(idf_explain: Explanation, average_fieldnorm: Score) -> BM25Weight {
+        let weight = idf_explain.value() * (1.0 + K1);
         BM25Weight {
             idf_explain,
             weight,
@@ -98,19 +116,27 @@ impl BM25Weight {

     #[inline(always)]
     pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
+        self.weight * self.tf_factor(fieldnorm_id, term_freq)
+    }
+
+    pub fn max_score(&self) -> Score {
+        self.score(255u8, 2_013_265_944)
+    }
+
+    #[inline(always)]
+    pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
+        let term_freq = term_freq as Score;
         let norm = self.cache[fieldnorm_id as usize];
-        let term_freq = term_freq as f32;
-        self.weight * term_freq / (term_freq + norm)
+        term_freq / (term_freq + norm)
     }

     pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation {
         // The explain format is directly copied from Lucene's.
         // (So, Kudos to Lucene)

         let score = self.score(fieldnorm_id, term_freq);

         let norm = self.cache[fieldnorm_id as usize];
-        let term_freq = term_freq as f32;
+        let term_freq = term_freq as Score;
         let right_factor = term_freq / (term_freq + norm);

         let mut tf_explanation = Explanation::new(
@@ -123,12 +149,12 @@ impl BM25Weight {
         tf_explanation.add_const("b, length normalization parameter", B);
         tf_explanation.add_const(
             "dl, length of field",
-            FieldNormReader::id_to_fieldnorm(fieldnorm_id) as f32,
+            FieldNormReader::id_to_fieldnorm(fieldnorm_id) as Score,
         );
         tf_explanation.add_const("avgdl, average length of field", self.average_fieldnorm);

         let mut explanation = Explanation::new("TermQuery, product of...", score);
-        explanation.add_detail(Explanation::new("(K1+1)", K1 + 1f32));
+        explanation.add_detail(Explanation::new("(K1+1)", K1 + 1.0));
         explanation.add_detail(self.idf_explain.clone());
         explanation.add_detail(tf_explanation);
         explanation
@@ -139,10 +165,11 @@ impl BM25Weight {
 mod tests {

     use super::idf;
-    use crate::tests::assert_nearly_equals;
+    use crate::{assert_nearly_equals, Score};

     #[test]
     fn test_idf() {
-        assert_nearly_equals(idf(1, 2), 0.6931472);
+        let score: Score = 2.0;
+        assert_nearly_equals!(idf(1, 2), score.ln());
     }
 }
```
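For reference, the formulas being migrated to `Score` above are plain BM25. With `doc_freq = 1` and `doc_count = 2`, `x = (2 - 1 + 0.5) / (1 + 0.5) = 1`, so `idf = ln(2)` — exactly what the rewritten `test_idf` asserts. A standalone re-derivation for a worked example (my own re-implementation, not the tantivy functions themselves):

```rust
const K1: f32 = 1.2;
const B: f32 = 0.75;

fn idf(doc_freq: u64, doc_count: u64) -> f32 {
    let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
    (1.0 + x).ln()
}

// weight = idf * (K1 + 1), multiplied by the cached tf factor, as in BM25Weight.
fn bm25(idf: f32, term_freq: f32, fieldnorm: f32, avg_fieldnorm: f32) -> f32 {
    let norm = K1 * (1.0 - B + B * fieldnorm / avg_fieldnorm);
    idf * (K1 + 1.0) * term_freq / (term_freq + norm)
}

fn main() {
    // The value asserted by test_idf: ln(2) ~= 0.6931472.
    assert!((idf(1, 2) - 2.0f32.ln()).abs() < 1e-6);
    // Length normalization: a short field outranks a long one
    // for the same term frequency.
    let short_field = bm25(idf(1, 2), 2.0, 5.0, 10.0);
    let long_field = bm25(idf(1, 2), 2.0, 40.0, 10.0);
    assert!(short_field > long_field);
}
```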
src/query/boolean_query/block_wand.rs (new file, 434 lines)

```diff
@@ -0,0 +1,434 @@
+use crate::query::term_query::TermScorer;
+use crate::query::Scorer;
+use crate::{DocId, DocSet, Score, TERMINATED};
+use std::ops::Deref;
+use std::ops::DerefMut;
+
+/// Takes a term_scorers sorted by their current doc() and a threshold and returns
+/// Returns (pivot_len, pivot_ord) defined as follows:
+/// - `pivot_doc` lowest document that has a chance of exceeding (>) the threshold score.
+/// - `before_pivot_len` number of term_scorers such that term_scorer.doc() < pivot.
+/// - `pivot_len` number of term_scorers such that term_scorer.doc() <= pivot.
+///
+/// We always have `before_pivot_len` < `pivot_len`.
+///
+/// None is returned if we establish that no document can exceed the threshold.
+fn find_pivot_doc(
+    term_scorers: &[TermScorerWithMaxScore],
+    threshold: Score,
+) -> Option<(usize, usize, DocId)> {
+    let mut max_score = 0.0;
+    let mut before_pivot_len = 0;
+    let mut pivot_doc = TERMINATED;
+    while before_pivot_len < term_scorers.len() {
+        let term_scorer = &term_scorers[before_pivot_len];
+        max_score += term_scorer.max_score;
+        if max_score > threshold {
+            pivot_doc = term_scorer.doc();
+            break;
+        }
+        before_pivot_len += 1;
+    }
+    if pivot_doc == TERMINATED {
+        return None;
+    }
+    // Right now i is an ordinal, we want a len.
+    let mut pivot_len = before_pivot_len + 1;
+    // Some other term_scorer may be positioned on the same document.
+    pivot_len += term_scorers[pivot_len..]
+        .iter()
+        .take_while(|term_scorer| term_scorer.doc() == pivot_doc)
+        .count();
+    Some((before_pivot_len, pivot_len, pivot_doc))
+}
+
+// Before and after calling this method, scorers need to be sorted by their `.doc()`.
+fn block_max_was_too_low_advance_one_scorer(
+    scorers: &mut Vec<TermScorerWithMaxScore>,
+    pivot_len: usize,
+) {
+    debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
+    let mut scorer_to_seek = pivot_len - 1;
+    let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
+    for scorer_ord in (0..pivot_len - 1).rev() {
+        let scorer = &scorers[scorer_ord];
+        if scorer.last_doc_in_block() <= doc_to_seek_after {
+            doc_to_seek_after = scorer.last_doc_in_block();
+            scorer_to_seek = scorer_ord;
+        }
+    }
+    for scorer in &scorers[pivot_len..] {
+        if scorer.doc() <= doc_to_seek_after {
+            doc_to_seek_after = scorer.doc();
+        }
+    }
+    scorers[scorer_to_seek].seek(doc_to_seek_after + 1);
+    restore_ordering(scorers, scorer_to_seek);
+    debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
+}
+
+// Given a list of term_scorers and a `ord` and assuming that `term_scorers[ord]` is sorted
+// except term_scorers[ord] that might be in advance compared to its ranks,
+// bubble up term_scorers[ord] in order to restore the ordering.
+fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
+    let doc = term_scorers[ord].doc();
+    for i in ord + 1..term_scorers.len() {
+        if term_scorers[i].doc() >= doc {
+            break;
+        }
+        term_scorers.swap(i, i - 1);
+    }
+    debug_assert!(is_sorted(term_scorers.iter().map(|scorer| scorer.doc())));
+}
+
+// Attempts to advance all term_scorers between `&term_scorers[0..before_len]` to the pivot.
+// If this works, return true.
+// If this fails (ie: one of the term_scorer does not contain `pivot_doc` and seek goes past the
+// pivot), reorder the term_scorers to ensure the list is still sorted and returns `false`.
+// If a term_scorer reach TERMINATED in the process return false remove the term_scorer and return.
+fn align_scorers(
+    term_scorers: &mut Vec<TermScorerWithMaxScore>,
+    pivot_doc: DocId,
+    before_pivot_len: usize,
+) -> bool {
+    debug_assert_ne!(pivot_doc, TERMINATED);
+    for i in (0..before_pivot_len).rev() {
+        let new_doc = term_scorers[i].seek(pivot_doc);
+        if new_doc != pivot_doc {
+            if new_doc == TERMINATED {
+                term_scorers.swap_remove(i);
+            }
+            // We went past the pivot.
+            // We just go through the outer loop mechanic (Note that pivot is
+            // still a possible candidate).
+            //
+            // Termination is still guaranteed since we can only consider the same
+            // pivot at most term_scorers.len() - 1 times.
+            restore_ordering(term_scorers, i);
+            return false;
+        }
+    }
+    true
+}
+
+// Assumes terms_scorers[..pivot_len] are positioned on the same doc (pivot_doc).
+// Advance term_scorers[..pivot_len] and out of these removes the terminated scores.
+// Restores the ordering of term_scorers.
+fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
+    for term_scorer in &mut term_scorers[..pivot_len] {
+        term_scorer.advance();
+    }
+    // TODO use drain_filter when available.
+    let mut i = 0;
+    while i != term_scorers.len() {
+        if term_scorers[i].doc() == TERMINATED {
+            term_scorers.swap_remove(i);
+        } else {
+            i += 1;
+        }
+    }
+    term_scorers.sort_by_key(|scorer| scorer.doc());
+}
+
+pub fn block_wand(
+    mut scorers: Vec<TermScorer>,
+    mut threshold: Score,
+    callback: &mut dyn FnMut(u32, Score) -> Score,
+) {
+    let mut scorers: Vec<TermScorerWithMaxScore> = scorers
+        .iter_mut()
+        .map(TermScorerWithMaxScore::from)
+        .collect();
+    scorers.sort_by_key(|scorer| scorer.doc());
+    // At this point we need to ensure that the scorers are sorted!
+    debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
+    while let Some((before_pivot_len, pivot_len, pivot_doc)) =
+        find_pivot_doc(&scorers[..], threshold)
+    {
+        debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
+        debug_assert_ne!(pivot_doc, TERMINATED);
+        debug_assert!(before_pivot_len < pivot_len);
+
+        let block_max_score_upperbound: Score = scorers[..pivot_len]
+            .iter_mut()
+            .map(|scorer| {
+                scorer.shallow_seek(pivot_doc);
+                scorer.block_max_score()
+            })
+            .sum();
+
+        // Beware after shallow advance, skip readers can be in advance compared to
+        // the segment posting lists.
+        //
+        // `block_segment_postings.load_block()` need to be called separately.
+        if block_max_score_upperbound <= threshold {
+            // Block max condition was not reached
+            // We could get away by simply advancing the scorers to DocId + 1 but it would
+            // be inefficient. The optimization requires proper explanation and was
+            // isolated in a different function.
+            block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
+            continue;
+        }
+
+        // Block max condition is observed.
+        //
+        // Let's try and advance all scorers before the pivot to the pivot.
+        if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
+            // At least of the scorer does not contain the pivot.
+            //
+            // Let's stop scoring this pivot and go through the pivot selection again.
+            // Note that the current pivot is not necessarily a bad candidate and it
+            // may be picked again.
+            continue;
+        }
+
+        // At this point, all scorers are positioned on the doc.
+        let score = scorers[..pivot_len]
+            .iter_mut()
+            .map(|scorer| scorer.score())
+            .sum();
+        if score > threshold {
+            threshold = callback(pivot_doc, score);
+        }
+        // let's advance all of the scorers that are currently positioned on the pivot.
+        advance_all_scorers_on_pivot(&mut scorers, pivot_len);
+    }
+}
+
+struct TermScorerWithMaxScore<'a> {
+    scorer: &'a mut TermScorer,
+    max_score: Score,
+}
+
+impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
+    fn from(scorer: &'a mut TermScorer) -> Self {
+        let max_score = scorer.max_score();
+        TermScorerWithMaxScore { scorer, max_score }
+    }
+}
+
+impl<'a> Deref for TermScorerWithMaxScore<'a> {
+    type Target = TermScorer;
+
+    fn deref(&self) -> &Self::Target {
+        self.scorer
+    }
+}
+
+impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.scorer
+    }
+}
+
+fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
+    if let Some(first) = it.next() {
+        let mut prev = first;
+        for doc in it {
+            if doc < prev {
+                return false;
+            }
+            prev = doc;
+        }
+    }
+    true
+}
+#[cfg(test)]
+mod tests {
+    use crate::query::score_combiner::SumCombiner;
+    use crate::query::term_query::TermScorer;
+    use crate::query::Union;
+    use crate::query::{BM25Weight, Scorer};
+    use crate::{DocId, DocSet, Score, TERMINATED};
+    use proptest::prelude::*;
+    use std::cmp::Ordering;
+    use std::collections::BinaryHeap;
+    use std::iter;
+
+    struct Float(Score);
+
+    impl Eq for Float {}
+
+    impl PartialEq for Float {
+        fn eq(&self, other: &Self) -> bool {
+            self.cmp(&other) == Ordering::Equal
+        }
+    }
+
+    impl PartialOrd for Float {
+        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+            Some(self.cmp(other))
+        }
+    }
+
+    impl Ord for Float {
+        fn cmp(&self, other: &Self) -> Ordering {
+            other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
+        }
+    }
+
+    fn nearly_equals(left: Score, right: Score) -> bool {
+        (left - right).abs() < 0.000001 * (left + right).abs()
+    }
+
+    fn compute_checkpoints_for_each_pruning(
+        term_scorers: Vec<TermScorer>,
+        n: usize,
+    ) -> Vec<(DocId, Score)> {
+        let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
+        let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
+        let mut limit: Score = 0.0;
+        super::block_wand(term_scorers, Score::MIN, &mut |doc, score| {
+            heap.push(Float(score));
+            if heap.len() > n {
+                heap.pop().unwrap();
+            }
+            if heap.len() == n {
+                limit = heap.peek().unwrap().0;
+            }
+            if !nearly_equals(score, limit) {
+                checkpoints.push((doc, score));
+            }
+            return limit;
+        });
+        checkpoints
+    }
+
+    fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
+        let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
+        let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
+        let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);
+
+        let mut limit = Score::MIN;
+        loop {
+            if scorer.doc() == TERMINATED {
+                break;
+            }
+            let doc = scorer.doc();
+            let score = scorer.score();
+            if score > limit {
+                heap.push(Float(score));
+                if heap.len() > n {
+                    heap.pop().unwrap();
+                }
+                if heap.len() == n {
+                    limit = heap.peek().unwrap().0;
+                }
+                if !nearly_equals(score, limit) {
+                    checkpoints.push((doc, score));
+                }
+            }
+            scorer.advance();
+        }
+        checkpoints
+    }
+
+    const MAX_TERM_FREQ: u32 = 100u32;
+
+    fn posting_list(max_doc: u32) -> BoxedStrategy<Vec<(DocId, u32)>> {
+        (1..max_doc + 1)
+            .prop_flat_map(move |doc_freq| {
+                (
+                    proptest::bits::bitset::sampled(doc_freq as usize, 0..max_doc as usize),
+                    proptest::collection::vec(1u32..MAX_TERM_FREQ, doc_freq as usize),
+                )
+            })
+            .prop_map(|(docset, term_freqs)| {
+                docset
+                    .iter()
+                    .map(|doc| doc as u32)
+                    .zip(term_freqs.iter().cloned())
+                    .collect::<Vec<_>>()
+            })
+            .boxed()
+    }
+
+    fn gen_term_scorers(num_scorers: usize) -> BoxedStrategy<(Vec<Vec<(DocId, u32)>>, Vec<u32>)> {
+        (1u32..100u32)
+            .prop_flat_map(move |max_doc: u32| {
+                (
+                    proptest::collection::vec(posting_list(max_doc), num_scorers),
+                    proptest::collection::vec(2u32..10u32 * MAX_TERM_FREQ, max_doc as usize),
+                )
+            })
+            .boxed()
+    }
+
+    fn test_block_wand_aux(posting_lists: &[Vec<(DocId, u32)>], fieldnorms: &[u32]) {
+        // We virtually repeat all docs 64 times in order to emulate blocks of 2 documents
+        // and surface blogs more easily.
+        const REPEAT: usize = 64;
+        let fieldnorms_expanded = fieldnorms
+            .iter()
+            .cloned()
+            .flat_map(|fieldnorm| iter::repeat(fieldnorm).take(REPEAT))
+            .collect::<Vec<u32>>();
+
+        let postings_lists_expanded: Vec<Vec<(DocId, u32)>> = posting_lists
+            .iter()
+            .map(|posting_list| {
+                posting_list
+                    .into_iter()
+                    .cloned()
+                    .flat_map(|(doc, term_freq)| {
+                        (0 as u32..REPEAT as u32).map(move |offset| {
+                            (
+                                doc * (REPEAT as u32) + offset,
+                                if offset == 0 { term_freq } else { 1 },
+                            )
+                        })
+                    })
+                    .collect::<Vec<(DocId, u32)>>()
+            })
+            .collect::<Vec<_>>();
+
+        let total_fieldnorms: u64 = fieldnorms_expanded
+            .iter()
+            .cloned()
+            .map(|fieldnorm| fieldnorm as u64)
+            .sum();
+        let average_fieldnorm = (total_fieldnorms as Score) / (fieldnorms_expanded.len() as Score);
+        let max_doc = fieldnorms_expanded.len();
+
+        let term_scorers: Vec<TermScorer> = postings_lists_expanded
+            .iter()
+            .map(|postings| {
+                let bm25_weight = BM25Weight::for_one_term(
+                    postings.len() as u64,
+                    max_doc as u64,
+                    average_fieldnorm,
+                );
+                TermScorer::create_for_test(postings, &fieldnorms_expanded[..], bm25_weight)
+            })
+            .collect();
+        for top_k in 1..4 {
+            let checkpoints_for_each_pruning =
+                compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
+            let checkpoints_manual = compute_checkpoints_manual(term_scorers.clone(), top_k);
+            assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
+            for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
+                .iter()
+                .zip(checkpoints_manual.iter())
+            {
+                assert_eq!(left_doc, right_doc);
+                assert!(nearly_equals(left_score, right_score));
+            }
+        }
+    }
+
+    proptest! {
+        #![proptest_config(ProptestConfig::with_cases(500))]
+        #[test]
+        fn test_block_wand_two_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(2)) {
+            test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
+        }
+    }
+
+    proptest! {
+        #![proptest_config(ProptestConfig::with_cases(500))]
+        #[test]
+        fn test_block_wand_three_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(3)) {
+            test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
+        }
+    }
+}
```
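`block_wand`'s only interface to the collector is the callback: it fires for each document whose summed score beats the current threshold, and whatever it returns becomes the new threshold — that is how a top-k heap keeps tightening the pruning bound (the test helper `compute_checkpoints_for_each_pruning` above does exactly this). A self-contained toy driver for that contract, with no real scorers involved; the float-as-bits ordering below assumes non-negative scores:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Builds a callback that keeps the k best scores seen so far and returns
// the smallest of them as the new pruning threshold.
fn top_k_threshold_callback(k: usize) -> impl FnMut(u32, f32) -> f32 {
    // Non-negative f32s compare correctly through their bit patterns.
    let mut heap: BinaryHeap<Reverse<u32>> = BinaryHeap::with_capacity(k);
    move |_doc: u32, score: f32| {
        heap.push(Reverse(score.to_bits()));
        if heap.len() > k {
            heap.pop(); // drop the smallest retained score
        }
        if heap.len() == k {
            f32::from_bits(heap.peek().unwrap().0)
        } else {
            f32::MIN // heap not full: no pruning possible yet
        }
    }
}

fn main() {
    let mut callback = top_k_threshold_callback(2);
    // Simulate the (doc, score) pairs block_wand would surface:
    assert_eq!(callback(1, 0.5), f32::MIN); // heap not full yet
    assert_eq!(callback(4, 1.5), 0.5);      // top-2 = {0.5, 1.5}
    assert_eq!(callback(9, 2.0), 1.5);      // top-2 = {1.5, 2.0}
}
```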
```diff
@@ -1,4 +1,5 @@
 use crate::core::SegmentReader;
+use crate::postings::FreqReadingOption;
 use crate::query::explanation::does_not_match;
 use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
 use crate::query::term_query::TermScorer;
@@ -14,12 +15,12 @@ use crate::query::{intersect_scorers, Explanation};
 use crate::{DocId, Score};
 use std::collections::HashMap;

-enum SpecializedScorer<TScoreCombiner: ScoreCombiner> {
-    TermUnion(Union<TermScorer, TScoreCombiner>),
+enum SpecializedScorer {
+    TermUnion(Vec<TermScorer>),
     Other(Box<dyn Scorer>),
 }

-fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer<TScoreCombiner>
+fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer
 where
     TScoreCombiner: ScoreCombiner,
 {
@@ -35,20 +36,29 @@ where
                 .into_iter()
                 .map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
                 .collect();
-            return SpecializedScorer::TermUnion(Union::<TermScorer, TScoreCombiner>::from(
-                scorers,
-            ));
+            if scorers
+                .iter()
+                .all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq)
+            {
+                // Block wand is only available iff we read frequencies.
+                return SpecializedScorer::TermUnion(scorers);
+            } else {
+                return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
+                    scorers,
+                )));
+            }
         }
     }
     SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
 }

-impl<TScoreCombiner: ScoreCombiner> Into<Box<dyn Scorer>> for SpecializedScorer<TScoreCombiner> {
-    fn into(self) -> Box<dyn Scorer> {
-        match self {
-            Self::TermUnion(union) => Box::new(union),
-            Self::Other(scorer) => scorer,
+fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> {
+    match scorer {
+        SpecializedScorer::TermUnion(term_scorers) => {
+            let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers);
+            Box::new(union_scorer)
         }
+        SpecializedScorer::Other(scorer) => scorer,
     }
 }

```
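Note why `SpecializedScorer::TermUnion` now carries a plain `Vec<TermScorer>`: the enum no longer commits to a score combiner, so the caller can later build a `Union` with whichever combiner it wants, or hand the raw term scorers to the Block-WAND path — provided frequencies are being read. A stripped-down sketch of that shape (toy traits and types, not tantivy's):

```rust
trait Scorer {
    fn score(&mut self) -> f32;
}

struct TermScorer(f32);
impl Scorer for TermScorer {
    fn score(&mut self) -> f32 {
        self.0
    }
}

enum Specialized {
    TermUnion(Vec<TermScorer>), // kept concrete: combiner decided later
    Other(Box<dyn Scorer>),
}

// Stand-in for Union<TermScorer, TScoreCombiner>: sums the sub-scores.
struct SumUnion(Vec<TermScorer>);
impl Scorer for SumUnion {
    fn score(&mut self) -> f32 {
        self.0.iter_mut().map(|s| s.score()).sum()
    }
}

// The free function replacing the old Into impl: only here does the
// specialized value get committed to a concrete union scorer.
fn into_box_scorer(specialized: Specialized) -> Box<dyn Scorer> {
    match specialized {
        Specialized::TermUnion(term_scorers) => Box::new(SumUnion(term_scorers)),
        Specialized::Other(scorer) => scorer,
    }
}

fn main() {
    let mut boxed =
        into_box_scorer(Specialized::TermUnion(vec![TermScorer(0.5), TermScorer(1.0)]));
    assert_eq!(boxed.score(), 1.5);
}
```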
```diff
@@ -68,7 +78,7 @@ impl BooleanWeight {
     fn per_occur_scorers(
         &self,
         reader: &SegmentReader,
-        boost: f32,
+        boost: Score,
     ) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
         let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
         for &(ref occur, ref subweight) in &self.weights {
@@ -84,47 +94,48 @@ impl BooleanWeight {
     fn complex_scorer<TScoreCombiner: ScoreCombiner>(
         &self,
         reader: &SegmentReader,
-        boost: f32,
-    ) -> crate::Result<SpecializedScorer<TScoreCombiner>> {
+        boost: Score,
+    ) -> crate::Result<SpecializedScorer> {
         let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;

-        let should_scorer_opt: Option<SpecializedScorer<TScoreCombiner>> = per_occur_scorers
+        let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers
             .remove(&Occur::Should)
             .map(scorer_union::<TScoreCombiner>);

         let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
             .remove(&Occur::MustNot)
-            .map(scorer_union::<TScoreCombiner>)
-            .map(Into::into);
+            .map(scorer_union::<DoNothingCombiner>)
+            .map(into_box_scorer::<DoNothingCombiner>);

         let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
             .remove(&Occur::Must)
             .map(intersect_scorers);

-        let positive_scorer: SpecializedScorer<TScoreCombiner> =
-            match (should_scorer_opt, must_scorer_opt) {
-                (Some(should_scorer), Some(must_scorer)) => {
-                    if self.scoring_enabled {
-                        SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
-                            Box<dyn Scorer>,
-                            Box<dyn Scorer>,
-                            TScoreCombiner,
-                        >::new(
-                            must_scorer, should_scorer.into()
-                        )))
-                    } else {
-                        SpecializedScorer::Other(must_scorer)
-                    }
-                }
-                (None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
-                (Some(should_scorer), None) => should_scorer,
-                (None, None) => {
-                    return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
-                }
-            };
+        let positive_scorer: SpecializedScorer = match (should_scorer_opt, must_scorer_opt) {
+            (Some(should_scorer), Some(must_scorer)) => {
+                if self.scoring_enabled {
+                    SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
+                        Box<dyn Scorer>,
+                        Box<dyn Scorer>,
+                        TScoreCombiner,
+                    >::new(
+                        must_scorer,
+                        into_box_scorer::<TScoreCombiner>(should_scorer),
+                    )))
+                } else {
+                    SpecializedScorer::Other(must_scorer)
+                }
+            }
+            (None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
+            (Some(should_scorer), None) => should_scorer,
+            (None, None) => {
+                return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
+            }
+        };

         if let Some(exclude_scorer) = exclude_scorer_opt {
-            let positive_scorer_boxed: Box<dyn Scorer> = positive_scorer.into();
+            let positive_scorer_boxed: Box<dyn Scorer> =
+                into_box_scorer::<TScoreCombiner>(positive_scorer);
             Ok(SpecializedScorer::Other(Box::new(Exclude::new(
                 positive_scorer_boxed,
                 exclude_scorer,
@@ -136,7 +147,7 @@ impl BooleanWeight {
 }

 impl Weight for BooleanWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         if self.weights.is_empty() {
             Ok(Box::new(EmptyScorer))
         } else if self.weights.len() == 1 {
@@ -148,20 +159,22 @@ impl Weight for BooleanWeight {
             }
         } else if self.scoring_enabled {
             self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
-                .map(Into::into)
+                .map(|specialized_scorer| {
+                    into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
+                })
         } else {
             self.complex_scorer::<DoNothingCombiner>(reader, boost)
-                .map(Into::into)
+                .map(into_box_scorer::<DoNothingCombiner>)
         }
     }

     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader, 1.0)?;
         if scorer.seek(doc) != doc {
             return Err(does_not_match(doc));
         }
         if !self.scoring_enabled {
-            return Ok(Explanation::new("BooleanQuery with no scoring", 1f32));
+            return Ok(Explanation::new("BooleanQuery with no scoring", 1.0));
         }

         let mut explanation = Explanation::new("BooleanClause. Sum of ...", scorer.score());
@@ -180,9 +193,11 @@ impl Weight for BooleanWeight {
         reader: &SegmentReader,
```
callback: &mut dyn FnMut(DocId, Score),
|
callback: &mut dyn FnMut(DocId, Score),
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<()> {
|
||||||
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
|
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
|
||||||
match scorer {
|
match scorer {
|
||||||
SpecializedScorer::TermUnion(mut union_scorer) => {
|
SpecializedScorer::TermUnion(term_scorers) => {
|
||||||
|
let mut union_scorer =
|
||||||
|
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
|
||||||
for_each_scorer(&mut union_scorer, callback);
|
for_each_scorer(&mut union_scorer, callback);
|
||||||
}
|
}
|
||||||
SpecializedScorer::Other(mut scorer) => {
|
SpecializedScorer::Other(mut scorer) => {
|
||||||
@@ -204,14 +219,14 @@ impl Weight for BooleanWeight {
|
|||||||
/// important optimization (e.g. BlockWAND for union).
|
/// important optimization (e.g. BlockWAND for union).
|
||||||
fn for_each_pruning(
|
fn for_each_pruning(
|
||||||
&self,
|
&self,
|
||||||
threshold: f32,
|
threshold: Score,
|
||||||
reader: &SegmentReader,
|
reader: &SegmentReader,
|
||||||
callback: &mut dyn FnMut(DocId, Score) -> Score,
|
callback: &mut dyn FnMut(DocId, Score) -> Score,
|
||||||
) -> crate::Result<()> {
|
) -> crate::Result<()> {
|
||||||
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
|
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
|
||||||
match scorer {
|
match scorer {
|
||||||
SpecializedScorer::TermUnion(mut union_scorer) => {
|
SpecializedScorer::TermUnion(term_scorers) => {
|
||||||
for_each_pruning_scorer(&mut union_scorer, threshold, callback);
|
super::block_wand(term_scorers, threshold, callback);
|
||||||
}
|
}
|
||||||
SpecializedScorer::Other(mut scorer) => {
|
SpecializedScorer::Other(mut scorer) => {
|
||||||
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
|
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
|
||||||
|
|||||||
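The `BooleanWeight` changes above do two things. First, the `Into<Box<dyn Scorer>>` impl becomes the free function `into_box_scorer`, because `SpecializedScorer` no longer carries the combiner as a type parameter: the `TermUnion` variant now holds the raw `Vec<TermScorer>` and the combiner is picked at each call site. Second, `for_each_pruning` can hand that raw vector straight to the new `block_wand` routine instead of driving a generic union scorer. A minimal sketch of the dispatch pattern, with toy types standing in for `TermScorer`/`Union` (illustrative only, not tantivy's code):

```rust
// Illustrative: a two-variant scorer where the homogeneous case keeps its
// concrete type so a specialized routine (block_wand in the real code) can
// run on it, while the general case is a boxed trait object.
enum Specialized {
    TermUnion(Vec<u32>),                  // all clauses are plain term scorers
    Other(Box<dyn Iterator<Item = u32>>), // anything else, type-erased
}

// Mirror of `into_box_scorer`: erase the specialization only when the caller
// needs the general interface.
fn into_boxed(specialized: Specialized) -> Box<dyn Iterator<Item = u32>> {
    match specialized {
        Specialized::TermUnion(docs) => Box::new(docs.into_iter()),
        Specialized::Other(iter) => iter,
    }
}

fn main() {
    // A pruning caller would match on TermUnion first and run the fast path;
    // every other caller just boxes the scorer away.
    let collected: Vec<u32> = into_boxed(Specialized::TermUnion(vec![1, 2, 3])).collect();
    assert_eq!(collected, vec![1, 2, 3]);
}
```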
@@ -1,13 +1,17 @@
+mod block_wand;
 mod boolean_query;
 mod boolean_weight;

+pub(crate) use self::block_wand::block_wand;
 pub use self::boolean_query::BooleanQuery;

 #[cfg(test)]
 mod tests {

     use super::*;
+    use crate::assert_nearly_equals;
     use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
+    use crate::collector::TopDocs;
     use crate::query::score_combiner::SumWithCoordsCombiner;
     use crate::query::term_query::TermScorer;
     use crate::query::Intersection;
@@ -18,9 +22,8 @@ mod tests {
     use crate::query::Scorer;
     use crate::query::TermQuery;
     use crate::schema::*;
-    use crate::tests::assert_nearly_equals;
     use crate::Index;
-    use crate::{DocAddress, DocId};
+    use crate::{DocAddress, DocId, Score};

     fn aux_test_helper() -> (Index, Field) {
         let mut schema_builder = Schema::builder();
@@ -29,7 +32,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             {
                 index_writer.add_document(doc!(text_field => "a b c"));
                 index_writer.add_document(doc!(text_field => "a c"));
@@ -58,9 +61,7 @@ mod tests {
         let query = query_parser.parse_query("+a").unwrap();
         let searcher = index.reader().unwrap().searcher();
         let weight = query.weight(&searcher, true).unwrap();
-        let scorer = weight
-            .scorer(searcher.segment_reader(0u32), 1.0f32)
-            .unwrap();
+        let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
         assert!(scorer.is::<TermScorer>());
     }

@@ -72,17 +73,13 @@ mod tests {
         {
             let query = query_parser.parse_query("+a +b +c").unwrap();
             let weight = query.weight(&searcher, true).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
             assert!(scorer.is::<Intersection<TermScorer>>());
         }
         {
             let query = query_parser.parse_query("+a +(b c)").unwrap();
             let weight = query.weight(&searcher, true).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
             assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
         }
     }
@@ -95,9 +92,7 @@ mod tests {
         {
             let query = query_parser.parse_query("+a b").unwrap();
             let weight = query.weight(&searcher, true).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
             assert!(scorer.is::<RequiredOptionalScorer<
                 Box<dyn Scorer>,
                 Box<dyn Scorer>,
@@ -107,9 +102,7 @@ mod tests {
         {
             let query = query_parser.parse_query("+a b").unwrap();
             let weight = query.weight(&searcher, false).unwrap();
-            let scorer = weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
-                .unwrap();
+            let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
             assert!(scorer.is::<TermScorer>());
         }
     }
@@ -140,7 +133,6 @@ mod tests {
                 .map(|doc| doc.1)
                 .collect::<Vec<DocId>>()
         };
-
         {
             let boolean_query = BooleanQuery::from(vec![(Occur::Must, make_term_query("a"))]);
             assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
@@ -177,6 +169,54 @@ mod tests {
         }
     }

+    #[test]
+    pub fn test_boolean_query_two_excluded() {
+        let (index, text_field) = aux_test_helper();
+
+        let make_term_query = |text: &str| {
+            let term_query = TermQuery::new(
+                Term::from_field_text(text_field, text),
+                IndexRecordOption::Basic,
+            );
+            let query: Box<dyn Query> = Box::new(term_query);
+            query
+        };
+
+        let reader = index.reader().unwrap();
+
+        let matching_topdocs = |query: &dyn Query| {
+            reader
+                .searcher()
+                .search(query, &TopDocs::with_limit(3))
+                .unwrap()
+        };
+
+        let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion
+        {
+            let boolean_query_no_excluded =
+                BooleanQuery::from(vec![(Occur::Must, make_term_query("d"))]);
+            let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded);
+            assert_eq!(topdocs_no_excluded.len(), 2);
+            let (top_score, top_doc) = topdocs_no_excluded[0];
+            assert_eq!(top_doc, DocAddress(0, 4));
+            assert_eq!(topdocs_no_excluded[1].1, DocAddress(0, 3)); // ignore score of doc 3.
+            score_doc_4 = top_score;
+        }
+
+        {
+            let boolean_query_two_excluded = BooleanQuery::from(vec![
+                (Occur::Must, make_term_query("d")),
+                (Occur::MustNot, make_term_query("a")),
+                (Occur::MustNot, make_term_query("b")),
+            ]);
+            let topdocs_excluded = matching_topdocs(&boolean_query_two_excluded);
+            assert_eq!(topdocs_excluded.len(), 1);
+            let (top_score, top_doc) = topdocs_excluded[0];
+            assert_eq!(top_doc, DocAddress(0, 4));
+            assert_eq!(top_score, score_doc_4);
+        }
+    }
+
     #[test]
     pub fn test_boolean_query_with_weight() {
         let mut schema_builder = Schema::builder();
@@ -184,7 +224,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field => "a b c"));
             index_writer.add_document(doc!(text_field => "a c"));
             index_writer.add_document(doc!(text_field => "b c"));
@@ -205,17 +245,17 @@ mod tests {
         let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
         {
             let mut boolean_scorer = boolean_weight
-                .scorer(searcher.segment_reader(0u32), 1.0f32)
+                .scorer(searcher.segment_reader(0u32), 1.0)
                 .unwrap();
             assert_eq!(boolean_scorer.doc(), 0u32);
-            assert_nearly_equals(boolean_scorer.score(), 0.84163445f32);
+            assert_nearly_equals!(boolean_scorer.score(), 0.84163445);
         }
         {
             let mut boolean_scorer = boolean_weight
-                .scorer(searcher.segment_reader(0u32), 2.0f32)
+                .scorer(searcher.segment_reader(0u32), 2.0)
                 .unwrap();
             assert_eq!(boolean_scorer.doc(), 0u32);
-            assert_nearly_equals(boolean_scorer.score(), 1.6832689f32);
+            assert_nearly_equals!(boolean_scorer.score(), 1.6832689);
         }
     }

@@ -245,170 +285,9 @@ mod tests {
             (Occur::Must, make_term_query("a")),
             (Occur::Must, make_term_query("b")),
         ]);
-        assert_eq!(score_docs(&boolean_query), vec![0.977973, 0.84699446]);
+        let scores = score_docs(&boolean_query);
+        assert_nearly_equals!(scores[0], 0.977973);
+        assert_nearly_equals!(scores[1], 0.84699446);
     }

-    // motivated by #554
-    #[test]
-    fn test_bm25_several_fields() {
-        let mut schema_builder = Schema::builder();
-        let title = schema_builder.add_text_field("title", TEXT);
-        let text = schema_builder.add_text_field("text", TEXT);
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.add_document(doc!(
-            // tf = 1 0
-            title => "Законы притяжения Оксана Кулакова",
-            // tf = 1 0
-            text => "Законы притяжения Оксана Кулакова] \n\nТема: Сексуальное искусство, Женственность\nТип товара: Запись вебинара (аудио)\nПродолжительность: 1,5 часа\n\nСсылка на вебинар:\n ",
-        ));
-        index_writer.add_document(doc!(
-            // tf = 1 0
-            title => "Любимые русские пироги (Оксана Путан)",
-            // tf = 2 0
-            text => "http://i95.fastpic.ru/big/2017/0628/9a/615b9c8504d94a3893d7f496ac53539a.jpg \n\nОт издателя\nОксана Путан профессиональный повар, автор кулинарных книг и известный кулинарный блогер. Ее рецепты отличаются практичностью, доступностью и пользуются огромной популярностью в русскоязычном интернете. Это третья книга автора о самом вкусном и ароматном настоящих русских пирогах и выпечке!\nДаже новички на кухне легко готовят по ее рецептам. Оксана описывает процесс приготовления настолько подробно и понятно, что вам остается только наслаждаться готовкой и не тратить время на лишние усилия. Готовьте легко и просто!\n\nhttps://www.ozon.ru/context/detail/id/139872462/"
-        ));
-        index_writer.add_document(doc!(
-            // tf = 1 1
-            title => "PDF Мастер Класс \"Морячок\" (Оксана Лифенко)",
-            // tf = 0 0
-            text => "https://i.ibb.co/pzvHrDN/I3d U T6 Gg TM.jpg\nhttps://i.ibb.co/NFrb6v6/N0ls Z9nwjb U.jpg\nВ описание входит штаны, кофта, берет, матросский воротник. Описание продается в формате PDF, состоит из 12 страниц формата А4 и может быть напечатано на любом принтере.\nОписание предназначено для кукол BJD RealPuki от FairyLand, но может подойти и другим подобным куклам. Также вы можете вязать этот наряд из обычной пряжи, и он подойдет для куколок побольше.\nhttps://vk.com/market 95724412?w=product 95724412_2212"
-        ));
-        for _ in 0..1_000 {
-            index_writer.add_document(doc!(
-                title => "a b d e f g",
-                text => "maitre corbeau sur un arbre perche tenait dans son bec un fromage Maitre rnard par lodeur alleche lui tint a peu pres ce langage."
-            ));
-        }
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        let query_parser = QueryParser::for_index(&index, vec![title, text]);
-        let query = query_parser.parse_query("Оксана Лифенко").unwrap();
-        let weight = query.weight(&searcher, true).unwrap();
-        let mut scorer = weight
-            .scorer(searcher.segment_reader(0u32), 1.0f32)
-            .unwrap();
-        scorer.advance();
-
-        let explanation = query.explain(&searcher, DocAddress(0u32, 0u32)).unwrap();
-        assert_eq!(
-            explanation.to_pretty_json(),
-            r#"{
-  "value": 12.997711,
-  "description": "BooleanClause. Sum of ...",
-  "details": [
-    {
-      "value": 12.997711,
-      "description": "BooleanClause. Sum of ...",
-      "details": [
-        {
-          "value": 6.551476,
-          "description": "TermQuery, product of...",
-          "details": [
-            {
-              "value": 2.2,
-              "description": "(K1+1)"
-            },
-            {
-              "value": 5.658984,
-              "description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
-              "details": [
-                {
-                  "value": 3.0,
-                  "description": "n, number of docs containing this term"
-                },
-                {
-                  "value": 1003.0,
-                  "description": "N, total number of docs"
-                }
-              ]
-            },
-            {
-              "value": 0.5262329,
-              "description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
-              "details": [
-                {
-                  "value": 1.0,
-                  "description": "freq, occurrences of term within document"
-                },
-                {
-                  "value": 1.2,
-                  "description": "k1, term saturation parameter"
-                },
-                {
-                  "value": 0.75,
-                  "description": "b, length normalization parameter"
-                },
-                {
-                  "value": 4.0,
-                  "description": "dl, length of field"
-                },
-                {
-                  "value": 5.997009,
-                  "description": "avgdl, average length of field"
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "value": 6.446235,
-          "description": "TermQuery, product of...",
-          "details": [
-            {
-              "value": 2.2,
-              "description": "(K1+1)"
-            },
-            {
-              "value": 5.9954567,
-              "description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
-              "details": [
-                {
-                  "value": 2.0,
-                  "description": "n, number of docs containing this term"
-                },
-                {
-                  "value": 1003.0,
-                  "description": "N, total number of docs"
-                }
-              ]
-            },
-            {
-              "value": 0.4887212,
-              "description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
-              "details": [
-                {
-                  "value": 1.0,
-                  "description": "freq, occurrences of term within document"
-                },
-                {
-                  "value": 1.2,
-                  "description": "k1, term saturation parameter"
-                },
-                {
-                  "value": 0.75,
-                  "description": "b, length normalization parameter"
-                },
-                {
-                  "value": 20.0,
-                  "description": "dl, length of field"
-                },
-                {
-                  "value": 24.123629,
-                  "description": "avgdl, average length of field"
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}"#
-        );
-    }
 }
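A side note on the test churn above: `assert_nearly_equals` changes from a function in `crate::tests` to an exported macro, which is why every call site gains a `!` and the import moves to `use crate::assert_nearly_equals;`. The body below is a guess at what such a float-tolerance assertion typically looks like, for illustration only; only the name and call shape come from the diff:

```rust
// Hypothetical implementation of a relative-tolerance f32 assertion.
#[macro_export]
macro_rules! assert_nearly_equals {
    ($left:expr, $right:expr) => {{
        let (left, right): (f32, f32) = ($left, $right);
        let diff = (left - right).abs();
        // Tolerance scales with the magnitudes, with a floor for tiny values.
        let tolerance = 0.0005 * left.abs().max(right.abs()).max(1.0);
        assert!(
            diff <= tolerance,
            "assertion failed: {} is not nearly equal to {}",
            left,
            right
        );
    }};
}

fn main() {
    assert_nearly_equals!(0.84163445, 0.841634);
}
```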
@@ -1,7 +1,7 @@
 use crate::fastfield::DeleteBitSet;
 use crate::query::explanation::does_not_match;
 use crate::query::{Explanation, Query, Scorer, Weight};
-use crate::{DocId, DocSet, Searcher, SegmentReader, Term};
+use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
 use std::collections::BTreeSet;
 use std::fmt;

@@ -12,12 +12,12 @@ use std::fmt;
 /// factor.
 pub struct BoostQuery {
     query: Box<dyn Query>,
-    boost: f32,
+    boost: Score,
 }

 impl BoostQuery {
     /// Builds a boost query.
-    pub fn new(query: Box<dyn Query>, boost: f32) -> BoostQuery {
+    pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
         BoostQuery { query, boost }
     }
 }
@@ -55,22 +55,22 @@ impl Query for BoostQuery {

 pub(crate) struct BoostWeight {
     weight: Box<dyn Weight>,
-    boost: f32,
+    boost: Score,
 }

 impl BoostWeight {
-    pub fn new(weight: Box<dyn Weight>, boost: f32) -> Self {
+    pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
         BoostWeight { weight, boost }
     }
 }

 impl Weight for BoostWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         self.weight.scorer(reader, boost * self.boost)
     }

     fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader, 1.0)?;
         if scorer.seek(doc) != doc {
             return Err(does_not_match(doc));
         }
@@ -88,11 +88,11 @@ impl Weight for BoostWeight {

 pub(crate) struct BoostScorer<S: Scorer> {
     underlying: S,
-    boost: f32,
+    boost: Score,
 }

 impl<S: Scorer> BoostScorer<S> {
-    pub fn new(underlying: S, boost: f32) -> BoostScorer<S> {
+    pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
         BoostScorer { underlying, boost }
     }
 }
@@ -128,7 +128,7 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
 }

 impl<S: Scorer> Scorer for BoostScorer<S> {
-    fn score(&mut self) -> f32 {
+    fn score(&mut self) -> Score {
         self.underlying.score() * self.boost
     }
 }
@@ -144,7 +144,7 @@ mod tests {
     fn test_boost_query_explain() {
         let schema = Schema::builder().build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        let mut index_writer = index.writer_for_tests().unwrap();
         index_writer.add_document(Document::new());
         assert!(index_writer.commit().is_ok());
         let reader = index.reader().unwrap();
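The `f32` → `Score` renames that run through this whole compare (boosts, thresholds, `score()` return types) lean on tantivy's `Score` type alias rather than a new type. To the best of my knowledge it is declared in the crate root as a plain alias, which is also why literals such as `1.0f32` can be relaxed to `1.0` at the call sites:

```rust
// As declared in tantivy's crate root (an alias, so Score and f32 stay
// interchangeable; the change is about intent, not representation):
pub type Score = f32;

// Example: a boost is just a multiplicative factor applied to a Score.
fn apply_boost(score: Score, boost: Score) -> Score {
    score * boost
}

fn main() {
    assert_eq!(apply_boost(0.5, 2.0), 1.0);
}
```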
@@ -34,7 +34,7 @@ impl Query for EmptyQuery
 /// It is useful for tests and handling edge cases.
 pub struct EmptyWeight;
 impl Weight for EmptyWeight {
-    fn scorer(&self, _reader: &SegmentReader, _boost: f32) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
         Ok(Box::new(EmptyScorer))
     }

@@ -64,7 +64,7 @@ impl DocSet for EmptyScorer {

 impl Scorer for EmptyScorer {
     fn score(&mut self) -> Score {
-        0f32
+        0.0
     }
 }

@@ -3,6 +3,11 @@ use crate::query::Scorer;
 use crate::DocId;
 use crate::Score;

+#[inline(always)]
+fn is_within<TDocSetExclude: DocSet>(docset: &mut TDocSetExclude, doc: DocId) -> bool {
+    docset.doc() <= doc && docset.seek(doc) == doc
+}
+
 /// Filters a given `DocSet` by removing the docs from a given `DocSet`.
 ///
 /// The excluding docset has no impact on scoring.
@@ -23,8 +28,7 @@ where
     ) -> Exclude<TDocSet, TDocSetExclude> {
         while underlying_docset.doc() != TERMINATED {
             let target = underlying_docset.doc();
-            if excluding_docset.seek(target) != target {
-                // this document is not excluded.
+            if !is_within(&mut excluding_docset, target) {
                 break;
             }
             underlying_docset.advance();
@@ -36,42 +40,30 @@ where
     }
 }

-impl<TDocSet, TDocSetExclude> Exclude<TDocSet, TDocSetExclude>
-where
-    TDocSet: DocSet,
-    TDocSetExclude: DocSet,
-{
-    /// Returns true iff the doc is not removed.
-    ///
-    /// The method has to be called with non strictly
-    /// increasing `doc`.
-    fn accept(&mut self) -> bool {
-        let doc = self.underlying_docset.doc();
-        self.excluding_docset.seek(doc) != doc
-    }
-}
-
 impl<TDocSet, TDocSetExclude> DocSet for Exclude<TDocSet, TDocSetExclude>
 where
     TDocSet: DocSet,
     TDocSetExclude: DocSet,
 {
     fn advance(&mut self) -> DocId {
-        while self.underlying_docset.advance() != TERMINATED {
-            if self.accept() {
-                return self.doc();
+        loop {
+            let candidate = self.underlying_docset.advance();
+            if candidate == TERMINATED {
+                return TERMINATED;
+            }
+            if !is_within(&mut self.excluding_docset, candidate) {
+                return candidate;
             }
         }
-        TERMINATED
     }

     fn seek(&mut self, target: DocId) -> DocId {
-        let underlying_seek_result = self.underlying_docset.seek(target);
-        if underlying_seek_result == TERMINATED {
+        let candidate = self.underlying_docset.seek(target);
+        if candidate == TERMINATED {
             return TERMINATED;
         }
-        if self.accept() {
-            return underlying_seek_result;
+        if !is_within(&mut self.excluding_docset, candidate) {
+            return candidate;
         }
         self.advance()
     }
@@ -129,7 +121,7 @@ mod tests {
                 VecDocSet::from(vec![1, 2, 3, 10, 16, 24]),
             ))
         },
-        vec![1, 2, 5, 8, 10, 15, 24],
+        vec![5, 8, 10, 15, 24],
     );
 }

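The key subtlety in the `Exclude` rewrite is the `docset.doc() <= doc` guard inside `is_within`. Under the 0.13 `DocSet` API, `seek` is forward-only, and the old `accept` helper could be asked about a candidate the excluding docset had already moved past, which would mean seeking backwards. The guard short-circuits in that case: a docset sitting past the candidate cannot contain it. A reduced demonstration with a toy forward-only cursor in place of a real `DocSet` (the `is_within` body matches the diff; `VecCursor` is invented for the example):

```rust
const TERMINATED: u32 = u32::MAX;

// Toy forward-only cursor over a sorted doc list, standing in for DocSet.
struct VecCursor {
    docs: Vec<u32>,
    pos: usize,
}

impl VecCursor {
    fn doc(&self) -> u32 {
        self.docs.get(self.pos).copied().unwrap_or(TERMINATED)
    }
    // Forward-only: callers must never ask for a target below doc().
    fn seek(&mut self, target: u32) -> u32 {
        while self.doc() < target {
            self.pos += 1;
        }
        self.doc()
    }
}

// Same shape as the diff's helper: only seek when it moves the cursor forward.
fn is_within(docset: &mut VecCursor, doc: u32) -> bool {
    docset.doc() <= doc && docset.seek(doc) == doc
}

fn main() {
    let mut excluded = VecCursor { docs: vec![3, 7], pos: 0 };
    assert!(is_within(&mut excluded, 3));
    // This probe advances the cursor to 7 while answering "no" for 5 ...
    assert!(!is_within(&mut excluded, 5));
    // ... so the next probe, for 4, lands *behind* the cursor; the
    // doc() <= doc guard short-circuits instead of seeking backwards.
    assert!(!is_within(&mut excluded, 4));
    assert!(is_within(&mut excluded, 7));
}
```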
@@ -1,5 +1,6 @@
-use crate::{DocId, TantivyError};
+use crate::{DocId, Score, TantivyError};
 use serde::Serialize;
+use std::fmt;

 pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
     TantivyError::InvalidArgument(format!("Document #({}) does not match", doc))
@@ -12,15 +13,21 @@ pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
 /// representation of this tree when debugging a given score.
 #[derive(Clone, Serialize)]
 pub struct Explanation {
-    value: f32,
+    value: Score,
     description: String,
     #[serde(skip_serializing_if = "Vec::is_empty")]
     details: Vec<Explanation>,
 }

+impl fmt::Debug for Explanation {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Explanation({})", self.to_pretty_json())
+    }
+}
+
 impl Explanation {
     /// Creates a new explanation object.
-    pub fn new<T: ToString>(description: T, value: f32) -> Explanation {
+    pub fn new<T: ToString>(description: T, value: Score) -> Explanation {
         Explanation {
             value,
             description: description.to_string(),
@@ -29,7 +36,7 @@ impl Explanation {
     }

     /// Returns the value associated to the current node.
-    pub fn value(&self) -> f32 {
+    pub fn value(&self) -> Score {
         self.value
     }

@@ -41,7 +48,7 @@ impl Explanation {
     }

     /// Shortcut for `self.details.push(Explanation::new(name, value));`
-    pub fn add_const<T: ToString>(&mut self, name: T, value: f32) {
+    pub fn add_const<T: ToString>(&mut self, name: T, value: Score) {
         self.details.push(Explanation::new(name, value));
     }

@@ -163,10 +163,10 @@ impl Query for FuzzyTermQuery {
 #[cfg(test)]
 mod test {
     use super::FuzzyTermQuery;
+    use crate::assert_nearly_equals;
     use crate::collector::TopDocs;
     use crate::schema::Schema;
     use crate::schema::TEXT;
-    use crate::tests::assert_nearly_equals;
     use crate::Index;
     use crate::Term;

@@ -177,7 +177,7 @@ mod test {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(
                 country_field => "japan",
             ));
@@ -199,7 +199,7 @@ mod test {
                 .unwrap();
             assert_eq!(top_docs.len(), 1, "Expected only 1 document");
             let (score, _) = top_docs[0];
-            assert_nearly_equals(1f32, score);
+            assert_nearly_equals!(1.0, score);
         }

         // fails because non-prefix Levenshtein distance is more than 1 (add 'a' and 'n')
@@ -223,7 +223,7 @@ mod test {
                 .unwrap();
             assert_eq!(top_docs.len(), 1, "Expected only 1 document");
             let (score, _) = top_docs[0];
-            assert_nearly_equals(1f32, score);
+            assert_nearly_equals!(1.0, score);
         }
     }
 }
@@ -53,7 +53,8 @@ pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>>
 }

 fn go_to_first_doc<TDocSet: DocSet>(docsets: &mut [TDocSet]) -> DocId {
-    let mut candidate = 0;
+    assert!(!docsets.is_empty());
+    let mut candidate = docsets.iter().map(TDocSet::doc).max().unwrap();
     'outer: loop {
         for docset in docsets.iter_mut() {
             let seek_doc = docset.seek(candidate);
@@ -118,7 +119,9 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
                     continue 'outer;
                 }
             }
+            debug_assert_eq!(candidate, self.left.doc());
+            debug_assert_eq!(candidate, self.right.doc());
+            debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
             return candidate;
         }
     }
@@ -129,7 +132,10 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
         for docset in &mut self.others {
             docsets.push(docset);
         }
-        go_to_first_doc(&mut docsets[..])
+        let doc = go_to_first_doc(&mut docsets[..]);
+        debug_assert!(docsets.iter().all(|docset| docset.doc() == doc));
+        debug_assert!(doc >= target);
+        doc
     }

     fn doc(&self) -> DocId {
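Two invariants motivate the `Intersection` changes: `seek` is forward-only, so `go_to_first_doc` must not start its candidate below any docset's current position, and after alignment every docset must sit on the same doc (hence the new `debug_assert`s). Seeding the candidate with the max of the current docs satisfies both and also skips work, since no doc below that max can be in the intersection. A toy version of the alignment loop over sorted vectors (illustrative; real DocSets replace the `(Vec<u32>, usize)` cursors):

```rust
// Toy go_to_first_doc: align every cursor on the first doc they all share.
fn go_to_first_common(docsets: &mut [(Vec<u32>, usize)]) -> Option<u32> {
    assert!(!docsets.is_empty());
    // Seed with the max of the current docs (the diff's change): this never
    // asks a cursor to move backwards, and nothing below it can match anyway.
    let mut candidate = docsets
        .iter()
        .map(|(docs, pos)| docs.get(*pos).copied())
        .max()
        .flatten()?;
    'outer: loop {
        for (docs, pos) in docsets.iter_mut() {
            // Forward-only "seek" up to the candidate.
            while docs.get(*pos).copied()? < candidate {
                *pos += 1;
            }
            let doc = docs.get(*pos).copied()?;
            if doc > candidate {
                // One cursor overshot: restart the round with a bigger candidate.
                candidate = doc;
                continue 'outer;
            }
        }
        // Every cursor now sits exactly on `candidate`.
        return Some(candidate);
    }
}

fn main() {
    let mut sets = vec![
        (vec![1, 3, 9, 12], 0),
        (vec![3, 9, 10, 12], 0),
        (vec![2, 3, 11, 12], 0),
    ];
    assert_eq!(go_to_first_common(&mut sets), Some(3));
}
```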
@@ -26,6 +26,7 @@ mod weight;
 mod vec_docset;

 pub(crate) mod score_combiner;
+pub(crate) use self::bm25::BM25Weight;
 pub use self::intersection::Intersection;
 pub use self::union::Union;

@@ -10,12 +10,13 @@ pub use self::phrase_weight::PhraseWeight;
 pub mod tests {

     use super::*;
+    use crate::assert_nearly_equals;
     use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE};
     use crate::core::Index;
+    use crate::query::Weight;
     use crate::schema::{Schema, Term, TEXT};
-    use crate::tests::assert_nearly_equals;
-    use crate::DocAddress;
     use crate::DocId;
+    use crate::{DocAddress, TERMINATED};

     pub fn create_index(texts: &[&'static str]) -> Index {
         let mut schema_builder = Schema::builder();
@@ -23,7 +24,7 @@ pub mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             for &text in texts {
                 let doc = doc!(text_field=>text);
                 index_writer.add_document(doc);
@@ -67,6 +68,23 @@ pub mod tests {
         assert!(test_query(vec!["g", "a"]).is_empty());
     }

+    #[test]
+    pub fn test_phrase_query_simple() -> crate::Result<()> {
+        let index = create_index(&["a b b d c g c", "a b a b c"]);
+        let text_field = index.schema().get_field("text").unwrap();
+        let searcher = index.reader()?.searcher();
+        let terms: Vec<Term> = vec!["a", "b", "c"]
+            .iter()
+            .map(|text| Term::from_field_text(text_field, text))
+            .collect();
+        let phrase_query = PhraseQuery::new(terms);
+        let phrase_weight = phrase_query.phrase_weight(&searcher, false)?;
+        let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0)?;
+        assert_eq!(phrase_scorer.doc(), 1);
+        assert_eq!(phrase_scorer.advance(), TERMINATED);
+        Ok(())
+    }
+
     #[test]
     pub fn test_phrase_query_no_score() {
         let index = create_index(&[
@@ -117,7 +135,7 @@ pub mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field=>"a b c"));
             assert!(index_writer.commit().is_ok());
         }
@@ -157,8 +175,8 @@ pub mod tests {
             .to_vec()
         };
         let scores = test_query(vec!["a", "b"]);
-        assert_nearly_equals(scores[0], 0.40618482);
-        assert_nearly_equals(scores[1], 0.46844664);
+        assert_nearly_equals!(scores[0], 0.40618482);
+        assert_nearly_equals!(scores[1], 0.46844664);
     }

     #[test] // motivated by #234
@@ -168,7 +186,7 @@ pub mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field=>"b"));
             index_writer.add_document(doc!(text_field=>"a b"));
             index_writer.add_document(doc!(text_field=>"b a"));
@@ -199,7 +217,7 @@ pub mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field=>"a b c d e f g h"));
             assert!(index_writer.commit().is_ok());
         }
@@ -3,7 +3,7 @@ use crate::fieldnorm::FieldNormReader;
 use crate::postings::Postings;
 use crate::query::bm25::BM25Weight;
 use crate::query::{Intersection, Scorer};
-use crate::DocId;
+use crate::{DocId, Score};
 use std::cmp::Ordering;

 struct PostingsWithOffset<TPostings> {
@@ -239,6 +239,7 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
     }

     fn seek(&mut self, target: DocId) -> DocId {
+        debug_assert!(target >= self.doc());
         let doc = self.intersection_docset.seek(target);
         if doc == TERMINATED || self.phrase_match() {
             return doc;
@@ -256,7 +257,7 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
 }

 impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {
-    fn score(&mut self) -> f32 {
+    fn score(&mut self) -> Score {
         let doc = self.doc();
         let fieldnorm_id = self.fieldnorm_reader.fieldnorm_id(doc);
         self.similarity_weight
@@ -266,7 +267,6 @@ impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {

 #[cfg(test)]
 mod tests {
-
     use super::{intersection, intersection_count};

     fn test_intersection_sym(left: &[u32], right: &[u32], expected: &[u32]) {
@@ -9,7 +9,7 @@ use crate::query::Weight;
 use crate::query::{EmptyScorer, Explanation};
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
-use crate::Result;
+use crate::Score;
 use crate::{DocId, DocSet};

 pub struct PhraseWeight {
@@ -32,7 +32,7 @@ impl PhraseWeight {
         }
     }

-    fn fieldnorm_reader(&self, reader: &SegmentReader) -> FieldNormReader {
+    fn fieldnorm_reader(&self, reader: &SegmentReader) -> crate::Result<FieldNormReader> {
         let field = self.phrase_terms[0].1.field();
         reader.get_fieldnorms_reader(field)
     }
@@ -40,10 +40,10 @@ impl PhraseWeight {
     fn phrase_scorer(
         &self,
         reader: &SegmentReader,
-        boost: f32,
-    ) -> Result<Option<PhraseScorer<SegmentPostings>>> {
+        boost: Score,
+    ) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
         let similarity_weight = self.similarity_weight.boost_by(boost);
-        let fieldnorm_reader = self.fieldnorm_reader(reader);
+        let fieldnorm_reader = self.fieldnorm_reader(reader)?;
         if reader.has_deletes() {
             let mut term_postings_list = Vec::new();
             for &(offset, ref term) in &self.phrase_terms {
@@ -85,7 +85,7 @@ impl PhraseWeight {
 }

 impl Weight for PhraseWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         if let Some(scorer) = self.phrase_scorer(reader, boost)? {
             Ok(Box::new(scorer))
         } else {
@@ -93,8 +93,8 @@ impl Weight for PhraseWeight {
         }
     }

-    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let scorer_opt = self.phrase_scorer(reader, 1.0f32)?;
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+        let scorer_opt = self.phrase_scorer(reader, 1.0)?;
         if scorer_opt.is_none() {
             return Err(does_not_match(doc));
         }
@@ -102,7 +102,7 @@ impl Weight for PhraseWeight {
         if scorer.seek(doc) != doc {
             return Err(does_not_match(doc));
         }
-        let fieldnorm_reader = self.fieldnorm_reader(reader);
+        let fieldnorm_reader = self.fieldnorm_reader(reader)?;
         let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
         let phrase_count = scorer.phrase_count();
         let mut explanation = Explanation::new("Phrase Scorer", scorer.score());
@@ -130,7 +130,7 @@ mod tests {
         ]);
         let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
         let mut phrase_scorer = phrase_weight
-            .phrase_scorer(searcher.segment_reader(0u32), 1.0f32)
+            .phrase_scorer(searcher.segment_reader(0u32), 1.0)
             .unwrap()
             .unwrap();
         assert_eq!(phrase_scorer.doc(), 1);
@@ -40,7 +40,7 @@ use std::fmt;
 ///
 /// When implementing a new type of `Query`, it is normal to implement a
 /// dedicated `Query`, `Weight` and `Scorer`.
-pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug {
+pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
     /// Create the weight associated to a query.
     ///
     /// If scoring is not required, setting `scoring_enabled` to `false`
@@ -2,6 +2,7 @@ use crate::query::Occur;
 use crate::schema::Field;
 use crate::schema::Term;
 use crate::schema::Type;
+use crate::Score;
 use std::fmt;
 use std::ops::Bound;

@@ -21,12 +22,12 @@ pub enum LogicalLiteral {
 pub enum LogicalAST {
     Clause(Vec<(Occur, LogicalAST)>),
     Leaf(Box<LogicalLiteral>),
-    Boost(Box<LogicalAST>, f32),
+    Boost(Box<LogicalAST>, Score),
 }

 impl LogicalAST {
-    pub fn boost(self, boost: f32) -> LogicalAST {
-        if (boost - 1.0f32).abs() < std::f32::EPSILON {
+    pub fn boost(self, boost: Score) -> LogicalAST {
+        if (boost - 1.0).abs() < Score::EPSILON {
             self
         } else {
             LogicalAST::Boost(Box::new(self), boost)
@@ -12,6 +12,7 @@ use crate::schema::{Facet, IndexRecordOption};
 use crate::schema::{Field, Schema};
 use crate::schema::{FieldType, Term};
 use crate::tokenizer::TokenizerManager;
+use crate::Score;
 use std::borrow::Cow;
 use std::collections::HashMap;
 use std::num::{ParseFloatError, ParseIntError};
@@ -113,7 +114,7 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
 /// The language covered by the current parser is extremely simple.
 ///
 /// * simple terms: "e.g.: `Barack Obama` are simply tokenized using
-/// tantivy's [`SimpleTokenizer`](tantivy::tokenizer::SimpleTokenizer), hence
+/// tantivy's [`SimpleTokenizer`](../tokenizer/struct.SimpleTokenizer.html), hence
 /// becoming `["barack", "obama"]`. The terms are then searched within
 /// the default terms of the query parser.
 ///
@@ -172,7 +173,7 @@ pub struct QueryParser {
     default_fields: Vec<Field>,
     conjunction_by_default: bool,
     tokenizer_manager: TokenizerManager,
-    boost: HashMap<Field, f32>,
+    boost: HashMap<Field, Score>,
 }

 fn all_negative(ast: &LogicalAST) -> bool {
@@ -228,7 +229,7 @@ impl QueryParser {
     /// If the query defines a query boost through the query language (e.g: `country:France^3.0`),
     /// the two boosts (the one defined in the query, and the one defined in the `QueryParser`)
     /// are multiplied together.
    pub fn set_field_boost(&mut self, field: Field, boost: Score) {
         self.boost.insert(field, boost);
     }

@@ -440,14 +441,14 @@ impl QueryParser {
             }
             UserInputAST::Boost(ast, boost) => {
                 let ast = self.compute_logical_ast_with_occur(*ast)?;
-                Ok(ast.boost(boost))
+                Ok(ast.boost(boost as Score))
             }
             UserInputAST::Leaf(leaf) => self.compute_logical_ast_from_leaf(*leaf),
         }
     }

-    fn field_boost(&self, field: Field) -> f32 {
-        self.boost.get(&field).cloned().unwrap_or(1.0f32)
+    fn field_boost(&self, field: Field) -> Score {
+        self.boost.get(&field).cloned().unwrap_or(1.0)
     }

     fn compute_logical_ast_from_leaf(
@@ -658,7 +659,7 @@ mod test {
         let mut query_parser = make_query_parser();
         let schema = make_schema();
         let text_field = schema.get_field("text").unwrap();
-        query_parser.set_field_boost(text_field, 2.0f32);
+        query_parser.set_field_boost(text_field, 2.0);
         let query = query_parser.parse_query("text:hello").unwrap();
         assert_eq!(
             format!("{:?}", query),
@@ -671,7 +672,7 @@ mod test {
         let mut query_parser = make_query_parser();
         let schema = make_schema();
         let title_field = schema.get_field("title").unwrap();
-        query_parser.set_field_boost(title_field, 2.0f32);
+        query_parser.set_field_boost(title_field, 2.0);
         let query = query_parser.parse_query("title:[A TO B]").unwrap();
         assert_eq!(
             format!("{:?}", query),
@@ -684,7 +685,7 @@ mod test {
         let mut query_parser = make_query_parser();
         let schema = make_schema();
         let text_field = schema.get_field("text").unwrap();
-        query_parser.set_field_boost(text_field, 2.0f32);
+        query_parser.set_field_boost(text_field, 2.0);
         let query = query_parser.parse_query("text:hello^2").unwrap();
         assert_eq!(
             format!("{:?}", query),
@@ -9,8 +9,7 @@ use crate::query::{Query, Scorer, Weight};
 use crate::schema::Type;
 use crate::schema::{Field, IndexRecordOption, Term};
 use crate::termdict::{TermDictionary, TermStreamer};
-use crate::DocId;
-use crate::Result;
+use crate::{DocId, Score};
 use std::collections::Bound;
 use std::ops::Range;

@@ -48,7 +47,7 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
 /// let schema = schema_builder.build();
 ///
 /// let index = Index::create_in_ram(schema);
-/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
+/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
 /// for year in 1950u64..2017u64 {
 ///     let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
 ///     for _ in 0..num_docs_within_year {
@@ -246,7 +245,11 @@ impl RangeQuery {
 }

 impl Query for RangeQuery {
-    fn weight(&self, searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
+    fn weight(
+        &self,
+        searcher: &Searcher,
+        _scoring_enabled: bool,
+    ) -> crate::Result<Box<dyn Weight>> {
         let schema = searcher.schema();
         let value_type = schema.get_field_entry(self.field).field_type().value_type();
         if value_type != self.value_type {
@@ -289,7 +292,7 @@ impl RangeWeight {
 }

 impl Weight for RangeWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         let max_doc = reader.max_doc();
         let mut doc_bitset = BitSet::with_max_value(max_doc);

@@ -301,24 +304,26 @@ impl Weight for RangeWeight {
             let mut block_segment_postings = inverted_index
                 .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
             loop {
+                let docs = block_segment_postings.docs();
+                if docs.is_empty() {
+                    break;
+                }
                 for &doc in block_segment_postings.docs() {
                     doc_bitset.insert(doc);
                 }
-                if !block_segment_postings.advance() {
-                    break;
-                }
+                block_segment_postings.advance();
             }
         }
         let doc_bitset = BitSetDocSet::from(doc_bitset);
         Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
     }

-    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+        let mut scorer = self.scorer(reader, 1.0)?;
         if scorer.seek(doc) != doc {
             return Err(does_not_match(doc));
         }
-        Ok(Explanation::new("RangeQuery", 1.0f32))
+        Ok(Explanation::new("RangeQuery", 1.0))
     }
 }

@@ -326,8 +331,9 @@ impl Weight for RangeWeight {
 mod tests {

     use super::RangeQuery;
-    use crate::collector::Count;
-    use crate::schema::{Document, Field, Schema, INDEXED};
+    use crate::collector::{Count, TopDocs};
+    use crate::query::QueryParser;
+    use crate::schema::{Document, Field, Schema, INDEXED, TEXT};
     use crate::Index;
     use std::collections::Bound;

@@ -339,7 +345,7 @@ mod tests {

         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             for year in 1950u64..2017u64 {
                 let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
                 for _ in 0..num_docs_within_year {
@@ -474,4 +480,28 @@ mod tests {
             91
         );
     }

+    #[test]
+    fn test_bug_reproduce_range_query() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        schema_builder.add_text_field("title", TEXT);
+        schema_builder.add_i64_field("year", INDEXED);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema.clone());
+        let mut index_writer = index.writer_for_tests()?;
+        let title = schema.get_field("title").unwrap();
+        let year = schema.get_field("year").unwrap();
+        index_writer.add_document(doc!(
+            title => "hemoglobin blood",
+            year => 1990 as i64
+        ));
+        index_writer.commit()?;
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+        let query_parser = QueryParser::for_index(&index, vec![title]);
+        let query = query_parser.parse_query("hemoglobin AND year:[1970 TO 1990]")?;
+        let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
+        assert_eq!(top_docs.len(), 1);
+        Ok(())
+    }
 }
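`RangeQuery` collects every matching document into a `BitSet` up front and then serves them through a `ConstScorer`, which is why `scorer` above ends with `ConstScorer::new(doc_bitset, boost)`. A rough usage sketch, assuming the `new_i64` constructor from the same file (not shown in these hunks):

    use tantivy::collector::Count;
    use tantivy::query::RangeQuery;
    use tantivy::schema::{Schema, INDEXED};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let year = schema_builder.add_i64_field("year", INDEXED);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 10_000_000)?;
        for y in 1980i64..2000i64 {
            writer.add_document(doc!(year => y));
        }
        writer.commit()?;
        let searcher = index.reader()?.searcher();
        // Half-open range: matches 1990 through 1999, i.e. ten documents.
        let query = RangeQuery::new_i64(year, 1990i64..2000i64);
        assert_eq!(searcher.search(&query, &Count)?, 10);
        Ok(())
    }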
@@ -89,10 +89,10 @@ impl Query for RegexQuery {
 #[cfg(test)]
 mod test {
     use super::RegexQuery;
+    use crate::assert_nearly_equals;
     use crate::collector::TopDocs;
     use crate::schema::TEXT;
     use crate::schema::{Field, Schema};
-    use crate::tests::assert_nearly_equals;
     use crate::{Index, IndexReader};
     use std::sync::Arc;
     use tantivy_fst::Regex;
@@ -103,7 +103,7 @@ mod test {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(
                 country_field => "japan",
             ));
@@ -129,7 +129,7 @@ mod test {
             .unwrap();
         assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
         let (score, _) = scored_docs[0];
-        assert_nearly_equals(1f32, score);
+        assert_nearly_equals!(1.0, score);
     }
     let top_docs = searcher
         .search(&query_matching_zero, &TopDocs::with_limit(2))
@@ -72,7 +72,7 @@ where
         let doc = self.doc();
         let mut score_combiner = TScoreCombiner::default();
         score_combiner.update(&mut self.req_scorer);
-        if self.opt_scorer.seek(doc) == doc {
+        if self.opt_scorer.doc() <= doc && self.opt_scorer.seek(doc) == doc {
             score_combiner.update(&mut self.opt_scorer);
         }
         let score = score_combiner.score();
@@ -112,47 +112,47 @@ mod tests {
     fn test_reqopt_scorer() {
         let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
             RequiredOptionalScorer::new(
-                ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0f32),
-                ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0f32),
+                ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0),
+                ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0),
             );
         {
             assert_eq!(reqoptscorer.doc(), 1);
-            assert_eq!(reqoptscorer.score(), 2f32);
+            assert_eq!(reqoptscorer.score(), 2.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 3);
             assert_eq!(reqoptscorer.doc(), 3);
-            assert_eq!(reqoptscorer.score(), 1f32);
+            assert_eq!(reqoptscorer.score(), 1.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 7);
             assert_eq!(reqoptscorer.doc(), 7);
-            assert_eq!(reqoptscorer.score(), 2f32);
+            assert_eq!(reqoptscorer.score(), 2.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 8);
             assert_eq!(reqoptscorer.doc(), 8);
-            assert_eq!(reqoptscorer.score(), 1f32);
+            assert_eq!(reqoptscorer.score(), 1.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 9);
             assert_eq!(reqoptscorer.doc(), 9);
-            assert_eq!(reqoptscorer.score(), 1f32);
+            assert_eq!(reqoptscorer.score(), 1.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 10);
             assert_eq!(reqoptscorer.doc(), 10);
-            assert_eq!(reqoptscorer.score(), 1f32);
+            assert_eq!(reqoptscorer.score(), 1.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 13);
             assert_eq!(reqoptscorer.doc(), 13);
-            assert_eq!(reqoptscorer.score(), 1f32);
+            assert_eq!(reqoptscorer.score(), 1.0);
         }
         {
             assert_eq!(reqoptscorer.advance(), 15);
             assert_eq!(reqoptscorer.doc(), 15);
-            assert_eq!(reqoptscorer.score(), 2f32);
+            assert_eq!(reqoptscorer.score(), 2.0);
         }
         assert_eq!(reqoptscorer.advance(), TERMINATED);
     }
@@ -31,7 +31,7 @@ impl ScoreCombiner for DoNothingCombiner {
     fn clear(&mut self) {}

     fn score(&self) -> Score {
-        1f32
+        1.0
     }
 }

@@ -47,7 +47,7 @@ impl ScoreCombiner for SumCombiner {
     }

     fn clear(&mut self) {
-        self.score = 0f32;
+        self.score = 0.0;
     }

     fn score(&self) -> Score {
@@ -70,7 +70,7 @@ impl ScoreCombiner for SumWithCoordsCombiner {
     }

     fn clear(&mut self) {
-        self.score = 0f32;
+        self.score = 0.0;
         self.num_fields = 0;
     }

@@ -35,14 +35,14 @@ pub struct ConstScorer<TDocSet: DocSet> {

 impl<TDocSet: DocSet> ConstScorer<TDocSet> {
     /// Creates a new `ConstScorer`.
-    pub fn new(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
+    pub fn new(docset: TDocSet, score: Score) -> ConstScorer<TDocSet> {
         ConstScorer { docset, score }
     }
 }

 impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
     fn from(docset: TDocSet) -> Self {
-        ConstScorer::new(docset, 1.0f32)
+        ConstScorer::new(docset, 1.0)
     }
 }

@@ -9,13 +9,13 @@ pub use self::term_weight::TermWeight;
 #[cfg(test)]
 mod tests {

+    use crate::assert_nearly_equals;
     use crate::collector::TopDocs;
     use crate::docset::DocSet;
+    use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
     use crate::query::{Query, QueryParser, Scorer, TermQuery};
     use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
-    use crate::tests::assert_nearly_equals;
-    use crate::Index;
-    use crate::Term;
+    use crate::{Index, Term, TERMINATED};

     #[test]
     pub fn test_term_query_no_freq() {
@@ -25,7 +25,7 @@ mod tests {
         let index = Index::create_in_ram(schema);
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             let doc = doc!(text_field => "a");
             index_writer.add_document(doc);
             assert!(index_writer.commit().is_ok());
@@ -37,9 +37,44 @@ mod tests {
         );
         let term_weight = term_query.weight(&searcher, true).unwrap();
         let segment_reader = searcher.segment_reader(0);
-        let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32).unwrap();
+        let mut term_scorer = term_weight.scorer(segment_reader, 1.0).unwrap();
         assert_eq!(term_scorer.doc(), 0);
-        assert_eq!(term_scorer.score(), 0.28768212);
+        assert_nearly_equals!(term_scorer.score(), 0.28768212);
+    }
+
+    #[test]
+    pub fn test_term_query_multiple_of_block_len() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", STRING);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        {
+            // writing the segment
+            let mut index_writer = index.writer_for_tests()?;
+            for _ in 0..COMPRESSION_BLOCK_SIZE {
+                let doc = doc!(text_field => "a");
+                index_writer.add_document(doc);
+            }
+            index_writer.commit()?;
+        }
+        let searcher = index.reader()?.searcher();
+        let term_query = TermQuery::new(
+            Term::from_field_text(text_field, "a"),
+            IndexRecordOption::Basic,
+        );
+        let term_weight = term_query.weight(&searcher, true)?;
+        let segment_reader = searcher.segment_reader(0);
+        let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
+        for i in 0u32..COMPRESSION_BLOCK_SIZE as u32 {
+            assert_eq!(term_scorer.doc(), i);
+            if i == COMPRESSION_BLOCK_SIZE as u32 - 1u32 {
+                assert_eq!(term_scorer.advance(), TERMINATED);
+            } else {
+                assert_eq!(term_scorer.advance(), i + 1);
+            }
+        }
+        assert_eq!(term_scorer.doc(), TERMINATED);
+        Ok(())
     }

     #[test]
@@ -51,7 +86,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         {
-            let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(
                 left_field => "left1 left2 left2 left2f2 left2f2 left3 abcde abcde abcde abcde abcde abcde abcde abcde abcde abcewde abcde abcde",
                 right_field => "right1 right2",
@@ -69,7 +104,7 @@ mod tests {
             .unwrap();
         assert_eq!(topdocs.len(), 1);
         let (score, _) = topdocs[0];
-        assert_nearly_equals(0.77802235, score);
+        assert_nearly_equals!(0.77802235, score);
     }
     {
         let term = Term::from_field_text(left_field, "left1");
@@ -79,9 +114,9 @@ mod tests {
             .unwrap();
         assert_eq!(top_docs.len(), 2);
         let (score1, _) = top_docs[0];
-        assert_nearly_equals(0.27101856, score1);
+        assert_nearly_equals!(0.27101856, score1);
         let (score2, _) = top_docs[1];
-        assert_nearly_equals(0.13736556, score2);
+        assert_nearly_equals!(0.13736556, score2);
     }
     {
         let query_parser = QueryParser::for_index(&index, vec![]);
@@ -89,9 +124,9 @@ mod tests {
         let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
         assert_eq!(top_docs.len(), 2);
         let (score1, _) = top_docs[0];
-        assert_nearly_equals(0.9153879, score1);
+        assert_nearly_equals!(0.9153879, score1);
         let (score2, _) = top_docs[1];
-        assert_nearly_equals(0.27101856, score2);
+        assert_nearly_equals!(0.27101856, score2);
     }
 }

@@ -101,7 +136,7 @@ mod tests {
     let text_field = schema_builder.add_text_field("text", TEXT);
     let schema = schema_builder.build();
     let index = Index::create_in_ram(schema);
-    let mut index_writer = index.writer_with_num_threads(1, 5_000_000).unwrap();
+    let mut index_writer = index.writer_for_tests().unwrap();
     index_writer.add_document(doc!(text_field=>"a b"));
     index_writer.add_document(doc!(text_field=>"a c"));
     index_writer.delete_term(Term::from_field_text(text_field, "b"));
@@ -112,6 +147,27 @@ mod tests {
         assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1);
     }

+    #[test]
+    fn test_term_query_simple_seek() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_for_tests().unwrap();
+        index_writer.add_document(doc!(text_field=>"a"));
+        index_writer.add_document(doc!(text_field=>"a"));
+        index_writer.commit()?;
+        let term_a = Term::from_field_text(text_field, "a");
+        let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
+        let searcher = index.reader()?.searcher();
+        let term_weight = term_query.weight(&searcher, false)?;
+        let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
+        assert_eq!(term_scorer.doc(), 0u32);
+        term_scorer.seek(1u32);
+        assert_eq!(term_scorer.doc(), 1u32);
+        Ok(())
+    }

     #[test]
     fn test_term_query_debug() {
         let term_query = TermQuery::new(
@@ -4,10 +4,11 @@ use crate::DocId;
 use crate::Score;

 use crate::fieldnorm::FieldNormReader;
-use crate::postings::Postings;
 use crate::postings::SegmentPostings;
+use crate::postings::{FreqReadingOption, Postings};
 use crate::query::bm25::BM25Weight;

+#[derive(Clone)]
 pub struct TermScorer {
     postings: SegmentPostings,
     fieldnorm_reader: FieldNormReader,
@@ -26,9 +27,56 @@ impl TermScorer {
             similarity_weight,
         }
     }
-}

-impl TermScorer {
+    pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
+        self.postings.block_cursor.shallow_seek(target_doc);
+    }
+
+    #[cfg(test)]
+    pub fn create_for_test(
+        doc_and_tfs: &[(DocId, u32)],
+        fieldnorms: &[u32],
+        similarity_weight: BM25Weight,
+    ) -> TermScorer {
+        assert!(!doc_and_tfs.is_empty());
+        assert!(
+            doc_and_tfs
+                .iter()
+                .map(|(doc, _tf)| *doc)
+                .max()
+                .unwrap_or(0u32)
+                < fieldnorms.len() as u32
+        );
+        let segment_postings =
+            SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorms));
+        let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
+        TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight)
+    }
+
+    /// See `FreqReadingOption`.
+    pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
+        self.postings.block_cursor.freq_reading_option()
+    }
+
+    /// Returns the maximum score for the current block.
+    ///
+    /// In some rare cases, the result may not be exact. In this case a lower value is returned
+    /// (and may lead us to return a lesser document).
+    ///
+    /// At index time, we store the (fieldnorm_id, term frequency) pair that maximizes the
+    /// score assuming the average fieldnorm computed on this segment.
+    ///
+    /// Though extremely rare, it is theoretically possible that the actual average fieldnorm
+    /// is different enough from the current segment average fieldnorm that the maximum over a
+    /// specific block is achieved on a different document.
+    ///
+    /// (The result is on the other hand guaranteed to be correct if there is only one segment.)
+    pub fn block_max_score(&mut self) -> Score {
+        self.postings
+            .block_cursor
+            .block_max_score(&self.fieldnorm_reader, &self.similarity_weight)
+    }

     pub fn term_freq(&self) -> u32 {
         self.postings.term_freq()
     }
@@ -42,6 +90,14 @@ impl TermScorer {
         let term_freq = self.term_freq();
         self.similarity_weight.explain(fieldnorm_id, term_freq)
     }
+
+    pub fn max_score(&self) -> Score {
+        self.similarity_weight.max_score()
+    }
+
+    pub fn last_doc_in_block(&self) -> DocId {
+        self.postings.block_cursor.skip_reader.last_doc_in_block()
+    }
 }

 impl DocSet for TermScorer {
@@ -69,3 +125,213 @@ impl Scorer for TermScorer {
         self.similarity_weight.score(fieldnorm_id, term_freq)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::merge_policy::NoMergePolicy;
+    use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
+    use crate::query::term_query::TermScorer;
+    use crate::query::{BM25Weight, Scorer, TermQuery};
+    use crate::schema::{IndexRecordOption, Schema, TEXT};
+    use crate::Score;
+    use crate::{assert_nearly_equals, Index, Searcher, SegmentId, Term};
+    use crate::{DocId, DocSet, TERMINATED};
+    use futures::executor::block_on;
+    use proptest::prelude::*;
+
+    #[test]
+    fn test_term_scorer_max_score() -> crate::Result<()> {
+        let bm25_weight = BM25Weight::for_one_term(3, 6, 10.0);
+        let mut term_scorer = TermScorer::create_for_test(
+            &[(2, 3), (3, 12), (7, 8)],
+            &[0, 0, 10, 12, 0, 0, 0, 100],
+            bm25_weight,
+        );
+        let max_scorer = term_scorer.max_score();
+        crate::assert_nearly_equals!(max_scorer, 1.3990127);
+        assert_eq!(term_scorer.doc(), 2);
+        assert_eq!(term_scorer.term_freq(), 3);
+        assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447);
+        assert_nearly_equals!(term_scorer.score(), 1.0892314);
+        assert_eq!(term_scorer.advance(), 3);
+        assert_eq!(term_scorer.doc(), 3);
+        assert_eq!(term_scorer.term_freq(), 12);
+        assert_nearly_equals!(term_scorer.score(), 1.3676447);
+        assert_eq!(term_scorer.advance(), 7);
+        assert_eq!(term_scorer.doc(), 7);
+        assert_eq!(term_scorer.term_freq(), 8);
+        assert_nearly_equals!(term_scorer.score(), 0.72015285);
+        assert_eq!(term_scorer.advance(), TERMINATED);
+        Ok(())
+    }
+
+    #[test]
+    fn test_term_scorer_shallow_advance() -> crate::Result<()> {
+        let bm25_weight = BM25Weight::for_one_term(300, 1024, 10.0);
+        let mut doc_and_tfs = vec![];
+        for i in 0u32..300u32 {
+            let doc = i * 10;
+            doc_and_tfs.push((doc, 1u32 + doc % 3u32));
+        }
+        let fieldnorms: Vec<u32> = std::iter::repeat(10u32).take(3_000).collect();
+        let mut term_scorer = TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight);
+        assert_eq!(term_scorer.doc(), 0u32);
+        term_scorer.shallow_seek(1289);
+        assert_eq!(term_scorer.doc(), 0u32);
+        term_scorer.seek(1289);
+        assert_eq!(term_scorer.doc(), 1290);
+        Ok(())
+    }
+
+    proptest! {
+        #[test]
+        fn test_term_scorer_block_max_score(term_freqs_fieldnorms in proptest::collection::vec((1u32..10u32, 0u32..100u32), 80..300)) {
+            let term_doc_freq = term_freqs_fieldnorms.len();
+            let doc_tfs: Vec<(u32, u32)> = term_freqs_fieldnorms.iter()
+                .cloned()
+                .enumerate()
+                .map(|(doc, (tf, _))| (doc as u32, tf))
+                .collect();
+
+            let mut fieldnorms: Vec<u32> = vec![];
+            for i in 0..term_doc_freq {
+                let (tf, num_extra_terms) = term_freqs_fieldnorms[i];
+                fieldnorms.push(tf + num_extra_terms);
+            }
+            let average_fieldnorm = fieldnorms
+                .iter()
+                .cloned()
+                .sum::<u32>() as Score / term_doc_freq as Score;
+            // Average fieldnorm is over the entire index,
+            // not necessarily the docs that are in the posting list.
+            // For this reason we multiply by 1.1 to make a realistic value.
+            let bm25_weight = BM25Weight::for_one_term(term_doc_freq as u64,
+                term_doc_freq as u64 * 10u64,
+                average_fieldnorm);
+
+            let mut term_scorer =
+                TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
+
+            let docs: Vec<DocId> = (0..term_doc_freq).map(|doc| doc as DocId).collect();
+            for block in docs.chunks(COMPRESSION_BLOCK_SIZE) {
+                let block_max_score: Score = term_scorer.block_max_score();
+                let mut block_max_score_computed: Score = 0.0;
+                for &doc in block {
+                    assert_eq!(term_scorer.doc(), doc);
+                    block_max_score_computed = block_max_score_computed.max(term_scorer.score());
+                    term_scorer.advance();
+                }
+                assert_nearly_equals!(block_max_score_computed, block_max_score);
+            }
+        }
+    }
+
+    #[test]
+    fn test_block_wand() {
+        let mut doc_tfs: Vec<(u32, u32)> = vec![];
+        for doc in 0u32..128u32 {
+            doc_tfs.push((doc, 1u32));
+        }
+        for doc in 128u32..256u32 {
+            doc_tfs.push((doc, if doc == 200 { 2u32 } else { 1u32 }));
+        }
+        doc_tfs.push((256, 1u32));
+        doc_tfs.push((257, 3u32));
+        doc_tfs.push((258, 1u32));
+
+        let fieldnorms: Vec<u32> = std::iter::repeat(20u32).take(300).collect();
+        let bm25_weight = BM25Weight::for_one_term(10, 129, 20.0);
+        let mut docs = TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
+        assert_nearly_equals!(docs.block_max_score(), 2.5161593);
+        docs.shallow_seek(135);
+        assert_nearly_equals!(docs.block_max_score(), 3.4597192);
+        docs.shallow_seek(256);
+        // the block is not loaded yet.
+        assert_nearly_equals!(docs.block_max_score(), 5.2971773);
+        assert_eq!(256, docs.seek(256));
+        assert_nearly_equals!(docs.block_max_score(), 3.9539647);
+    }
+
+    fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
+        let term_weight = term_query.specialized_weight(&searcher, true);
+        for reader in searcher.segment_readers() {
+            let mut block_max_scores = vec![];
+            let mut block_max_scores_b = vec![];
+            let mut docs = vec![];
+            {
+                let mut term_scorer = term_weight.specialized_scorer(reader, 1.0)?;
+                while term_scorer.doc() != TERMINATED {
+                    let mut score = term_scorer.score();
+                    docs.push(term_scorer.doc());
+                    for _ in 0..128 {
+                        score = score.max(term_scorer.score());
+                        if term_scorer.advance() == TERMINATED {
+                            break;
+                        }
+                    }
+                    block_max_scores.push(score);
+                }
+            }
+            {
+                let mut term_scorer = term_weight.specialized_scorer(reader, 1.0)?;
+                for d in docs {
+                    term_scorer.shallow_seek(d);
+                    block_max_scores_b.push(term_scorer.block_max_score());
+                }
+            }
+            for (l, r) in block_max_scores
+                .iter()
+                .cloned()
+                .zip(block_max_scores_b.iter().cloned())
+            {
+                assert_nearly_equals!(l, r);
+            }
+        }
+        Ok(())
+    }
+
+    #[ignore]
+    #[test]
+    fn test_block_wand_long_test() -> crate::Result<()> {
+        let mut schema_builder = Schema::builder();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut writer = index.writer_with_num_threads(3, 30_000_000)?;
+        use rand::Rng;
+        let mut rng = rand::thread_rng();
+        writer.set_merge_policy(Box::new(NoMergePolicy));
+        for _ in 0..3_000 {
+            let term_freq = rng.gen_range(1, 10000);
+            let words: Vec<&str> = std::iter::repeat("bbbb").take(term_freq).collect();
+            let text = words.join(" ");
+            writer.add_document(doc!(text_field=>text));
+        }
+        writer.commit()?;
+        let term_query = TermQuery::new(
+            Term::from_field_text(text_field, &"bbbb"),
+            IndexRecordOption::WithFreqs,
+        );
+        let segment_ids: Vec<SegmentId>;
+        let reader = index.reader()?;
+        {
+            let searcher = reader.searcher();
+            segment_ids = searcher
+                .segment_readers()
+                .iter()
+                .map(|segment| segment.segment_id())
+                .collect();
+            test_block_wand_aux(&term_query, &searcher)?;
+        }
+        {
+            let _ = block_on(writer.merge(&segment_ids[..]));
+        }
+        {
+            reader.reload()?;
+            let searcher = reader.searcher();
+            assert_eq!(searcher.segment_readers().len(), 1);
+            test_block_wand_aux(&term_query, &searcher)?;
+        }
+        Ok(())
+    }
+}
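The `shallow_seek` / `block_max_score` / `last_doc_in_block` trio added above is the surface BlockWAND needs: position the skip-list cursor on a block without decoding it, read an upper bound for that block, and only decode when the bound beats the current threshold. A toy, self-contained illustration of that skipping idea (not tantivy's actual implementation; the `ToyBlock` type is invented for this sketch):

    // Each block carries an upper bound on its scores, mirroring what the
    // skip list stores as (fieldnorm_id, term frequency) pairs at index time.
    struct ToyBlock {
        max_score: f32,
        docs: Vec<(u32, f32)>, // (doc id, score)
    }

    fn hits_above_threshold(blocks: &[ToyBlock], threshold: f32) -> Vec<(u32, f32)> {
        let mut hits = Vec::new();
        for block in blocks {
            // "Shallow" step: consult the block metadata only.
            if block.max_score <= threshold {
                continue; // the whole block is pruned, never decoded
            }
            // Only now "decode" the block and score individual documents.
            for &(doc, score) in &block.docs {
                if score > threshold {
                    hits.push((doc, score));
                }
            }
        }
        hits
    }

    fn main() {
        let blocks = vec![
            ToyBlock { max_score: 1.2, docs: vec![(3, 1.0), (90, 1.2)] },
            ToyBlock { max_score: 0.4, docs: vec![(130, 0.4)] },
        ];
        // With threshold 0.5 the second block is skipped entirely.
        assert_eq!(hits_above_threshold(&blocks, 0.5), vec![(3, 1.0), (90, 1.2)]);
    }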
@@ -4,11 +4,10 @@ use crate::docset::DocSet;
 use crate::postings::SegmentPostings;
 use crate::query::bm25::BM25Weight;
 use crate::query::explanation::does_not_match;
-use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
+use crate::query::weight::for_each_scorer;
 use crate::query::Weight;
 use crate::query::{Explanation, Scorer};
 use crate::schema::IndexRecordOption;
-use crate::Result;
 use crate::Term;
 use crate::{DocId, Score};

@@ -19,22 +18,22 @@ pub struct TermWeight {
 }

 impl Weight for TermWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
-        let term_scorer = self.scorer_specialized(reader, boost)?;
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
+        let term_scorer = self.specialized_scorer(reader, boost)?;
         Ok(Box::new(term_scorer))
     }

-    fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
-        let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
+    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+        let mut scorer = self.specialized_scorer(reader, 1.0)?;
         if scorer.seek(doc) != doc {
             return Err(does_not_match(doc));
         }
         Ok(scorer.explain())
     }

-    fn count(&self, reader: &SegmentReader) -> Result<u32> {
+    fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
         if let Some(delete_bitset) = reader.delete_bitset() {
-            Ok(self.scorer(reader, 1.0f32)?.count(delete_bitset))
+            Ok(self.scorer(reader, 1.0)?.count(delete_bitset))
         } else {
             let field = self.term.field();
             Ok(reader
@@ -52,7 +51,7 @@ impl Weight for TermWeight {
         reader: &SegmentReader,
         callback: &mut dyn FnMut(DocId, Score),
     ) -> crate::Result<()> {
-        let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
+        let mut scorer = self.specialized_scorer(reader, 1.0)?;
         for_each_scorer(&mut scorer, callback);
         Ok(())
     }
@@ -69,12 +68,12 @@ impl Weight for TermWeight {
     /// important optimization (e.g. BlockWAND for union).
     fn for_each_pruning(
         &self,
-        threshold: f32,
+        threshold: Score,
         reader: &SegmentReader,
         callback: &mut dyn FnMut(DocId, Score) -> Score,
     ) -> crate::Result<()> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
-        for_each_pruning_scorer(&mut scorer, threshold, callback);
+        let scorer = self.specialized_scorer(reader, 1.0)?;
+        crate::query::boolean_query::block_wand(vec![scorer], threshold, callback);
         Ok(())
     }
 }
@@ -92,10 +91,14 @@ impl TermWeight {
         }
     }

-    fn scorer_specialized(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
+    pub(crate) fn specialized_scorer(
+        &self,
+        reader: &SegmentReader,
+        boost: Score,
+    ) -> crate::Result<TermScorer> {
         let field = self.term.field();
         let inverted_index = reader.inverted_index(field);
-        let fieldnorm_reader = reader.get_fieldnorms_reader(field);
+        let fieldnorm_reader = reader.get_fieldnorms_reader(field)?;
         let similarity_weight = self.similarity_weight.boost_by(boost);
         let postings_opt: Option<SegmentPostings> =
             inverted_index.read_postings(&self.term, self.index_record_option);
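The interesting change here is `for_each_pruning`: a single-term weight no longer walks every posting through `for_each_pruning_scorer`, but hands its one `TermScorer` to `block_wand`, so top-k collection over a plain term query can skip low-scoring blocks. A small end-to-end sketch of the path such a search takes, assuming (per the 0.13 release notes) that `TopDocs` drives `for_each_pruning`:

    use tantivy::collector::TopDocs;
    use tantivy::query::TermQuery;
    use tantivy::schema::{IndexRecordOption, Schema, TEXT};
    use tantivy::{doc, Index, Term};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 10_000_000)?;
        writer.add_document(doc!(text => "hello hello world"));
        writer.add_document(doc!(text => "hello world"));
        writer.commit()?;
        let searcher = index.reader()?.searcher();
        let query = TermQuery::new(
            Term::from_field_text(text, "hello"),
            IndexRecordOption::WithFreqs,
        );
        // TopDocs is the pruning consumer: it raises the threshold as better
        // hits come in, letting block_wand skip whole blocks.
        let top_docs = searcher.search(&query, &TopDocs::with_limit(1))?;
        assert_eq!(top_docs.len(), 1);
        Ok(())
    }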
@@ -55,7 +55,7 @@ where
             cursor: HORIZON_NUM_TINYBITSETS,
             offset: 0,
             doc: 0,
-            score: 0f32,
+            score: 0.0,
         };
         if union.refill() {
             union.advance();
@@ -183,7 +183,10 @@ where
         // advance all docsets to a doc >= to the target.
         #[cfg_attr(feature = "cargo-clippy", allow(clippy::clippy::collapsible_if))]
         unordered_drain_filter(&mut self.docsets, |docset| {
-            docset.seek(target) == TERMINATED
+            if docset.doc() < target {
+                docset.seek(target);
+            }
+            docset.doc() == TERMINATED
         });

         // at this point all of the docsets
@@ -271,7 +274,7 @@ mod tests {
         vals.iter()
             .cloned()
             .map(VecDocSet::from)
-            .map(|docset| ConstScorer::new(docset, 1.0f32))
+            .map(|docset| ConstScorer::new(docset, 1.0))
             .collect::<Vec<ConstScorer<VecDocSet>>>(),
     )
 };
@@ -318,7 +321,7 @@ mod tests {
         .iter()
         .map(|docs| docs.clone())
         .map(VecDocSet::from)
-        .map(|docset| ConstScorer::new(docset, 1.0f32))
+        .map(|docset| ConstScorer::new(docset, 1.0))
         .collect::<Vec<_>>(),
 ));
 res
@@ -395,9 +398,9 @@ mod bench {

     use crate::query::score_combiner::DoNothingCombiner;
     use crate::query::{ConstScorer, Union, VecDocSet};
-    use crate::tests;
     use crate::DocId;
     use crate::DocSet;
+    use crate::{tests, TERMINATED};
     use test::Bencher;

     #[bench]
@@ -411,10 +414,12 @@ mod bench {
             union_docset
                 .iter()
                 .map(|doc_ids| VecDocSet::from(doc_ids.clone()))
-                .map(ConstScorer::new)
+                .map(|docset| ConstScorer::new(docset, 1.0))
                 .collect::<Vec<_>>(),
         );
-        while v.advance() {}
+        while v.doc() != TERMINATED {
+            v.advance();
+        }
     });
 }
 #[bench]
@@ -429,10 +434,12 @@ mod bench {
             union_docset
                 .iter()
                 .map(|doc_ids| VecDocSet::from(doc_ids.clone()))
-                .map(ConstScorer::new)
+                .map(|docset| ConstScorer::new(docset, 1.0))
                 .collect::<Vec<_>>(),
         );
-        while v.advance() {}
+        while v.doc() != TERMINATED {
+            v.advance();
+        }
     });
 }
 }
@@ -28,7 +28,7 @@ pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
 /// important optimization (e.g. BlockWAND for union).
 pub(crate) fn for_each_pruning_scorer<TScorer: Scorer + ?Sized>(
     scorer: &mut TScorer,
-    mut threshold: f32,
+    mut threshold: Score,
     callback: &mut dyn FnMut(DocId, Score) -> Score,
 ) {
     let mut doc = scorer.doc();
@@ -51,14 +51,14 @@ pub trait Weight: Send + Sync + 'static {
     /// `boost` is a multiplier to apply to the score.
     ///
     /// See [`Query`](./trait.Query.html).
-    fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>>;
+    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;

     /// Returns an `Explanation` for the given document.
     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;

     /// Returns the number of documents within the given `SegmentReader`.
     fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader, 1.0)?;
         if let Some(delete_bitset) = reader.delete_bitset() {
             Ok(scorer.count(delete_bitset))
         } else {
@@ -73,7 +73,7 @@ pub trait Weight: Send + Sync + 'static {
         reader: &SegmentReader,
         callback: &mut dyn FnMut(DocId, Score),
     ) -> crate::Result<()> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader, 1.0)?;
         for_each_scorer(scorer.as_mut(), callback);
         Ok(())
     }
@@ -90,11 +90,11 @@ pub trait Weight: Send + Sync + 'static {
     /// important optimization (e.g. BlockWAND for union).
     fn for_each_pruning(
         &self,
-        threshold: f32,
+        threshold: Score,
         reader: &SegmentReader,
         callback: &mut dyn FnMut(DocId, Score) -> Score,
     ) -> crate::Result<()> {
-        let mut scorer = self.scorer(reader, 1.0f32)?;
+        let mut scorer = self.scorer(reader, 1.0)?;
         for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
         Ok(())
     }
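The `for_each_pruning` contract is easy to miss from the signature alone: the callback receives each matching `(doc, score)` above the threshold and returns the new threshold the driver may prune against. A hedged sketch of a caller maintaining a best-score threshold by hand (error plumbing simplified; any `Query` works here):

    use tantivy::query::{Query, TermQuery, Weight};
    use tantivy::schema::{IndexRecordOption, Schema, TEXT};
    use tantivy::{doc, Index, Term};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 10_000_000)?;
        writer.add_document(doc!(text => "apple"));
        writer.add_document(doc!(text => "apple apple"));
        writer.commit()?;
        let searcher = index.reader()?.searcher();
        let query = TermQuery::new(
            Term::from_field_text(text, "apple"),
            IndexRecordOption::WithFreqs,
        );
        let weight = query.weight(&searcher, true)?;
        let mut best_score = 0.0f32;
        for segment_reader in searcher.segment_readers() {
            weight.for_each_pruning(best_score, segment_reader, &mut |_doc, score| {
                best_score = best_score.max(score);
                best_score // the returned value becomes the new pruning threshold
            })?;
        }
        println!("best score: {}", best_score);
        Ok(())
    }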
@@ -22,7 +22,7 @@ pub enum ReloadPolicy {
     /// The index is entirely reloaded manually.
     /// All updates of the index should be manual.
     ///
-    /// No change is reflected automatically. You are required to call `.load_seacher()` manually.
+    /// No change is reflected automatically. You are required to call `IndexReader::reload()` manually.
     Manual,
     /// The index is reloaded within milliseconds after a new commit is available.
     /// This is made possible by watching changes in the `meta.json` file.
@@ -138,9 +138,11 @@ impl InnerIndexReader {
             .collect::<crate::Result<_>>()?
         };
         let schema = self.index.schema();
-        let searchers = (0..self.num_searchers)
-            .map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
-            .collect();
+        let searchers = std::iter::repeat_with(|| {
+            Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
+        })
+        .take(self.num_searchers)
+        .collect();
         self.searcher_pool.publish_new_generation(searchers);
         Ok(())
     }
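The corrected doc comment points at `IndexReader::reload()`. A short sketch of the `Manual` policy in action:

    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index, ReloadPolicy};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::Manual)
            .try_into()?;

        let mut writer = index.writer_with_num_threads(1, 10_000_000)?;
        writer.add_document(doc!(text => "hello"));
        writer.commit()?;

        // With ReloadPolicy::Manual, nothing is picked up automatically...
        assert_eq!(reader.searcher().num_docs(), 0);
        // ...until reload() is called explicitly, as the doc comment says.
        reader.reload()?;
        assert_eq!(reader.searcher().num_docs(), 1);
        Ok(())
    }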
@@ -74,9 +74,8 @@ impl Document {
     }

     /// Add a text field.
-    pub fn add_text(&mut self, field: Field, text: &str) {
-        let value = Value::Str(String::from(text));
-        self.add(FieldValue::new(field, value));
+    pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
+        self.add(FieldValue::new(field, Value::Str(text.to_string())));
     }

     /// Add a pre-tokenized text field.
@@ -110,8 +109,8 @@ impl Document {
     }

     /// Add a bytes field
-    pub fn add_bytes(&mut self, field: Field, value: Vec<u8>) {
-        self.add(FieldValue::new(field, Value::Bytes(value)))
+    pub fn add_bytes<T: Into<Vec<u8>>>(&mut self, field: Field, value: T) {
+        self.add(FieldValue::new(field, Value::Bytes(value.into())))
     }

     /// Add a field value
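Both setters are now generic, so call sites no longer need to pre-convert their values. A small sketch of the widened call surface (field names are illustrative):

    use tantivy::schema::{Schema, TEXT};
    use tantivy::Document;

    fn main() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let payload = schema_builder.add_bytes_field("payload");
        let _schema = schema_builder.build();

        let mut doc = Document::default();
        // add_text<S: ToString> accepts &str and String alike.
        doc.add_text(title, "hello");
        doc.add_text(title, String::from("world"));
        // add_bytes<T: Into<Vec<u8>>> accepts a Vec<u8> or a byte slice.
        doc.add_bytes(payload, vec![1u8, 2, 3]);
        doc.add_bytes(payload, &b"raw bytes"[..]);
    }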
@@ -1,5 +1,5 @@
-use crate::schema::IntOptions;
 use crate::schema::TextOptions;
+use crate::schema::{is_valid_field_name, IntOptions};

 use crate::schema::FieldType;
 use serde::de::{self, MapAccess, Visitor};
@@ -14,7 +14,7 @@ use std::fmt;
 /// - a field name
 /// - a field type, itself wrapping up options describing
 ///   how the field should be indexed.
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct FieldEntry {
     name: String,
     field_type: FieldType,
@@ -24,6 +24,7 @@ impl FieldEntry {
     /// Creates a new text field entry in the schema, given
     /// a name, and some options.
     pub fn new_text(field_name: String, text_options: TextOptions) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::Str(text_options),
@@ -33,6 +34,7 @@ impl FieldEntry {
     /// Creates a new u64 field entry in the schema, given
     /// a name, and some options.
     pub fn new_u64(field_name: String, field_type: IntOptions) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::U64(field_type),
@@ -42,6 +44,7 @@ impl FieldEntry {
     /// Creates a new i64 field entry in the schema, given
     /// a name, and some options.
     pub fn new_i64(field_name: String, field_type: IntOptions) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::I64(field_type),
@@ -51,6 +54,7 @@ impl FieldEntry {
     /// Creates a new f64 field entry in the schema, given
     /// a name, and some options.
     pub fn new_f64(field_name: String, field_type: IntOptions) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::F64(field_type),
@@ -60,6 +64,7 @@ impl FieldEntry {
     /// Creates a new date field entry in the schema, given
     /// a name, and some options.
     pub fn new_date(field_name: String, field_type: IntOptions) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::Date(field_type),
@@ -68,6 +73,7 @@ impl FieldEntry {

     /// Creates a field entry for a facet.
     pub fn new_facet(field_name: String) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::HierarchicalFacet,
@@ -76,6 +82,7 @@ impl FieldEntry {

     /// Creates a field entry for a bytes field
     pub fn new_bytes(field_name: String) -> FieldEntry {
+        assert!(is_valid_field_name(&field_name));
         FieldEntry {
             name: field_name,
             field_type: FieldType::Bytes,
@@ -268,6 +275,12 @@ mod tests {
     use crate::schema::TEXT;
     use serde_json;

+    #[test]
+    #[should_panic]
+    fn test_invalid_field_name_should_panic() {
+        FieldEntry::new_text("-hello".to_string(), TEXT);
+    }
+
     #[test]
     fn test_json_serialization() {
         let field_value = FieldEntry::new_text(String::from("title"), TEXT);
@@ -48,7 +48,7 @@ pub enum Type {

 /// A `FieldType` describes the type (text, u64) of a field as well as
 /// how it should be handled by tantivy.
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
 pub enum FieldType {
     /// String field type configuration
     Str(TextOptions),
@@ -149,14 +149,16 @@ pub use self::int_options::IntOptions;
 use once_cell::sync::Lazy;
 use regex::Regex;
 
+/// Regular expression representing the restriction on a valid field name.
+pub const FIELD_NAME_PATTERN: &str = r#"^[_a-zA-Z][_\-a-zA-Z0-9]*$"#;
+
 /// Validator for a potential `field_name`.
 /// Returns true iff the name can be used as a field name.
 ///
 /// A field name must start with a letter `[a-zA-Z]` or an underscore.
 /// The other characters can be any alphanumeric character `[a-zA-Z0-9]`, an underscore, or a hyphen.
 pub fn is_valid_field_name(field_name: &str) -> bool {
-    static FIELD_NAME_PTN: Lazy<Regex> =
-        Lazy::new(|| Regex::new("^[a-zA-Z][_a-zA-Z0-9]*$").unwrap());
+    static FIELD_NAME_PTN: Lazy<Regex> = Lazy::new(|| Regex::new(FIELD_NAME_PATTERN).unwrap());
     FIELD_NAME_PTN.is_match(field_name)
 }
 
@@ -170,6 +172,11 @@ mod tests {
     assert!(is_valid_field_name("text"));
     assert!(is_valid_field_name("text0"));
     assert!(!is_valid_field_name("0text"));
+    assert!(is_valid_field_name("field-name"));
+    assert!(is_valid_field_name("field_name"));
+    assert!(!is_valid_field_name("field!name"));
+    assert!(!is_valid_field_name("-fieldname"));
+    assert!(is_valid_field_name("_fieldname"));
     assert!(!is_valid_field_name(""));
     assert!(!is_valid_field_name("シャボン玉"));
     assert!(is_valid_field_name("my_text_field"));
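The new pattern can be exercised in isolation; a minimal sketch using the same `regex` + `once_cell` approach as the validator above (crate versions in the comments are assumptions):

```rust
use once_cell::sync::Lazy; // once_cell = "1"
use regex::Regex;          // regex = "1"

// Same pattern as the FIELD_NAME_PATTERN constant introduced above:
// a leading letter or underscore, then letters, digits, '_' or '-'.
const FIELD_NAME_PATTERN: &str = r"^[_a-zA-Z][_\-a-zA-Z0-9]*$";

fn is_valid_field_name(field_name: &str) -> bool {
    // Compiled once on first use, then cached for the process lifetime.
    static FIELD_NAME_PTN: Lazy<Regex> =
        Lazy::new(|| Regex::new(FIELD_NAME_PATTERN).unwrap());
    FIELD_NAME_PTN.is_match(field_name)
}

fn main() {
    assert!(is_valid_field_name("field-name"));  // hyphens are now accepted
    assert!(is_valid_field_name("_fieldname"));  // so is a leading underscore
    assert!(!is_valid_field_name("-fieldname")); // but not a leading hyphen
    assert!(!is_valid_field_name("0text"));      // and digits still cannot lead
}
```

Hoisting the pattern into a public constant also lets callers quote the exact rule in their own error messages instead of hard-coding a copy of the regex.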
@@ -4,7 +4,6 @@ use super::Field;
 use crate::common;
 use crate::schema::Facet;
 use crate::DateTime;
-use byteorder::{BigEndian, ByteOrder};
 use std::str;
 
 /// Size (in bytes) of the buffer of an int field.
@@ -19,6 +18,10 @@ where
     B: AsRef<[u8]>;
 
 impl Term {
+    pub(crate) fn new() -> Term {
+        Term(Vec::with_capacity(100))
+    }
+
     /// Builds a term given a field, and an i64 value
     ///
     /// Assuming the term has a field id of 1, and an i64 value of 3234,
@@ -93,6 +96,12 @@ impl Term {
         term
     }
 
+    pub(crate) fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
+        let mut term = Term::for_field(field);
+        term.set_bytes(bytes);
+        term
+    }
+
     /// Creates a new Term for a given field.
     pub(crate) fn for_field(field: Field) -> Term {
         let mut term = Term(Vec::with_capacity(100));
@@ -100,12 +109,10 @@ impl Term {
         term
     }
 
-    /// Returns the field.
-    pub fn set_field(&mut self, field: Field) {
-        if self.0.len() < 4 {
-            self.0.resize(4, 0u8);
-        }
-        BigEndian::write_u32(&mut self.0[0..4], field.field_id());
+    pub(crate) fn set_field(&mut self, field: Field) {
+        self.0.clear();
+        self.0
+            .extend_from_slice(&field.field_id().to_be_bytes()[..]);
     }
 
     /// Sets a u64 value in the term.
@@ -116,7 +123,7 @@ impl Term {
     /// the natural order of the values.
     pub fn set_u64(&mut self, val: u64) {
         self.0.resize(INT_TERM_LEN, 0u8);
-        BigEndian::write_u64(&mut self.0[4..], val);
+        self.0[4..12].copy_from_slice(val.to_be_bytes().as_ref());
     }
 
     /// Sets a `i64` value in the term.
@@ -134,12 +141,6 @@ impl Term {
         self.0.extend(bytes);
     }
 
-    pub(crate) fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
-        let mut term = Term::for_field(field);
-        term.set_bytes(bytes);
-        term
-    }
-
     /// Sets the text only, keeping the field untouched.
     pub fn set_text(&mut self, text: &str) {
         self.set_bytes(text.as_bytes());
@@ -157,7 +158,9 @@ where
 
     /// Returns the field.
     pub fn field(&self) -> Field {
-        Field::from_field_id(BigEndian::read_u32(&self.0.as_ref()[..4]))
+        let mut field_id_bytes = [0u8; 4];
+        field_id_bytes.copy_from_slice(&self.0.as_ref()[..4]);
+        Field::from_field_id(u32::from_be_bytes(field_id_bytes))
     }
 
     /// Returns the `u64` value stored in a term.
@@ -166,7 +169,9 @@ where
     /// ... or returns an invalid value
     /// if the term is not a `u64` field.
     pub fn get_u64(&self) -> u64 {
-        BigEndian::read_u64(&self.0.as_ref()[4..])
+        let mut field_id_bytes = [0u8; 8];
+        field_id_bytes.copy_from_slice(self.value_bytes());
+        u64::from_be_bytes(field_id_bytes)
    }
 
     /// Returns the `i64` value stored in a term.
@@ -175,7 +180,7 @@ where
     /// ... or returns an invalid value
     /// if the term is not a `i64` field.
     pub fn get_i64(&self) -> i64 {
-        common::u64_to_i64(BigEndian::read_u64(&self.0.as_ref()[4..]))
+        common::u64_to_i64(self.get_u64())
     }
 
     /// Returns the `f64` value stored in a term.
@@ -184,7 +189,7 @@ where
     /// ... or returns an invalid value
     /// if the term is not a `f64` field.
     pub fn get_f64(&self) -> f64 {
-        common::u64_to_f64(BigEndian::read_u64(&self.0.as_ref()[4..]))
+        common::u64_to_f64(self.get_u64())
     }
 
     /// Returns the text associated with the term.
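Every `byteorder` call removed above is expressible with std's `to_be_bytes`/`from_be_bytes`, which is what lets the dependency go. A minimal sketch of the 12-byte term layout being manipulated (4 big-endian bytes of field id followed by 8 big-endian bytes of value; the buffer handling is simplified relative to `Term`):

```rust
fn main() {
    let field_id: u32 = 1;
    let value: u64 = 3234;

    // Write side, as in set_field/set_u64: field id first, value after it.
    let mut term_buf = Vec::with_capacity(12);
    term_buf.extend_from_slice(&field_id.to_be_bytes()); // bytes 0..4
    term_buf.extend_from_slice(&value.to_be_bytes());    // bytes 4..12

    // Read side, as in field()/get_u64(): copy into a fixed array, decode.
    let mut field_bytes = [0u8; 4];
    field_bytes.copy_from_slice(&term_buf[..4]);
    assert_eq!(u32::from_be_bytes(field_bytes), field_id);

    let mut value_bytes = [0u8; 8];
    value_bytes.copy_from_slice(&term_buf[4..12]);
    assert_eq!(u64::from_be_bytes(value_bytes), value);

    // Big-endian keeps byte-wise ordering consistent with numeric ordering,
    // which is the "natural order of the values" the doc comments refer to.
}
```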
@@ -6,7 +6,7 @@ use std::borrow::Cow;
 use std::ops::BitOr;
 
 /// Define how a text field should be handled by tantivy.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct TextOptions {
     indexing: Option<TextFieldIndexing>,
     stored: bool,
@@ -51,7 +51,7 @@ impl Default for TextOptions {
 /// - the amount of information that should be stored about the presence of a term in a document.
 ///   Essentially, should we store the term frequency and/or the positions (See [`IndexRecordOption`](./enum.IndexRecordOption.html)).
 /// - the name of the `Tokenizer` that should be used to process the field.
-#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
 pub struct TextFieldIndexing {
     record: IndexRecordOption,
     tokenizer: Cow<'static, str>,
@@ -221,6 +221,12 @@ impl<'a> From<&'a str> for Value {
     }
 }
 
+impl<'a> From<&'a [u8]> for Value {
+    fn from(bytes: &'a [u8]) -> Value {
+        Value::Bytes(bytes.to_vec())
+    }
+}
+
 impl<'a> From<Facet> for Value {
     fn from(facet: Facet) -> Value {
         Value::Facet(facet)
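With this conversion in place, byte slices coerce into values the same way strings already did. A self-contained sketch of the pattern (the enum here is a pared-down stand-in for tantivy's `Value`, keeping only the relevant variant):

```rust
// Pared-down stand-in for tantivy's Value.
#[derive(Debug, PartialEq)]
enum Value {
    Bytes(Vec<u8>),
}

impl<'a> From<&'a [u8]> for Value {
    fn from(bytes: &'a [u8]) -> Value {
        // The slice is copied into an owned Vec, so the Value borrows nothing.
        Value::Bytes(bytes.to_vec())
    }
}

fn main() {
    let payload: &[u8] = &[0xde, 0xad, 0xbe, 0xef];
    let value = Value::from(payload);
    assert_eq!(value, Value::Bytes(vec![0xde, 0xad, 0xbe, 0xef]));
}
```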
@@ -2,8 +2,8 @@ use crate::query::Query;
 use crate::schema::Field;
 use crate::schema::Value;
 use crate::tokenizer::{TextAnalyzer, Token};
-use crate::Document;
 use crate::Searcher;
+use crate::{Document, Score};
 use htmlescape::encode_minimal;
 use std::cmp::Ordering;
 use std::collections::BTreeMap;
@@ -30,7 +30,7 @@ impl HighlightSection {
 
 #[derive(Debug)]
 pub struct FragmentCandidate {
-    score: f32,
+    score: Score,
     start_offset: usize,
     stop_offset: usize,
     num_chars: usize,
@@ -58,7 +58,7 @@ impl FragmentCandidate {
     /// taking the token and terms, the token is added to the fragment.
     /// if the token is one of the terms, the score
     /// and highlighted fields are updated in the fragment.
-    fn try_add_token(&mut self, token: &Token, terms: &BTreeMap<String, f32>) {
+    fn try_add_token(&mut self, token: &Token, terms: &BTreeMap<String, Score>) {
         self.stop_offset = token.offset_to;
 
         if let Some(&score) = terms.get(&token.text.to_lowercase()) {
@@ -142,7 +142,7 @@ impl Snippet {
 fn search_fragments<'a>(
     tokenizer: &TextAnalyzer,
     text: &'a str,
-    terms: &BTreeMap<String, f32>,
+    terms: &BTreeMap<String, Score>,
     max_num_chars: usize,
 ) -> Vec<FragmentCandidate> {
     let mut token_stream = tokenizer.token_stream(text);
@@ -221,7 +221,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
 /// # let text_field = schema_builder.add_text_field("text", TEXT);
 /// # let schema = schema_builder.build();
 /// # let index = Index::create_in_ram(schema);
-/// # let mut index_writer = index.writer_with_num_threads(1, 30_000_000)?;
+/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
 /// # let doc = doc!(text_field => r#"Comme je descendais des Fleuves impassibles,
 /// # Je ne me sentis plus guidé par les haleurs :
 /// # Des Peaux-Rouges criards les avaient pris pour cibles,
@@ -248,7 +248,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
 /// # }
 /// ```
 pub struct SnippetGenerator {
-    terms_text: BTreeMap<String, f32>,
+    terms_text: BTreeMap<String, Score>,
     tokenizer: TextAnalyzer,
     field: Field,
     max_num_chars: usize,
@@ -263,12 +263,12 @@ impl SnippetGenerator {
     ) -> crate::Result<SnippetGenerator> {
         let mut terms = BTreeSet::new();
         query.query_terms(&mut terms);
-        let terms_text: BTreeMap<String, f32> = terms
+        let terms_text: BTreeMap<String, Score> = terms
             .into_iter()
             .filter(|term| term.field() == field)
             .flat_map(|term| {
                 let doc_freq = searcher.doc_freq(&term);
-                let score = 1f32 / (1f32 + doc_freq as f32);
+                let score = 1.0 / (1.0 + doc_freq as Score);
                 if doc_freq > 0 {
                     Some((term.text().to_string(), score))
                 } else {
@@ -291,7 +291,7 @@ impl SnippetGenerator {
     }
 
     #[cfg(test)]
-    pub fn terms_text(&self) -> &BTreeMap<String, f32> {
+    pub fn terms_text(&self) -> &BTreeMap<String, Score> {
         &self.terms_text
     }
 
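The scoring change is purely type-level: `Score` is tantivy's score alias (an `f32` at this point in the history), and routing the maps and literals through it means a future change of the alias will not ripple through the snippet code. A sketch of the inverse-document-frequency-style weight it computes:

```rust
// Score is assumed to alias f32 here, matching tantivy's definition
// at this point in the history.
type Score = f32;

/// Weight of a query term for snippet ranking: rarer terms weigh more,
/// and terms absent from the index (doc_freq == 0) are dropped entirely.
fn term_weight(doc_freq: u64) -> Option<Score> {
    if doc_freq > 0 {
        Some(1.0 / (1.0 + doc_freq as Score))
    } else {
        None
    }
}

fn main() {
    // A term appearing in 3 documents gets 1 / (1 + 3) = 0.25,
    // the exact value asserted by the tests that follow.
    assert_eq!(term_weight(3), Some(0.25));
    assert_eq!(term_weight(0), None);
}
```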
@@ -373,8 +373,8 @@ Survey in 2016, 2017, and 2018."#;
     fn test_snippet_scored_fragment() {
         {
             let terms = btreemap! {
-                String::from("rust") => 1.0f32,
-                String::from("language") => 0.9f32
+                String::from("rust") => 1.0,
+                String::from("language") => 0.9
             };
             let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
             {
@@ -387,8 +387,8 @@ Survey in 2016, 2017, and 2018."#;
         }
         {
             let terms = btreemap! {
-                String::from("rust") => 0.9f32,
-                String::from("language") => 1.0f32
+                String::from("rust") => 0.9,
+                String::from("language") => 1.0
             };
             let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
             //assert_eq!(fragments.len(), 7);
@@ -506,7 +506,7 @@ Survey in 2016, 2017, and 2018."#;
         let index = Index::create_in_ram(schema);
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(text_field => "a"));
             index_writer.add_document(doc!(text_field => "a"));
             index_writer.add_document(doc!(text_field => "a b"));
@@ -525,7 +525,7 @@ Survey in 2016, 2017, and 2018."#;
             let snippet_generator =
                 SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert_eq!(
-                &btreemap!("a".to_string() => 0.25f32),
+                &btreemap!("a".to_string() => 0.25),
                 snippet_generator.terms_text()
             );
         }
@@ -534,7 +534,7 @@ Survey in 2016, 2017, and 2018."#;
             let snippet_generator =
                 SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert_eq!(
-                &btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
+                &btreemap!("a".to_string() => 0.25, "b".to_string() => 0.5),
                 snippet_generator.terms_text()
             );
         }
@@ -543,7 +543,7 @@ Survey in 2016, 2017, and 2018."#;
             let snippet_generator =
                 SnippetGenerator::create(&searcher, &*query, text_field).unwrap();
             assert_eq!(
-                &btreemap!("a".to_string() => 0.25f32, "b".to_string() => 0.5),
+                &btreemap!("a".to_string() => 0.25, "b".to_string() => 0.5),
                 snippet_generator.terms_text()
            );
         }
@@ -562,7 +562,7 @@ Survey in 2016, 2017, and 2018."#;
         let index = Index::create_in_ram(schema);
         {
             // writing the segment
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             {
                 let doc = doc!(text_field => TEST_TEXT);
                 index_writer.add_document(doc);
@@ -25,6 +25,8 @@ pub enum ComponentSpaceUsage {
     Store(StoreSpaceUsage),
     /// Some sort of raw byte count
     Basic(ByteCount),
+    ///
+    Unimplemented,
 }
 
 /// Represents combined space usage of an entire searcher and its component segments.
@@ -119,7 +121,7 @@ impl SegmentSpaceUsage {
     /// Clones the underlying data.
     /// Use the components directly if this is somehow in performance critical code.
     pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage {
-        use self::ComponentSpaceUsage::*;
+        use self::ComponentSpaceUsage::{Basic, PerField, Store, Unimplemented};
         use crate::SegmentComponent::*;
         match component {
             POSTINGS => PerField(self.postings().clone()),
@@ -130,6 +132,7 @@ impl SegmentSpaceUsage {
             TERMS => PerField(self.termdict().clone()),
             STORE => Store(self.store().clone()),
             DELETE => Basic(self.deletes()),
+            FIELDSTATS => Unimplemented,
         }
     }
 
@@ -336,7 +339,7 @@ mod test {
         let index = Index::create_in_ram(schema.clone());
 
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(name => 1u64));
             index_writer.add_document(doc!(name => 2u64));
             index_writer.add_document(doc!(name => 10u64));
@@ -374,7 +377,7 @@ mod test {
         let index = Index::create_in_ram(schema.clone());
 
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(name => "hi"));
             index_writer.add_document(doc!(name => "this is a test"));
             index_writer.add_document(
@@ -414,7 +417,7 @@ mod test {
         let index = Index::create_in_ram(schema.clone());
 
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(name => "hi"));
             index_writer.add_document(doc!(name => "this is a test"));
             index_writer.add_document(
@@ -453,7 +456,7 @@ mod test {
         let index = Index::create_in_ram(schema.clone());
 
         {
-            let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+            let mut index_writer = index.writer_for_tests().unwrap();
             index_writer.add_document(doc!(name => 1u64));
             index_writer.add_document(doc!(name => 2u64));
             index_writer.add_document(doc!(name => 3u64));
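Code matching on `ComponentSpaceUsage` now has a variant to handle for components, such as the new FIELDSTATS, that report no measurement yet. A pared-down sketch of consuming the enum (the types are simplified stand-ins, and `ByteCount` is assumed to be a plain byte counter):

```rust
// Simplified stand-ins for the real space-usage types.
type ByteCount = usize;

enum ComponentSpaceUsage {
    Basic(ByteCount),
    Unimplemented,
}

/// Total bytes attributed to a component; components that are not yet
/// measured (the new Unimplemented variant) count as zero.
fn total_bytes(usage: &ComponentSpaceUsage) -> ByteCount {
    match usage {
        ComponentSpaceUsage::Basic(bytes) => *bytes,
        ComponentSpaceUsage::Unimplemented => 0,
    }
}

fn main() {
    assert_eq!(total_bytes(&ComponentSpaceUsage::Basic(42)), 42);
    assert_eq!(total_bytes(&ComponentSpaceUsage::Unimplemented), 0);
}
```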
Some files were not shown because too many files have changed in this diff.