Compare commits

..

1 commit

Author        SHA1        Message                   Date
Paul Masurel  063ed30f66  Added field norm readers  2020-07-20 11:59:43 +09:00
160 changed files with 4461 additions and 11805 deletions

.gitignore vendored

@@ -1,5 +1,4 @@
 tantivy.iml
-proptest-regressions
 *.swp
 target
 target/debug
@@ -12,4 +11,3 @@ cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
-cargo-timing*

CHANGELOG.md

@@ -1,38 +1,5 @@
-Tantivy 0.14.0
-=========================
-- Remove dependency to atomicwrites #833. (Implemented by @pmasurel upon suggestion and research from @asafigan)
-- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
-- API Change. Accessing the typed value of a `Schema::Value` now returns an `Option` instead of panicking if the type does not match.
-- Large API change in the Directory API. Tantivy used to assume that all files could be somehow memory mapped. After this change, Directory returns a `FileSlice` that can be reduced and eventually read into an `OwnedBytes` object. Long and blocking IO operations are still required, but they do not span the entire file.
-- Added support for Brotli compression in the DocStore. (@ppodolsky)
-- Added helper for building intersections and unions in BooleanQuery (@guilload)
-- Bugfix in `Query::explain`
-- Removed dependency on `notify` #924. Replaced with a `FileWatcher` struct that polls the meta file every 500ms in a background thread. (@halvorboe @guilload)
-- Added `FilterCollector`, which wraps another collector and filters docs using a predicate over a fast field (@barrotsteindev)
-- Simplified the encoding of the skip reader struct. BlockWAND max tf is now encoded over a single byte. (@pmasurel)
-- `FilterCollector` now supports all Fast Field value types (@barrotsteindev)
-This version breaks compatibility and requires users to reindex everything.
-Tantivy 0.13.2
-===================
-Bugfix. Acquiring a facet reader on a segment that does not contain any
-doc with this facet returns `None`. (#896)
-Tantivy 0.13.1
-===================
-Made `Query` and `Collector` `Send + Sync`.
-Updated misc dependency versions.
 Tantivy 0.13.0
 ======================
-Tantivy 0.13 introduces a change in the index format that will require
-you to reindex your index (BlockWAND information is added in the skip list).
-The index size increase is minor, as this information is only added for
-full blocks.
-If you have a massive index for which reindexing is not an option, please contact me
-so that we can discuss possible solutions.
 - Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
 - Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
 - `MMapDirectory::open` does not return a `Result` anymore.
@@ -50,8 +17,6 @@ while doc != TERMINATED {
 The change made it possible to greatly simplify a lot of the docset's code.
 - Misc internal optimization and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
 - Added an offset option to the Top(.*)Collectors. (@robyoung)
-- Added Block WAND. Performance of Top-K on term unions should be greatly increased. (@fulmicoton, and special thanks
-to the PISA team for answering all my questions!)
 Tantivy 0.12.0
 ======================
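One of the removed bullets above notes that accessing the typed value of a `Schema::Value` now returns an `Option` instead of panicking on a type mismatch. A minimal sketch of what that means for callers; the `text()` accessor name is assumed from the 0.14-dev API:

```rust
use tantivy::schema::Value;

// Hypothetical helper: with the Option-returning accessor, a type
// mismatch yields None rather than a panic (method name assumed).
fn value_as_text(value: &Value) -> &str {
    value.text().unwrap_or("<not a text value>")
}
```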

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.14.0-dev"
+version = "0.12.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,51 +13,51 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"
 [dependencies]
-base64 = "0.13"
+base64 = "0.12.0"
-byteorder = "1"
+byteorder = "1.0"
-crc32fast = "1"
+crc32fast = "1.2.0"
-once_cell = "1"
+once_cell = "1.0"
-regex ={version = "1", default-features = false, features = ["std"]}
+regex ={version = "1.3.0", default-features = false, features = ["std"]}
 tantivy-fst = "0.3"
 memmap = {version = "0.7", optional=true}
-lz4 = {version="1", optional=true}
+lz4 = {version="1.20", optional=true}
-brotli = {version="3.3.0", optional=true}
 snap = "1"
-tempfile = {version="3", optional=true}
+atomicwrites = {version="0.2.2", optional=true}
+tempfile = "3.0"
 log = "0.4"
-serde = {version="1", features=["derive"]}
+serde = {version="1.0", features=["derive"]}
-serde_json = "1"
+serde_json = "1.0"
-num_cpus = "1"
+num_cpus = "1.2"
 fs2={version="0.4", optional=true}
 levenshtein_automata = "0.2"
+notify = {version="4", optional=true}
 uuid = { version = "0.8", features = ["v4", "serde"] }
-crossbeam = "0.8"
+crossbeam = "0.7"
 futures = {version = "0.3", features=["thread-pool"] }
-tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
+owning_ref = "0.4"
-stable_deref_trait = "1"
+stable_deref_trait = "1.0.0"
-rust-stemmers = "1"
+rust-stemmers = "1.2"
-downcast-rs = "1"
+downcast-rs = { version="1.0" }
+tantivy-query-grammar = { version="0.13", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
 census = "0.4"
-fnv = "1"
+fnv = "1.0.6"
-thiserror = "1.0"
+owned-read = "0.4"
-htmlescape = "0.3"
+failure = "0.1"
+htmlescape = "0.3.1"
 fail = "0.4"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1"
+smallvec = "1.0"
 rayon = "1"
-lru = "0.6"
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"
 [dev-dependencies]
-rand = "0.8"
+rand = "0.7"
 maplit = "1"
 matches = "0.1.8"
-proptest = "0.10"
-criterion = "0.3"
 [dev-dependencies.fail]
 version = "0.4"
@@ -74,8 +74,7 @@ overflow-checks = true
 [features]
 default = ["mmap"]
-mmap = ["fs2", "tempfile", "memmap"]
+mmap = ["atomicwrites", "fs2", "memmap", "notify"]
-brotli-compression = ["brotli"]
 lz4-compression = ["lz4"]
 failpoints = ["fail/failpoints"]
 unstable = [] # useful for benches.
@@ -98,7 +97,3 @@ travis-ci = { repository = "tantivy-search/tantivy" }
 name = "failpoints"
 path = "tests/failpoints/mod.rs"
 required-features = ["fail/failpoints"]
-[[bench]]
-name = "analyzer"
-harness = false
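The dependency churn above lines up with the changelog at the top of this compare: the 0.14.0-dev side drops `atomicwrites` and `notify` (both still referenced by the 0.12 `mmap` feature), replaces `failure` with `thiserror` (#760), and adds `brotli` (Brotli DocStore compression), `lru`, `proptest`, and `criterion`; `owned-read` and `owning_ref` disappear along with the Directory API rework.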

README.md

@@ -5,6 +5,7 @@
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 [![Build status](https://ci.appveyor.com/api/projects/status/r7nb13kj23u8m9pj/branch/master?svg=true)](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
 [![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
+[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton)
 ![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
@@ -33,6 +34,11 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
 performance for different types of queries / collections.
+In general, Tantivy tends to be
+- slower than Lucene on union with a Top-K due to Block-WAND optimization.
+- faster than Lucene on intersection and phrase queries.
 Your mileage WILL vary depending on the nature of queries and their load.
 # Features
@@ -84,7 +90,7 @@ There are many ways to support this project.
 - Help with documentation by asking questions or submitting PRs
 - Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
 - Talk about Tantivy around you
-- [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
+- Drop a word on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
 # Contributing code

File diff suppressed because it is too large

benches/analyzer.rs

@@ -1,22 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tantivy::tokenizer::TokenizerManager;
const ALICE_TXT: &'static str = include_str!("alice.txt");
pub fn criterion_benchmark(c: &mut Criterion) {
let tokenizer_manager = TokenizerManager::default();
let tokenizer = tokenizer_manager.get("default").unwrap();
c.bench_function("default-tokenize-alice", |b| {
b.iter(|| {
let mut word_count = 0;
let mut token_stream = tokenizer.token_stream(ALICE_TXT);
while token_stream.advance() {
word_count += 1;
}
assert_eq!(word_count, 30_731);
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
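Given the `[[bench]]` target removed from Cargo.toml above (`name = "analyzer"`, `harness = false`), this criterion benchmark would have been run with `cargo bench --bench analyzer`.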


@@ -112,6 +112,18 @@ fn main() -> tantivy::Result<()> {
     limbs and branches that arch over the pool"
     ));
+    index_writer.add_document(doc!(
+    title => "Of Mice and Men",
+    body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
+            bank and runs deep and green. The water is warm too, for it has slipped twinkling \
+            over the yellow sands in the sunlight before reaching the narrow pool. On one \
+            side of the river the golden foothill slopes curve up to the strong and rocky \
+            Gabilan Mountains, but on the valley side the water is lined with trees—willows \
+            fresh and green with every spring, carrying in their lower leaf junctures the \
+            debris of the winters flooding; and sycamores with mottled, white, recumbent \
+            limbs and branches that arch over the pool"
+    ));
     // Multivalued fields just need to be repeated.
     index_writer.add_document(doc!(
     title => "Frankenstein",


@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
 use tantivy::query::QueryParser;
 use tantivy::schema::Field;
 use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
-use tantivy::{doc, Index, Score, SegmentReader, TantivyError};
+use tantivy::{doc, Index, SegmentReader, TantivyError};
 #[derive(Default)]
 struct Stats {
@@ -114,7 +114,7 @@ struct StatsSegmentCollector {
 impl SegmentCollector for StatsSegmentCollector {
     type Fruit = Option<Stats>;
-    fn collect(&mut self, doc: u32, _score: Score) {
+    fn collect(&mut self, doc: u32, _score: f32) {
         let value = self.fast_field_reader.get(doc) as f64;
         self.stats.count += 1;
         self.stats.sum += value;


@@ -56,12 +56,12 @@ fn main() -> tantivy::Result<()> {
     );
     let top_docs_by_custom_score =
         TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
-            let ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
+            let mut ingredient_reader = segment_reader.facet_reader(ingredient).unwrap();
             let facet_dict = ingredient_reader.facet_dict();
             let query_ords: HashSet<u64> = facets
                 .iter()
-                .filter_map(|key| facet_dict.term_ord(key.encoded_str()).unwrap())
+                .filter_map(|key| facet_dict.term_ord(key.encoded_str()))
                 .collect();
             let mut facet_ords_buffer: Vec<u64> = Vec::with_capacity(20);


@@ -45,7 +45,7 @@ fn main() -> tantivy::Result<()> {
     // Inverted index stands for the combination of
     // - the term dictionary
     // - the inverted lists associated with each term and their positions
-    let inverted_index = segment_reader.inverted_index(title)?;
+    let inverted_index = segment_reader.inverted_index(title);
     // A `Term` is a text token associated with a field.
     // Let's go through all docs containing the term `title:the` and access their position
@@ -58,7 +58,7 @@ fn main() -> tantivy::Result<()> {
     // If you don't need all this information, you may get better performance by decompressing less
     // information.
     if let Some(mut segment_postings) =
-        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
+        inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
     {
         // this buffer will be used to request for positions
         let mut positions: Vec<u32> = Vec::with_capacity(100);
@@ -106,7 +106,7 @@ fn main() -> tantivy::Result<()> {
     // Inverted index stands for the combination of
     // - the term dictionary
     // - the inverted lists associated with each term and their positions
-    let inverted_index = segment_reader.inverted_index(title)?;
+    let inverted_index = segment_reader.inverted_index(title);
     // This segment posting object is like a cursor over the documents matching the term.
     // The `IndexRecordOption` argument tells tantivy we will be interested in both term frequencies
@@ -115,7 +115,7 @@ fn main() -> tantivy::Result<()> {
     // If you don't need all this information, you may get better performance by decompressing less
     // information.
     if let Some(mut block_segment_postings) =
-        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
+        inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
     {
         loop {
             let docs = block_segment_postings.docs();

query-grammar/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy-query-grammar"
-version = "0.14.0-dev"
+version = "0.13.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]


@@ -31,12 +31,22 @@ impl Occur {
     /// Compose two occur values.
     pub fn compose(left: Occur, right: Occur) -> Occur {
-        match (left, right) {
-            (Occur::Should, _) => right,
-            (Occur::Must, Occur::MustNot) => Occur::MustNot,
-            (Occur::Must, _) => Occur::Must,
-            (Occur::MustNot, Occur::MustNot) => Occur::Must,
-            (Occur::MustNot, _) => Occur::MustNot,
+        match left {
+            Occur::Should => right,
+            Occur::Must => {
+                if right == Occur::MustNot {
+                    Occur::MustNot
+                } else {
+                    Occur::Must
+                }
+            }
+            Occur::MustNot => {
+                if right == Occur::MustNot {
+                    Occur::Must
+                } else {
+                    Occur::MustNot
+                }
+            }
         }
     }
 }
@@ -46,27 +56,3 @@ impl fmt::Display for Occur {
         f.write_char(self.to_char())
     }
 }
-#[cfg(test)]
-mod test {
-    use crate::Occur;
-    #[test]
-    fn test_occur_compose() {
-        assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
-        assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
-        assert_eq!(
-            Occur::compose(Occur::Should, Occur::MustNot),
-            Occur::MustNot
-        );
-        assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
-        assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
-        assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
-        assert_eq!(
-            Occur::compose(Occur::MustNot, Occur::Should),
-            Occur::MustNot
-        );
-        assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
-        assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
-    }
-}
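For context, `Occur::compose` folds the occur flag of a nested clause into its parent's flag; both versions above implement the same truth table, which the removed tests spell out. A small usage sketch (the crate-root `Occur` re-export is assumed, matching the removed test's `use crate::Occur`):

```rust
use tantivy_query_grammar::Occur;

fn main() {
    // Double negation: a MustNot nested under a MustNot becomes Must.
    assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
    // Should defers to the inner clause's occur.
    assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
}
```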


@@ -9,10 +9,8 @@ use combine::{
 fn field<'a>() -> impl Parser<&'a str, Output = String> {
     (
-        (letter().or(char('_'))),
-        many(satisfy(|c: char| {
-            c.is_alphanumeric() || c == '_' || c == '-'
-        })),
+        letter(),
+        many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
     )
     .skip(char(':'))
     .map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
@@ -182,7 +180,7 @@ fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAST)> {
     (optional(occur_symbol()), boosted_leaf())
 }
-fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
+fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
     (many1(digit()), optional((char('.'), many1(digit())))).map(
         |(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
             let mut float_str = int_part;
@@ -190,18 +188,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
             float_str.push(chr);
             float_str.push_str(&decimal_str);
         }
-        float_str.parse::<f64>().unwrap()
+        float_str.parse::<f32>().unwrap()
         },
     )
 }
-fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
+fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
     (char('^'), positive_float_number()).map(|(_, boost)| boost)
 }
 fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
     (leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
-        Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
+        Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
             UserInputAST::Boost(Box::new(leaf), boost)
         }
         _ => leaf,
@@ -281,16 +279,14 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
 #[cfg(test)]
 mod test {
-    type TestParseResult = Result<(), StringStreamError>;
     use super::*;
     use combine::parser::Parser;
-    pub fn nearly_equals(a: f64, b: f64) -> bool {
+    pub fn nearly_equals(a: f32, b: f32) -> bool {
         (a - b).abs() < 0.0005 * (a + b).abs()
     }
-    fn assert_nearly_equals(expected: f64, val: f64) {
+    fn assert_nearly_equals(expected: f32, val: f32) {
         assert!(
             nearly_equals(val, expected),
             "Got {}, expected {}.",
@@ -300,15 +296,14 @@ mod test {
     }
     #[test]
-    fn test_occur_symbol() -> TestParseResult {
+    fn test_occur_symbol() {
-        assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
+        assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
-        assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
+        assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
-        Ok(())
     }
     #[test]
     fn test_positive_float_number() {
-        fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
+        fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
             let (val, remaining) = positive_float_number().parse(float_str).unwrap();
             assert_eq!(remaining, expected_remaining);
             assert_nearly_equals(val, expected_val);
@@ -316,9 +311,9 @@ mod test {
         fn error_parse(float_str: &str) {
             assert!(positive_float_number().parse(float_str).is_err());
         }
-        valid_parse("1.0", 1.0, "");
+        valid_parse("1.0", 1.0f32, "");
-        valid_parse("1", 1.0, "");
+        valid_parse("1", 1.0f32, "");
-        valid_parse("0.234234 aaa", 0.234234f64, " aaa");
+        valid_parse("0.234234 aaa", 0.234234f32, " aaa");
         error_parse(".3332");
         error_parse("1.");
         error_parse("-1.");
@@ -415,25 +410,6 @@ mod test {
         assert_eq!(format!("{:?}", ast), "\"abc\"");
     }
-    #[test]
-    fn test_field_name() -> TestParseResult {
-        assert_eq!(
-            super::field().parse("my-field-name:a")?,
-            ("my-field-name".to_string(), "a")
-        );
-        assert_eq!(
-            super::field().parse("my_field_name:a")?,
-            ("my_field_name".to_string(), "a")
-        );
-        assert!(super::field().parse(":a").is_err());
-        assert!(super::field().parse("-my_field:a").is_err());
-        assert_eq!(
-            super::field().parse("_my_field:a")?,
-            ("_my_field".to_string(), "a")
-        );
-        Ok(())
-    }
     #[test]
     fn test_range_parser() {
         // testing the range() parser separately


@@ -87,7 +87,7 @@ impl UserInputBound {
 pub enum UserInputAST {
     Clause(Vec<(Option<Occur>, UserInputAST)>),
     Leaf(Box<UserInputLeaf>),
-    Boost(Box<UserInputAST>, f64),
+    Boost(Box<UserInputAST>, f32),
 }
 impl UserInputAST {


@@ -96,18 +96,18 @@ mod tests {
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1.0);
+        count_collector.collect(0u32, 1f32);
         assert_eq!(count_collector.harvest(), 1);
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1.0);
+        count_collector.collect(0u32, 1f32);
         assert_eq!(count_collector.harvest(), 1);
     }
     {
         let mut count_collector = SegmentCountCollector::default();
-        count_collector.collect(0u32, 1.0);
+        count_collector.collect(0u32, 1f32);
-        count_collector.collect(1u32, 1.0);
+        count_collector.collect(1u32, 1f32);
         assert_eq!(count_collector.harvest(), 2);
     }
 }

src/collector/custom_score_top_collector.rs

@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {
 impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
 where
-    TCustomScorer: CustomScorer<TScore> + Send + Sync,
+    TCustomScorer: CustomScorer<TScore>,
     TScore: 'static + PartialOrd + Clone + Send + Sync,
 {
     type Fruit = Vec<(TScore, DocAddress)>;
@@ -58,10 +58,10 @@ where
         segment_local_id: u32,
         segment_reader: &SegmentReader,
     ) -> crate::Result<Self::Child> {
-        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
         let segment_collector = self
             .collector
             .for_segment(segment_local_id, segment_reader)?;
+        let segment_scorer = self.custom_scorer.segment_scorer(segment_reader)?;
         Ok(CustomScoreTopSegmentCollector {
             segment_collector,
             segment_scorer,

src/collector/docset_collector.rs

@@ -1,61 +0,0 @@
use std::collections::HashSet;
use crate::{DocAddress, DocId, Score};
use super::{Collector, SegmentCollector};
/// Collectors that returns the set of DocAddress that matches the query.
///
/// This collector is mostly useful for tests.
pub struct DocSetCollector;
impl Collector for DocSetCollector {
type Fruit = HashSet<DocAddress>;
type Child = DocSetChildCollector;
fn for_segment(
&self,
segment_local_id: crate::SegmentLocalId,
_segment: &crate::SegmentReader,
) -> crate::Result<Self::Child> {
Ok(DocSetChildCollector {
segment_local_id,
docs: HashSet::new(),
})
}
fn requires_scoring(&self) -> bool {
false
}
fn merge_fruits(
&self,
segment_fruits: Vec<(u32, HashSet<DocId>)>,
) -> crate::Result<Self::Fruit> {
let len: usize = segment_fruits.iter().map(|(_, docset)| docset.len()).sum();
let mut result = HashSet::with_capacity(len);
for (segment_local_id, docs) in segment_fruits {
for doc in docs {
result.insert(DocAddress(segment_local_id, doc));
}
}
Ok(result)
}
}
pub struct DocSetChildCollector {
segment_local_id: u32,
docs: HashSet<DocId>,
}
impl SegmentCollector for DocSetChildCollector {
type Fruit = (u32, HashSet<DocId>);
fn collect(&mut self, doc: crate::DocId, _score: Score) {
self.docs.insert(doc);
}
fn harvest(self) -> (u32, HashSet<DocId>) {
(self.segment_local_id, self.docs)
}
}
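A hedged usage sketch for the `DocSetCollector` removed above, assuming the 0.14-dev API where it was exported as `tantivy::collector::DocSetCollector` (see the `pub use` removed from `src/collector/mod.rs` further down):

```rust
use tantivy::collector::DocSetCollector;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
    index_writer.add_document(doc!(text => "hello world"));
    index_writer.add_document(doc!(text => "hello tantivy"));
    index_writer.commit()?;
    let searcher = index.reader()?.searcher();
    let query = QueryParser::for_index(&index, vec![text]).parse_query("hello")?;
    // The fruit is the full HashSet<DocAddress> of matching documents.
    let matching_docs = searcher.search(&query, &DocSetCollector)?;
    assert_eq!(matching_docs.len(), 2);
    Ok(())
}
```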

src/collector/facet_collector.rs

@@ -7,6 +7,7 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
+use crate::TantivyError;
 use std::cmp::Ordering;
 use std::collections::btree_map;
 use std::collections::BTreeMap;
@@ -265,7 +266,10 @@ impl Collector for FacetCollector {
         _: SegmentLocalId,
         reader: &SegmentReader,
     ) -> crate::Result<FacetSegmentCollector> {
-        let facet_reader = reader.facet_reader(self.field)?;
+        let field_name = reader.schema().get_field_name(self.field);
+        let facet_reader = reader.facet_reader(self.field).ok_or_else(|| {
+            TantivyError::SchemaError(format!("Field {:?} is not a facet field.", field_name))
+        })?;
         let mut collapse_mapping = Vec::new();
         let mut counts = Vec::new();
@@ -274,7 +278,7 @@ impl Collector for FacetCollector {
         let mut collapse_facet_it = self.facets.iter().peekable();
         collapse_facet_ords.push(0);
         {
-            let mut facet_streamer = facet_reader.facet_dict().range().into_stream()?;
+            let mut facet_streamer = facet_reader.facet_dict().range().into_stream();
             if facet_streamer.advance() {
                 'outer: loop {
                     // at the beginning of this loop, facet_streamer
@@ -368,12 +372,9 @@ impl SegmentCollector for FacetSegmentCollector {
         }
         let mut facet = vec![];
         let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
-        // TODO handle errors.
-        if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
-            if let Ok(facet) = Facet::from_encoded(facet) {
-                facet_counts.insert(facet, count);
-            }
-        }
+        facet_dict.ord_to_term(facet_ord as u64, &mut facet);
+        // TODO
+        facet_counts.insert(Facet::from_encoded(facet).unwrap(), count);
     }
     FacetCounts { facet_counts }
 }
@@ -471,7 +472,7 @@ mod tests {
     let schema = schema_builder.build();
     let index = Index::create_in_ram(schema);
-    let mut index_writer = index.writer_for_tests().unwrap();
+    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
     let num_facets: usize = 3 * 4 * 5;
     let facets: Vec<Facet> = (0..num_facets)
         .map(|mut n| {
@@ -530,7 +531,7 @@ mod tests {
     let facet_field = schema_builder.add_facet_field("facets");
     let schema = schema_builder.build();
     let index = Index::create_in_ram(schema);
-    let mut index_writer = index.writer_for_tests().unwrap();
+    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
     index_writer.add_document(doc!(
         facet_field => Facet::from_text(&"/subjects/A/a"),
         facet_field => Facet::from_text(&"/subjects/B/a"),
@@ -549,12 +550,12 @@ mod tests {
     }
     #[test]
-    fn test_doc_search_by_facet() -> crate::Result<()> {
+    fn test_doc_search_by_facet() {
     let mut schema_builder = Schema::builder();
     let facet_field = schema_builder.add_facet_field("facet");
     let schema = schema_builder.build();
     let index = Index::create_in_ram(schema);
-    let mut index_writer = index.writer_for_tests()?;
+    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
     index_writer.add_document(doc!(
         facet_field => Facet::from_text(&"/A/A"),
     ));
@@ -567,8 +568,8 @@ mod tests {
     index_writer.add_document(doc!(
         facet_field => Facet::from_text(&"/D/C/A"),
     ));
-    index_writer.commit()?;
+    index_writer.commit().unwrap();
-    let reader = index.reader()?;
+    let reader = index.reader().unwrap();
     let searcher = reader.searcher();
     assert_eq!(searcher.num_docs(), 4);
@@ -585,17 +586,17 @@ mod tests {
     assert_eq!(count_facet("/A/C"), 1);
     assert_eq!(count_facet("/A/C/A"), 1);
     assert_eq!(count_facet("/C/A"), 0);
-    let query_parser = QueryParser::for_index(&index, vec![]);
     {
-        let query = query_parser.parse_query("facet:/A/B")?;
-        assert_eq!(1, searcher.search(&query, &Count).unwrap());
+        let query_parser = QueryParser::for_index(&index, vec![]);
+        {
+            let query = query_parser.parse_query("facet:/A/B").unwrap();
+            assert_eq!(1, searcher.search(&query, &Count).unwrap());
+        }
+        {
+            let query = query_parser.parse_query("facet:/A").unwrap();
+            assert_eq!(3, searcher.search(&query, &Count).unwrap());
+        }
     }
-    {
-        let query = query_parser.parse_query("facet:/A")?;
-        assert_eq!(3, searcher.search(&query, &Count)?);
-    }
-    Ok(())
     }
     #[test]
@@ -630,7 +631,7 @@ mod tests {
         .collect();
     docs[..].shuffle(&mut thread_rng());
-    let mut index_writer = index.writer_for_tests().unwrap();
+    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
     for doc in docs {
         index_writer.add_document(doc);
     }
@@ -683,7 +684,7 @@ mod bench {
     // 40425 docs
     docs[..].shuffle(&mut thread_rng());
-    let mut index_writer = index.writer_for_tests().unwrap();
+    let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
     for doc in docs {
         index_writer.add_document(doc);
     }

src/collector/filter_collector_wrapper.rs

@@ -1,189 +0,0 @@
// # Custom collector example
//
// This example shows how you can implement your own
// collector. As an example, we will compute a collector
// that computes the standard deviation of a given fast field.
//
// Of course, you can have a look at the tantivy's built-in collectors
// such as the `CountCollector` for more examples.
// ---
// Importing tantivy...
use std::marker::PhantomData;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{FastFieldReader, FastValue};
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};
/// The `FilterCollector` collector filters docs using a u64 fast field value and a predicate.
/// Only the documents for which the predicate returned "true" will be passed on to the next collector.
///
/// ```rust
/// use tantivy::collector::{TopDocs, FilterCollector};
/// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT, INDEXED, FAST};
/// use tantivy::{doc, DocAddress, Index};
///
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let price = schema_builder.add_u64_field("price", INDEXED | FAST);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64));
/// index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64));
/// index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64));
/// assert!(index_writer.commit().is_ok());
///
/// let reader = index.reader().unwrap();
/// let searcher = reader.searcher();
///
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let no_filter_collector = FilterCollector::new(price, &|value: u64| value > 20_120u64, TopDocs::with_limit(2));
/// let top_docs = searcher.search(&query, &no_filter_collector).unwrap();
///
/// assert_eq!(top_docs.len(), 1);
/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
///
/// let filter_all_collector: FilterCollector<_, _, u64> = FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
/// let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
///
/// assert_eq!(filtered_top_docs.len(), 0);
/// ```
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue>
where
TPredicate: 'static,
{
field: Field,
collector: TCollector,
predicate: &'static TPredicate,
t_predicate_value: PhantomData<TPredicateValue>,
}
impl<TCollector, TPredicate, TPredicateValue: FastValue>
FilterCollector<TCollector, TPredicate, TPredicateValue>
where
TCollector: Collector + Send + Sync,
TPredicate: Fn(TPredicateValue) -> bool + Send + Sync,
{
/// Create a new FilterCollector.
pub fn new(
field: Field,
predicate: &'static TPredicate,
collector: TCollector,
) -> FilterCollector<TCollector, TPredicate, TPredicateValue> {
FilterCollector {
field,
predicate,
collector,
t_predicate_value: PhantomData,
}
}
}
impl<TCollector, TPredicate, TPredicateValue: FastValue> Collector
for FilterCollector<TCollector, TPredicate, TPredicateValue>
where
TCollector: Collector + Send + Sync,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
TPredicateValue: 'static + FastValue,
{
// That's the type of our result.
// Our standard deviation will be a float.
type Fruit = TCollector::Fruit;
type Child = FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>;
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
) -> crate::Result<FilterSegmentCollector<TCollector::Child, TPredicate, TPredicateValue>> {
let schema = segment_reader.schema();
let field_entry = schema.get_field_entry(self.field);
if !field_entry.is_fast() {
return Err(TantivyError::SchemaError(format!(
"Field {:?} is not a fast field.",
field_entry.name()
)));
}
let requested_type = TPredicateValue::to_type();
let field_schema_type = field_entry.field_type().value_type();
if requested_type != field_schema_type {
return Err(TantivyError::SchemaError(format!(
"Field {:?} is of type {:?}!={:?}",
field_entry.name(),
requested_type,
field_schema_type
)));
}
let fast_field_reader = segment_reader
.fast_fields()
.typed_fast_field_reader(self.field)
.ok_or_else(|| {
TantivyError::SchemaError(format!(
"{:?} is not declared as a fast field in the schema.",
self.field
))
})?;
let segment_collector = self
.collector
.for_segment(segment_local_id, segment_reader)?;
Ok(FilterSegmentCollector {
fast_field_reader,
segment_collector,
predicate: self.predicate,
t_predicate_value: PhantomData,
})
}
fn requires_scoring(&self) -> bool {
self.collector.requires_scoring()
}
fn merge_fruits(
&self,
segment_fruits: Vec<<TCollector::Child as SegmentCollector>::Fruit>,
) -> crate::Result<TCollector::Fruit> {
self.collector.merge_fruits(segment_fruits)
}
}
pub struct FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TPredicate: 'static,
TPredicateValue: 'static + FastValue,
{
fast_field_reader: FastFieldReader<TPredicateValue>,
segment_collector: TSegmentCollector,
predicate: &'static TPredicate,
t_predicate_value: PhantomData<TPredicateValue>,
}
impl<TSegmentCollector, TPredicate, TPredicateValue> SegmentCollector
for FilterSegmentCollector<TSegmentCollector, TPredicate, TPredicateValue>
where
TSegmentCollector: SegmentCollector,
TPredicate: 'static + Fn(TPredicateValue) -> bool + Send + Sync,
TPredicateValue: 'static + FastValue,
{
type Fruit = TSegmentCollector::Fruit;
fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get(doc);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
}
}
fn harvest(self) -> <TSegmentCollector as SegmentCollector>::Fruit {
self.segment_collector.harvest()
}
}


@@ -0,0 +1,127 @@
use std::cmp::Eq;
use std::collections::HashMap;
use std::hash::Hash;
use collector::Collector;
use fastfield::FastFieldReader;
use schema::Field;
use DocId;
use Result;
use Score;
use SegmentReader;
use SegmentLocalId;
/// Facet collector for i64/u64 fast field
pub struct IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
counters: HashMap<T::ValueType, u64>,
field: Field,
ff_reader: Option<T>,
}
impl<T> IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
/// Creates a new facet collector for aggregating a given field.
pub fn new(field: Field) -> IntFacetCollector<T> {
IntFacetCollector {
counters: HashMap::new(),
field: field,
ff_reader: None,
}
}
}
impl<T> Collector for IntFacetCollector<T>
where
T: FastFieldReader,
T::ValueType: Eq + Hash,
{
fn set_segment(&mut self, _: SegmentLocalId, reader: &SegmentReader) -> Result<()> {
self.ff_reader = Some(reader.get_fast_field_reader(self.field)?);
Ok(())
}
fn collect(&mut self, doc: DocId, _: Score) {
let val = self.ff_reader
.as_ref()
.expect(
"collect() was called before set_segment. \
This should never happen.",
)
.get(doc);
*(self.counters.entry(val).or_insert(0)) += 1;
}
}
#[cfg(test)]
mod tests {
use collector::{chain, IntFacetCollector};
use query::QueryParser;
use fastfield::{F64FastFieldReader, I64FastFieldReader, U64FastFieldReader};
use schema::{self, FAST, STRING};
use Index;
#[test]
// create 10 documents, set num field value to 0 or 1 for even/odd ones
// make sure we have facet counters correctly filled
fn test_facet_collector_results() {
let mut schema_builder = schema::Schema::builder();
let num_field_i64 = schema_builder.add_i64_field("num_i64", FAST);
let num_field_u64 = schema_builder.add_u64_field("num_u64", FAST);
let num_field_f64 = schema_builder.add_f64_field("num_f64", FAST);
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
num_field_i64 => ((i as i64) % 3i64) as i64,
num_field_u64 => (i % 2u64) as u64,
num_field_f64 => (i % 4u64) as f64,
text_field => "text"
));
}
}
assert_eq!(index_writer.commit().unwrap(), 10u64);
}
let searcher = index.reader().unwrap().searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);
let mut ffvf_f64: IntFacetCollector<F64FastFieldReader> = IntFacetCollector::new(num_field_f64);
{
// perform the query
let mut facet_collectors = chain().push(&mut ffvf_i64).push(&mut ffvf_u64).push(&mut ffvf_f64);
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("text:text").unwrap();
query.search(&searcher, &mut facet_collectors).unwrap();
}
assert_eq!(ffvf_u64.counters[&0], 5);
assert_eq!(ffvf_u64.counters[&1], 5);
assert_eq!(ffvf_i64.counters[&0], 4);
assert_eq!(ffvf_i64.counters[&1], 3);
assert_eq!(ffvf_f64.counters[&0.0], 3);
assert_eq!(ffvf_f64.counters[&2.0], 2);
}
}

src/collector/mod.rs

@@ -111,12 +111,6 @@ mod facet_collector;
 pub use self::facet_collector::FacetCollector;
 use crate::query::Weight;
-mod docset_collector;
-pub use self::docset_collector::DocSetCollector;
-mod filter_collector_wrapper;
-pub use self::filter_collector_wrapper::FilterCollector;
 /// `Fruit` is the type for the result of our collection.
 /// e.g. `usize` for the `Count` collector.
 pub trait Fruit: Send + downcast_rs::Downcast {}
@@ -139,13 +133,13 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
 /// The collection logic itself is in the `SegmentCollector`.
 ///
 /// Segments are not guaranteed to be visited in any specific order.
-pub trait Collector: Sync + Send {
+pub trait Collector: Sync {
     /// `Fruit` is the type for the result of our collection.
     /// e.g. `usize` for the `Count` collector.
     type Fruit: Fruit;
     /// Type of the `SegmentCollector` associated to this collector.
-    type Child: SegmentCollector;
+    type Child: SegmentCollector<Fruit = Self::Fruit>;
     /// `set_segment` is called before beginning to enumerate
     /// on this segment.
@@ -160,10 +154,7 @@ pub trait Collector: Sync + Send {
     /// Combines the fruit associated to the collection of each segments
     /// into one fruit.
-    fn merge_fruits(
-        &self,
-        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit>;
+    fn merge_fruits(&self, segment_fruits: Vec<Self::Fruit>) -> crate::Result<Self::Fruit>;
     /// Created a segment collector and
     fn collect_segment(
@@ -233,11 +224,11 @@ where
     fn merge_fruits(
         &self,
-        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
+        children: Vec<(Left::Fruit, Right::Fruit)>,
     ) -> crate::Result<(Left::Fruit, Right::Fruit)> {
         let mut left_fruits = vec![];
         let mut right_fruits = vec![];
-        for (left_fruit, right_fruit) in segment_fruits {
+        for (left_fruit, right_fruit) in children {
             left_fruits.push(left_fruit);
             right_fruits.push(right_fruit);
         }
@@ -291,10 +282,7 @@ where
         self.0.requires_scoring() || self.1.requires_scoring() || self.2.requires_scoring()
     }
-    fn merge_fruits(
-        &self,
-        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit> {
+    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
         let mut one_fruits = vec![];
         let mut two_fruits = vec![];
         let mut three_fruits = vec![];
@@ -361,10 +349,7 @@ where
         || self.3.requires_scoring()
     }
-    fn merge_fruits(
-        &self,
-        children: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit> {
+    fn merge_fruits(&self, children: Vec<Self::Fruit>) -> crate::Result<Self::Fruit> {
        let mut one_fruits = vec![];
        let mut two_fruits = vec![];
        let mut three_fruits = vec![];
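Worth noting: the 0.14.0-dev side of this hunk both adds `Send` to the `Collector` bound and drops the `Fruit = Self::Fruit` equality constraint on `Child`, with `merge_fruits` taking `Vec<<Self::Child as SegmentCollector>::Fruit>` instead of `Vec<Self::Fruit>`. That relaxation is what lets a collector's fruit type differ from its segment collectors' fruit, which the `FastFieldConvertCollector` removed in the `TopDocs` diff further down relies on.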


@@ -34,13 +34,13 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 fn merge_fruits(
     &self,
-    children: Vec<<Self::Child as SegmentCollector>::Fruit>,
+    children: Vec<<Self as Collector>::Fruit>,
 ) -> crate::Result<Box<dyn Fruit>> {
-    let typed_fruit: Vec<<TCollector::Child as SegmentCollector>::Fruit> = children
+    let typed_fruit: Vec<TCollector::Fruit> = children
         .into_iter()
         .map(|untyped_fruit| {
             untyped_fruit
-                .downcast::<<TCollector::Child as SegmentCollector>::Fruit>()
+                .downcast::<TCollector::Fruit>()
                 .map(|boxed_but_typed| *boxed_but_typed)
                 .map_err(|_| {
                     TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
 impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
     type Fruit = Box<dyn Fruit>;
-    fn collect(&mut self, doc: u32, score: Score) {
+    fn collect(&mut self, doc: u32, score: f32) {
         self.as_mut().collect(doc, score);
     }
@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
 }
 pub trait BoxableSegmentCollector {
-    fn collect(&mut self, doc: u32, score: Score);
+    fn collect(&mut self, doc: u32, score: f32);
     fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
 }
@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
 impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
     for SegmentCollectorWrapper<TSegmentCollector>
 {
-    fn collect(&mut self, doc: u32, score: Score) {
+    fn collect(&mut self, doc: u32, score: f32) {
         self.0.collect(doc, score);
     }
@@ -259,7 +259,7 @@ mod tests {
     let index = Index::create_in_ram(schema);
     {
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(text=>"abc"));
         index_writer.add_document(doc!(text=>"abc abc abc"));
         index_writer.add_document(doc!(text=>"abc abc"));


@@ -8,13 +8,6 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
-use crate::collector::{FilterCollector, TopDocs};
-use crate::query::QueryParser;
-use crate::schema::{Schema, FAST, TEXT};
-use crate::DateTime;
-use crate::{doc, Index};
-use std::str::FromStr;
 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
     compute_score: true,
 };
@@ -23,54 +16,6 @@ pub const TEST_COLLECTOR_WITHOUT_SCORE: TestCollector = TestCollector {
     compute_score: true,
 };
-#[test]
-pub fn test_filter_collector() {
-    let mut schema_builder = Schema::builder();
-    let title = schema_builder.add_text_field("title", TEXT);
-    let price = schema_builder.add_u64_field("price", FAST);
-    let date = schema_builder.add_date_field("date", FAST);
-    let schema = schema_builder.build();
-    let index = Index::create_in_ram(schema);
-    let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
-    index_writer.add_document(doc!(title => "The Name of the Wind", price => 30_200u64, date => DateTime::from_str("1898-04-09T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "The Diary of Muadib", price => 29_240u64, date => DateTime::from_str("2020-04-09T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "The Diary of Anne Frank", price => 18_240u64, date => DateTime::from_str("2019-04-20T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "A Dairy Cow", price => 21_240u64, date => DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()));
-    index_writer.add_document(doc!(title => "The Diary of a Young Girl", price => 20_120u64, date => DateTime::from_str("2018-04-09T00:00:00+00:00").unwrap()));
-    assert!(index_writer.commit().is_ok());
-    let reader = index.reader().unwrap();
-    let searcher = reader.searcher();
-    let query_parser = QueryParser::for_index(&index, vec![title]);
-    let query = query_parser.parse_query("diary").unwrap();
-    let filter_some_collector = FilterCollector::new(
-        price,
-        &|value: u64| value > 20_120u64,
-        TopDocs::with_limit(2),
-    );
-    let top_docs = searcher.search(&query, &filter_some_collector).unwrap();
-    assert_eq!(top_docs.len(), 1);
-    assert_eq!(top_docs[0].1, DocAddress(0, 1));
-    let filter_all_collector: FilterCollector<_, _, u64> =
-        FilterCollector::new(price, &|value| value < 5u64, TopDocs::with_limit(2));
-    let filtered_top_docs = searcher.search(&query, &filter_all_collector).unwrap();
-    assert_eq!(filtered_top_docs.len(), 0);
-    fn date_filter(value: DateTime) -> bool {
-        (value - DateTime::from_str("2019-04-09T00:00:00+00:00").unwrap()).num_weeks() > 0
-    }
-    let filter_dates_collector = FilterCollector::new(date, &date_filter, TopDocs::with_limit(5));
-    let filtered_date_docs = searcher.search(&query, &filter_dates_collector).unwrap();
-    assert_eq!(filtered_date_docs.len(), 2);
-}
 /// Stores all of the doc ids.
 /// This collector is only used for tests.
 /// It is unusable in pr
@@ -240,15 +185,12 @@ impl Collector for BytesFastFieldTestCollector {
     _segment_local_id: u32,
     segment_reader: &SegmentReader,
 ) -> crate::Result<BytesFastFieldSegmentCollector> {
-    let reader = segment_reader
-        .fast_fields()
-        .bytes(self.field)
-        .ok_or_else(|| {
-            crate::TantivyError::InvalidArgument("Field is not a bytes fast field.".to_string())
-        })?;
     Ok(BytesFastFieldSegmentCollector {
         vals: Vec::new(),
-        reader,
+        reader: segment_reader
+            .fast_fields()
+            .bytes(self.field)
+            .expect("Field is not a bytes fast field."),
     })
 }
@@ -264,7 +206,7 @@ impl Collector for BytesFastFieldTestCollector {
 impl SegmentCollector for BytesFastFieldSegmentCollector {
     type Fruit = Vec<u8>;
-    fn collect(&mut self, doc: u32, _score: Score) {
+    fn collect(&mut self, doc: u32, _score: f32) {
         let data = self.reader.get_bytes(doc);
         self.vals.extend(data);
     }


@@ -1,4 +1,6 @@
 use super::Collector;
+use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
+use crate::collector::top_collector::TopSegmentCollector;
 use crate::collector::top_collector::{ComparableDoc, TopCollector};
 use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
@@ -12,71 +14,8 @@ use crate::DocId;
 use crate::Score;
 use crate::SegmentLocalId;
 use crate::SegmentReader;
-use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
-use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
+use std::collections::BinaryHeap;
 use std::fmt;
-use std::{collections::BinaryHeap, marker::PhantomData};
-struct FastFieldConvertCollector<
-    TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
-    TFastValue: FastValue,
-> {
-    pub collector: TCollector,
-    pub field: Field,
-    pub fast_value: std::marker::PhantomData<TFastValue>,
-}
-impl<TCollector, TFastValue> Collector for FastFieldConvertCollector<TCollector, TFastValue>
-where
-    TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
-    TFastValue: FastValue + 'static,
-{
-    type Fruit = Vec<(TFastValue, DocAddress)>;
-    type Child = TCollector::Child;
-    fn for_segment(
-        &self,
-        segment_local_id: crate::SegmentLocalId,
-        segment: &SegmentReader,
-    ) -> crate::Result<Self::Child> {
-        let schema = segment.schema();
-        let field_entry = schema.get_field_entry(self.field);
-        if !field_entry.is_fast() {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is not a fast field.",
-                field_entry.name()
-            )));
-        }
-        let schema_type = TFastValue::to_type();
-        let requested_type = field_entry.field_type().value_type();
-        if schema_type != requested_type {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is of type {:?}!={:?}",
-                field_entry.name(),
-                schema_type,
-                requested_type
-            )));
-        }
-        self.collector.for_segment(segment_local_id, segment)
-    }
-    fn requires_scoring(&self) -> bool {
-        self.collector.requires_scoring()
-    }
-    fn merge_fruits(
-        &self,
-        segment_fruits: Vec<<Self::Child as SegmentCollector>::Fruit>,
-    ) -> crate::Result<Self::Fruit> {
-        let raw_result = self.collector.merge_fruits(segment_fruits)?;
-        let transformed_result = raw_result
-            .into_iter()
-            .map(|(score, doc_address)| (TFastValue::from_u64(score), doc_address))
-            .collect::<Vec<_>>();
-        Ok(transformed_result)
-    }
-}
 /// The `TopDocs` collector keeps track of the top `K` documents
 /// sorted by their score.
@@ -99,7 +38,7 @@ where
 /// let schema = schema_builder.build();
 /// let index = Index::create_in_ram(schema);
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
 /// index_writer.add_document(doc!(title => "The Name of the Wind"));
 /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
 /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -113,8 +52,8 @@ where
 /// let query = query_parser.parse_query("diary").unwrap();
 /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
 ///
-/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
+/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
-/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
+/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
 /// ```
 pub struct TopDocs(TopCollector<Score>);
@@ -134,7 +73,7 @@ struct ScorerByFastFieldReader {
 impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
     fn score(&mut self, doc: DocId) -> u64 {
-        self.ff_reader.get(doc)
+        self.ff_reader.get_u64(u64::from(doc))
     }
 }
@@ -148,10 +87,10 @@ impl CustomScorer<u64> for ScorerByField {
     fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
         let ff_reader = segment_reader
             .fast_fields()
-            .u64_lenient(self.field)
+            .u64(self.field)
             .ok_or_else(|| {
                 crate::TantivyError::SchemaError(format!(
-                    "Field requested ({:?}) is not a fast field.",
+                    "Field requested ({:?}) is not a i64/u64 fast field.",
                     self.field
                 ))
             })?;
@@ -173,8 +112,6 @@ impl TopDocs {
     /// This is equivalent to `OFFSET` in MySQL or PostgreSQL and `start` in
     /// Lucene's TopDocsCollector.
     ///
-    /// # Example
-    ///
    /// ```rust
    /// use tantivy::collector::TopDocs;
    /// use tantivy::query::QueryParser;
@@ -186,7 +123,7 @@ impl TopDocs {
    /// let schema = schema_builder.build();
    /// let index = Index::create_in_ram(schema);
    ///
-   /// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
+   /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    /// index_writer.add_document(doc!(title => "The Name of the Wind"));
    /// index_writer.add_document(doc!(title => "The Diary of Muadib"));
    /// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -202,8 +139,8 @@ impl TopDocs {
    /// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
/// ///
/// assert_eq!(top_docs.len(), 2); /// assert_eq!(top_docs.len(), 2);
/// assert_eq!(top_docs[0].1, DocAddress(0, 4)); /// assert_eq!(&top_docs[0], &(0.5204813, DocAddress(0, 4)));
/// assert_eq!(top_docs[1].1, DocAddress(0, 3)); /// assert_eq!(&top_docs[1], &(0.4793185, DocAddress(0, 3)));
/// ``` /// ```
pub fn and_offset(self, offset: usize) -> TopDocs { pub fn and_offset(self, offset: usize) -> TopDocs {
TopDocs(self.0.and_offset(offset)) TopDocs(self.0.and_offset(offset))
@@ -211,14 +148,6 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field. /// Set top-K to rank documents by a given fast field.
/// ///
/// If the field is not a fast field or does not exist, this method still returns successfully (it is not aware of any schema).
/// An error will be returned at the moment of search.
///
/// If the field is a FAST field but not a u64 field, the search will return successfully, but it will
/// return a monotonic u64 representation (i.e. the order is still correct) of the requested field type.
///
/// # Example
///
/// ```rust /// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT}; /// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress}; /// # use tantivy::{doc, Index, DocAddress};
@@ -234,13 +163,13 @@ impl TopDocs {
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64)); /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64)); /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64)); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64)); /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # assert!(index_writer.commit().is_ok()); /// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader()?; /// # let reader = index.reader().unwrap();
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?; /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?; /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
@@ -248,20 +177,25 @@ impl TopDocs {
/// # (80u64, DocAddress(0u32, 3))]); /// # (80u64, DocAddress(0u32, 3))]);
/// # Ok(()) /// # Ok(())
/// # } /// # }
///
///
/// /// Searches the documents matching the given query, and /// /// Searches the documents matching the given query, and
/// /// collects the top 10 documents, ordered by the u64-`field` /// /// collects the top 10 documents, ordered by the u64-`field`
/// /// given as an argument. /// /// given as an argument.
/// ///
/// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(searcher: &Searcher, /// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &dyn Query, /// query: &dyn Query,
/// rating_field: Field) /// sort_by_field: Field)
/// -> tantivy::Result<Vec<(u64, DocAddress)>> { /// -> tantivy::Result<Vec<(u64, DocAddress)>> {
/// ///
/// // This is where we build our topdocs collector /// // This is where we build our topdocs collector
/// // /// //
/// // Note the `rating_field` needs to be a FAST field here. /// // Note the generics parameter that needs to match the
/// let top_books_by_rating = TopDocs /// // type `sort_by_field`.
/// let top_docs_by_rating = TopDocs
/// ::with_limit(10) /// ::with_limit(10)
/// .order_by_u64_field(rating_field); /// .order_by_u64_field(sort_by_field);
/// ///
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for /// // The `u64` in the pair is the value of our fast field for
@@ -271,105 +205,21 @@ impl TopDocs {
/// // length of 10, or less if not enough documents matched the /// // length of 10, or less if not enough documents matched the
/// // query. /// // query.
/// let resulting_docs: Vec<(u64, DocAddress)> = /// let resulting_docs: Vec<(u64, DocAddress)> =
/// searcher.search(query, &top_books_by_rating)?; /// searcher.search(query, &top_docs_by_rating)?;
/// ///
/// Ok(resulting_docs) /// Ok(resulting_docs)
/// } /// }
/// ``` /// ```
/// ///
/// # See also /// # Panics
/// ///
/// To comfortably work with `u64`s, `i64`s, `f64`s, or `date`s, please refer to /// May panic if the field requested is not a fast field.
/// [.order_by_fast_field(...)](#method.order_by_fast_field) method. ///
pub fn order_by_u64_field( pub fn order_by_u64_field(
self, self,
field: Field, field: Field,
) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> { ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
CustomScoreTopCollector::new(ScorerByField { field }, self.0.into_tscore()) self.custom_score(ScorerByField { field })
}
/// Set top-K to rank documents by a given fast field.
///
/// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
/// but an explicit error will be returned at the moment of collection.
///
/// Note that this method is generic. The requested fast field type will often be
/// inferred in your code by the Rust compiler.
///
/// Implementation-wise, for performance reasons, tantivy manipulates the u64 representation of your fast
/// field until the last moment.
///
/// # Example
///
/// ```rust
/// # use tantivy::schema::{Schema, FAST, TEXT};
/// # use tantivy::{doc, Index, DocAddress};
/// # use tantivy::query::{Query, AllQuery};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
///
/// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("company", TEXT);
/// # let rating = schema_builder.add_i64_field("revenue", FAST);
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// # index_writer.add_document(doc!(title => "MadCow Inc.", rating => 92_000_000i64));
/// # index_writer.add_document(doc!(title => "Zozo Cow KKK", rating => 119_000_000i64));
/// # index_writer.add_document(doc!(title => "Declining Cow", rating => -63_000_000i64));
/// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader()?;
/// # let top_docs = docs_sorted_by_revenue(&reader.searcher(), &AllQuery, rating)?;
/// # assert_eq!(top_docs,
/// # vec![(119_000_000i64, DocAddress(0, 1)),
/// # (92_000_000i64, DocAddress(0, 0))]);
/// # Ok(())
/// # }
/// /// Searches the documents matching the given query, and
/// /// collects the top documents, ordered by the i64-`field`
/// /// given as an argument.
/// fn docs_sorted_by_revenue(searcher: &Searcher,
/// query: &dyn Query,
/// revenue_field: Field)
/// -> tantivy::Result<Vec<(i64, DocAddress)>> {
///
/// // This is where we build our topdocs collector
/// //
/// // Note the generics parameter that needs to match the
/// // type `sort_by_field`. revenue_field here is a FAST i64 field.
/// let top_company_by_revenue = TopDocs
/// ::with_limit(2)
/// .order_by_fast_field(revenue_field);
///
/// // ... and here are our documents. Note this is a simple vec.
/// // The `i64` in the pair is the value of our fast field for
/// // each document.
/// //
/// // The vec is sorted in decreasing order of `sort_by_field`, and has a
/// // length of 2, or less if not enough documents matched the
/// // query.
/// let resulting_docs: Vec<(i64, DocAddress)> =
/// searcher.search(query, &top_company_by_revenue)?;
///
/// Ok(resulting_docs)
/// }
/// ```
pub fn order_by_fast_field<TFastValue>(
self,
fast_field: Field,
) -> impl Collector<Fruit = Vec<(TFastValue, DocAddress)>>
where
TFastValue: FastValue + 'static,
{
let u64_collector = self.order_by_u64_field(fast_field);
FastFieldConvertCollector {
collector: u64_collector,
field: fast_field,
fast_value: PhantomData,
}
} }
/// Ranks the documents using a custom score. /// Ranks the documents using a custom score.
@@ -414,7 +264,7 @@ impl TopDocs {
/// fn create_index() -> tantivy::Result<Index> { /// fn create_index() -> tantivy::Result<Index> {
/// let schema = create_schema(); /// let schema = create_schema();
/// let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// let product_name = index.schema().get_field("product_name").unwrap(); /// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64)); /// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
@@ -453,7 +303,7 @@ impl TopDocs {
/// let popularity: u64 = popularity_reader.get(doc); /// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm /// // Well.. For the sake of the example we use a simple logarithm
/// // function. /// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2(); /// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
/// popularity_boost_score * original_score /// popularity_boost_score * original_score
/// } /// }
/// }); /// });
@@ -474,7 +324,7 @@ impl TopDocs {
where where
TScore: 'static + Send + Sync + Clone + PartialOrd, TScore: 'static + Send + Sync + Clone + PartialOrd,
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static, TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync, TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
{ {
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore()) TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
} }
@@ -521,7 +371,7 @@ impl TopDocs {
/// # fn main() -> tantivy::Result<()> { /// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema(); /// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap(); /// # let product_name = index.schema().get_field("product_name").unwrap();
/// # /// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
@@ -588,7 +438,7 @@ impl TopDocs {
where where
TScore: 'static + Send + Sync + Clone + PartialOrd, TScore: 'static + Send + Sync + Clone + PartialOrd,
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static, TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync, TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
{ {
CustomScoreTopCollector::new(custom_score, self.0.into_tscore()) CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
} }
@@ -629,7 +479,7 @@ impl Collector for TopDocs {
let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len); let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
if let Some(delete_bitset) = reader.delete_bitset() { if let Some(delete_bitset) = reader.delete_bitset() {
let mut threshold = Score::MIN; let mut threshold = f32::MIN;
weight.for_each_pruning(threshold, reader, &mut |doc, score| { weight.for_each_pruning(threshold, reader, &mut |doc, score| {
if delete_bitset.is_deleted(doc) { if delete_bitset.is_deleted(doc) {
return threshold; return threshold;
@@ -641,16 +491,16 @@ impl Collector for TopDocs {
if heap.len() < heap_len { if heap.len() < heap_len {
heap.push(heap_item); heap.push(heap_item);
if heap.len() == heap_len { if heap.len() == heap_len {
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN); threshold = heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
} }
return threshold; return threshold;
} }
*heap.peek_mut().unwrap() = heap_item; *heap.peek_mut().unwrap() = heap_item;
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN); threshold = heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN);
threshold threshold
})?; })?;
} else { } else {
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| { weight.for_each_pruning(f32::MIN, reader, &mut |doc, score| {
let heap_item = ComparableDoc { let heap_item = ComparableDoc {
feature: score, feature: score,
doc, doc,
@@ -659,13 +509,13 @@ impl Collector for TopDocs {
heap.push(heap_item); heap.push(heap_item);
// TODO the threshold is suboptimal for heap.len == heap_len // TODO the threshold is suboptimal for heap.len == heap_len
if heap.len() == heap_len { if heap.len() == heap_len {
return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN); return heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
} else { } else {
return Score::MIN; return f32::MIN;
} }
} }
*heap.peek_mut().unwrap() = heap_item; *heap.peek_mut().unwrap() = heap_item;
heap.peek().map(|el| el.feature).unwrap_or(Score::MIN) heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN)
})?; })?;
} }
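
The collection loop above can be summarized with a standalone sketch (a simplification, not the patch's code; u64 scores are used here because `f32` is not `Ord`): keep the current top-k in a min-heap, and feed the weakest retained score back to the scorer as a threshold, so that documents, or whole blocks, that cannot make the cut are skipped.

use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Collect the top-k (score, doc) pairs, best first.
fn top_k(scored_docs: impl IntoIterator<Item = (u64, u32)>, k: usize) -> Vec<(u64, u32)> {
    // Reverse turns the max-heap into a min-heap: peek() is the weakest hit.
    let mut heap: BinaryHeap<Reverse<(u64, u32)>> = BinaryHeap::with_capacity(k);
    let mut threshold = u64::MIN;
    for (score, doc) in scored_docs {
        if heap.len() == k && score <= threshold {
            continue; // a pruning scorer would skip such docs without scoring them
        }
        if heap.len() < k {
            heap.push(Reverse((score, doc)));
        } else {
            // Replace the weakest hit; PeekMut restores the heap order on drop.
            *heap.peek_mut().unwrap() = Reverse((score, doc));
        }
        if heap.len() == k {
            threshold = heap.peek().map(|Reverse((s, _))| *s).unwrap_or(u64::MIN);
        }
    }
    let mut hits: Vec<(u64, u32)> = heap.into_iter().map(|Reverse(pair)| pair).collect();
    hits.sort_by(|a, b| b.cmp(a));
    hits
}

Once the heap is full, the threshold only ever increases, which is what allows `for_each_pruning` to skip low-scoring blocks entirely.
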
@@ -711,7 +561,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"Hello happy tax payer.")); index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer")); index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
index_writer.add_document(doc!(text_field=>"I like Droopy")); index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -720,15 +570,8 @@ mod tests {
index index
} }
fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
for (result, expected) in results.iter().zip(expected.iter()) {
assert_eq!(result.1, expected.1);
crate::assert_nearly_equals!(result.0, expected.0);
}
}
#[test] #[test]
fn test_top_collector_not_at_capacity_without_offset() { fn test_top_collector_not_at_capacity() {
let index = make_index(); let index = make_index();
let field = index.schema().get_field("text").unwrap(); let field = index.schema().get_field("text").unwrap();
let query_parser = QueryParser::for_index(&index, vec![field]); let query_parser = QueryParser::for_index(&index, vec![field]);
@@ -739,13 +582,13 @@ mod tests {
.searcher() .searcher()
.search(&text_query, &TopDocs::with_limit(4)) .search(&text_query, &TopDocs::with_limit(4))
.unwrap(); .unwrap();
assert_results_equals( assert_eq!(
&score_docs, score_docs,
&[ vec![
(0.81221175, DocAddress(0u32, 1)), (0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)), (0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0)), (0.48527452, DocAddress(0, 0))
], ]
); );
} }
@@ -761,7 +604,7 @@ mod tests {
.searcher() .searcher()
.search(&text_query, &TopDocs::with_limit(4).and_offset(2)) .search(&text_query, &TopDocs::with_limit(4).and_offset(2))
.unwrap(); .unwrap();
assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]); assert_eq!(score_docs, vec![(0.48527452, DocAddress(0, 0))]);
} }
#[test] #[test]
@@ -776,12 +619,12 @@ mod tests {
.searcher() .searcher()
.search(&text_query, &TopDocs::with_limit(2)) .search(&text_query, &TopDocs::with_limit(2))
.unwrap(); .unwrap();
assert_results_equals( assert_eq!(
&score_docs, score_docs,
&[ vec![
(0.81221175, DocAddress(0u32, 1)), (0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)), (0.5376842, DocAddress(0u32, 2)),
], ]
); );
} }
@@ -797,12 +640,12 @@ mod tests {
.searcher() .searcher()
.search(&text_query, &TopDocs::with_limit(2).and_offset(1)) .search(&text_query, &TopDocs::with_limit(2).and_offset(1))
.unwrap(); .unwrap();
assert_results_equals( assert_eq!(
&score_docs[..], score_docs,
&[ vec![
(0.5376842, DocAddress(0u32, 2)), (0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0)), (0.48527452, DocAddress(0, 0))
], ]
); );
} }
@@ -863,8 +706,8 @@ mod tests {
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size); let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap(); let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!( assert_eq!(
&top_docs[..], top_docs,
&[ vec![
(64, DocAddress(0, 1)), (64, DocAddress(0, 1)),
(16, DocAddress(0, 2)), (16, DocAddress(0, 2)),
(12, DocAddress(0, 0)) (12, DocAddress(0, 0))
@@ -872,94 +715,6 @@ mod tests {
); );
} }
#[test]
fn test_top_field_collector_datetime() -> crate::Result<()> {
use std::str::FromStr;
let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", TEXT);
let birthday = schema_builder.add_date_field("birthday", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let pr_birthday = crate::DateTime::from_str("1898-04-09T00:00:00+00:00")?;
index_writer.add_document(doc!(
name => "Paul Robeson",
birthday => pr_birthday
));
let mr_birthday = crate::DateTime::from_str("1947-11-08T00:00:00+00:00")?;
index_writer.add_document(doc!(
name => "Minnie Riperton",
birthday => mr_birthday
));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field(birthday);
let top_docs: Vec<(crate::DateTime, DocAddress)> =
searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
&[
(mr_birthday, DocAddress(0, 1)),
(pr_birthday, DocAddress(0, 0)),
]
);
Ok(())
}
#[test]
fn test_top_field_collector_i64() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let city = schema_builder.add_text_field("city", TEXT);
let altitude = schema_builder.add_i64_field("altitude", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
city => "georgetown",
altitude => -1i64,
));
index_writer.add_document(doc!(
city => "tokyo",
altitude => 40i64,
));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
let top_docs: Vec<(i64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
&[(40i64, DocAddress(0, 1)), (-1i64, DocAddress(0, 0)),]
);
Ok(())
}
#[test]
fn test_top_field_collector_f64() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let city = schema_builder.add_text_field("city", TEXT);
let altitude = schema_builder.add_f64_field("altitude", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
city => "georgetown",
altitude => -1.0f64,
));
index_writer.add_document(doc!(
city => "tokyo",
altitude => 40f64,
));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let top_collector = TopDocs::with_limit(3).order_by_fast_field(altitude);
let top_docs: Vec<(f64, DocAddress)> = searcher.search(&AllQuery, &top_collector)?;
assert_eq!(
&top_docs[..],
&[(40f64, DocAddress(0, 1)), (-1.0f64, DocAddress(0, 0)),]
);
Ok(())
}
#[test] #[test]
#[should_panic] #[should_panic]
fn test_field_does_not_exist() { fn test_field_does_not_exist() {
@@ -982,41 +737,29 @@ mod tests {
} }
#[test] #[test]
fn test_field_not_fast_field() -> crate::Result<()> { fn test_field_not_fast_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field(TITLE, TEXT);
let size = schema_builder.add_u64_field(SIZE, STORED); let size = schema_builder.add_u64_field(SIZE, STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let (index, _) = index("beer", title, schema, |index_writer| {
let mut index_writer = index.writer_for_tests()?; index_writer.add_document(doc!(
index_writer.add_document(doc!(size=>1u64)); title => "bottle of beer",
index_writer.commit()?; size => 12u64,
let searcher = index.reader()?.searcher(); ));
});
let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size); let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let err = top_collector.for_segment(0, segment).err().unwrap(); let err = top_collector.for_segment(0, segment);
assert!( if let Err(crate::TantivyError::SchemaError(msg)) = err {
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field requested (Field(0)) is not a fast field.") assert_eq!(
); msg,
Ok(()) "Field requested (Field(1)) is not a i64/u64 fast field."
} );
} else {
#[test] assert!(false);
fn test_field_wrong_type() -> crate::Result<()> { }
let mut schema_builder = Schema::builder();
let size = schema_builder.add_u64_field(SIZE, STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(size=>1u64));
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment = searcher.segment_reader(0);
let top_collector = TopDocs::with_limit(4).order_by_fast_field::<i64>(size);
let err = top_collector.for_segment(0, segment).err().unwrap();
assert!(
matches!(err, crate::TantivyError::SchemaError(msg) if msg == "Field \"size\" is not a fast field.")
);
Ok(())
} }
#[test] #[test]
@@ -1070,7 +813,8 @@ mod tests {
mut doc_adder: impl FnMut(&mut IndexWriter) -> (), mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
) -> (Index, Box<dyn Query>) { ) -> (Index, Box<dyn Query>) {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer); doc_adder(&mut index_writer);
index_writer.commit().unwrap(); index_writer.commit().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]); let query_parser = QueryParser::for_index(&index, vec![query_field]);


@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore> impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where where
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync, TScoreTweaker: ScoreTweaker<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync, TScore: 'static + PartialOrd + Clone + Send + Sync,
{ {
type Fruit = Vec<(TScore, DocAddress)>; type Fruit = Vec<(TScore, DocAddress)>;


@@ -1,7 +1,6 @@
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use std::io; use std::io;
use std::ops::Deref;
use crate::directory::OwnedBytes;
pub(crate) struct BitPacker { pub(crate) struct BitPacker {
mini_buffer: u64, mini_buffer: u64,
@@ -61,14 +60,20 @@ impl BitPacker {
} }
#[derive(Clone)] #[derive(Clone)]
pub struct BitUnpacker { pub struct BitUnpacker<Data>
where
Data: Deref<Target = [u8]>,
{
num_bits: u64, num_bits: u64,
mask: u64, mask: u64,
data: OwnedBytes, data: Data,
} }
impl BitUnpacker { impl<Data> BitUnpacker<Data>
pub fn new(data: OwnedBytes, num_bits: u8) -> BitUnpacker { where
Data: Deref<Target = [u8]>,
{
pub fn new(data: Data, num_bits: u8) -> BitUnpacker<Data> {
let mask: u64 = if num_bits == 64 { let mask: u64 = if num_bits == 64 {
!0u64 !0u64
} else { } else {
@@ -85,7 +90,7 @@ impl BitUnpacker {
if self.num_bits == 0 { if self.num_bits == 0 {
return 0u64; return 0u64;
} }
let data: &[u8] = self.data.as_slice(); let data: &[u8] = &*self.data;
let num_bits = self.num_bits; let num_bits = self.num_bits;
let mask = self.mask; let mask = self.mask;
let addr_in_bits = idx * num_bits; let addr_in_bits = idx * num_bits;
@@ -104,9 +109,8 @@ impl BitUnpacker {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::{BitPacker, BitUnpacker}; use super::{BitPacker, BitUnpacker};
use crate::directory::OwnedBytes;
fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker, Vec<u64>) { fn create_fastfield_bitpacker(len: usize, num_bits: u8) -> (BitUnpacker<Vec<u8>>, Vec<u64>) {
let mut data = Vec::new(); let mut data = Vec::new();
let mut bitpacker = BitPacker::new(); let mut bitpacker = BitPacker::new();
let max_val: u64 = (1u64 << num_bits as u64) - 1u64; let max_val: u64 = (1u64 << num_bits as u64) - 1u64;
@@ -118,7 +122,7 @@ mod test {
} }
bitpacker.close(&mut data).unwrap(); bitpacker.close(&mut data).unwrap();
assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7); assert_eq!(data.len(), ((num_bits as usize) * len + 7) / 8 + 7);
let bitunpacker = BitUnpacker::new(OwnedBytes::new(data), num_bits); let bitunpacker = BitUnpacker::new(data, num_bits);
(bitunpacker, vals) (bitunpacker, vals)
} }

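For reference, the arithmetic `BitUnpacker::get` performs can be sketched on its own (a simplification, not the patch's exact code): values are packed back to back, `num_bits` bits each, so value `idx` starts at bit `idx * num_bits`; reading 8 bytes at the containing byte address and shifting keeps the hot path branch-free, which is why the packer pads the buffer with 7 trailing bytes.

fn unpack(data: &[u8], idx: usize, num_bits: u64) -> u64 {
    if num_bits == 0 {
        return 0; // every value is zero; nothing is stored at all
    }
    let mask = if num_bits == 64 { !0u64 } else { (1u64 << num_bits) - 1 };
    let addr_in_bits = idx as u64 * num_bits;
    let addr = (addr_in_bits / 8) as usize; // byte holding the value's first bit
    let bit_shift = (addr_in_bits % 8) as u32; // bit offset within that byte
    let mut word = [0u8; 8];
    let available = data.len().min(addr + 8) - addr;
    word[..available].copy_from_slice(&data[addr..addr + available]);
    (u64::from_le_bytes(word) >> bit_shift) & mask
}

// With num_bits = 3, the values [0, 1, 2] pack into the single byte
// 0b10_001_000, and unpack(&[0b10_001_000], 1, 3) == 1.
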

@@ -1,15 +1,14 @@
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::common::VInt; use crate::common::VInt;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::{TerminatingWrite, WritePtr};
use crate::schema::Field; use crate::schema::Field;
use crate::space_usage::FieldUsage; use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::{self, Read, Write}; use std::io::Write;
use std::io::{self, Read};
use super::HasLen;
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)] #[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
pub struct FileAddr { pub struct FileAddr {
@@ -104,26 +103,25 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
/// for each field. /// for each field.
#[derive(Clone)] #[derive(Clone)]
pub struct CompositeFile { pub struct CompositeFile {
data: FileSlice, data: ReadOnlySource,
offsets_index: HashMap<FileAddr, (usize, usize)>, offsets_index: HashMap<FileAddr, (usize, usize)>,
} }
impl CompositeFile { impl CompositeFile {
/// Opens a composite file stored in a given /// Opens a composite file stored in a given
/// `FileSlice`. /// `ReadOnlySource`.
pub fn open(data: &FileSlice) -> io::Result<CompositeFile> { pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
let end = data.len(); let end = data.len();
let footer_len_data = data.slice_from(end - 4).read_bytes()?; let footer_len_data = data.slice_from(end - 4);
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize; let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
let footer_start = end - 4 - footer_len; let footer_start = end - 4 - footer_len;
let footer_data = data let footer_data = data.slice(footer_start, footer_start + footer_len);
.slice(footer_start, footer_start + footer_len)
.read_bytes()?;
let mut footer_buffer = footer_data.as_slice(); let mut footer_buffer = footer_data.as_slice();
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize; let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
let mut file_addrs = vec![]; let mut file_addrs = vec![];
let mut offsets = vec![]; let mut offsets = vec![];
let mut field_index = HashMap::new(); let mut field_index = HashMap::new();
let mut offset = 0; let mut offset = 0;
@@ -152,19 +150,19 @@ impl CompositeFile {
pub fn empty() -> CompositeFile { pub fn empty() -> CompositeFile {
CompositeFile { CompositeFile {
offsets_index: HashMap::new(), offsets_index: HashMap::new(),
data: FileSlice::empty(), data: ReadOnlySource::empty(),
} }
} }
/// Returns the `FileSlice` associated /// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`. /// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read(&self, field: Field) -> Option<FileSlice> { pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
self.open_read_with_idx(field, 0) self.open_read_with_idx(field, 0)
} }
/// Returns the `FileSlice` associated /// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`. /// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> { pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
self.offsets_index self.offsets_index
.get(&FileAddr { field, idx }) .get(&FileAddr { field, idx })
.map(|&(from, to)| self.data.slice(from, to)) .map(|&(from, to)| self.data.slice(from, to))
@@ -194,44 +192,46 @@ mod test {
use std::path::Path; use std::path::Path;
#[test] #[test]
fn test_composite_file() -> crate::Result<()> { fn test_composite_file() {
let path = Path::new("test_path"); let path = Path::new("test_path");
let directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
{ {
let w = directory.open_write(path).unwrap(); let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w); let mut composite_write = CompositeWrite::wrap(w);
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32)); {
VInt(32431123u64).serialize(&mut write_0)?; let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
write_0.flush()?; VInt(32431123u64).serialize(&mut write_0).unwrap();
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32)); write_0.flush().unwrap();
VInt(2).serialize(&mut write_4)?; }
write_4.flush()?;
composite_write.close()?; {
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap();
}
composite_write.close().unwrap();
} }
{ {
let r = directory.open_read(path)?; let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r)?; let composite_file = CompositeFile::open(&r).unwrap();
{ {
let file0 = composite_file let file0 = composite_file
.open_read(Field::from_field_id(0u32)) .open_read(Field::from_field_id(0u32))
.unwrap() .unwrap();
.read_bytes()?;
let mut file0_buf = file0.as_slice(); let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf)?.0; let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0); assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64); assert_eq!(payload_0, 32431123u64);
} }
{ {
let file4 = composite_file let file4 = composite_file
.open_read(Field::from_field_id(4u32)) .open_read(Field::from_field_id(4u32))
.unwrap() .unwrap();
.read_bytes()?;
let mut file4_buf = file4.as_slice(); let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf)?.0; let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0); assert_eq!(file4_buf.len(), 0);
assert_eq!(payload_4, 2u64); assert_eq!(payload_4, 2u64);
} }
} }
Ok(())
} }
} }

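The layout that `CompositeFile::open` decodes above can be summarized as follows (a sketch inferred from the code; the trailing u32 is little-endian, matching the crate's `Endianness`):

use std::convert::TryInto;

//   [ data for field A | data for field B | ... | footer | footer_len: u32 ]
//
// The last 4 bytes give the footer's length, so a reader can locate the
// footer without out-of-band information, then rebuild each field's
// (from, to) byte range from the offsets listed inside it.
fn locate_footer(data: &[u8]) -> (usize, usize) {
    let end = data.len();
    let footer_len = u32::from_le_bytes(data[end - 4..].try_into().unwrap()) as usize;
    let footer_start = end - 4 - footer_len;
    (footer_start, end - 4) // byte range of the footer itself
}
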

@@ -20,10 +20,9 @@ impl<W: Write> CountingWriter<W> {
self.written_bytes self.written_bytes
} }
/// Returns the underlying write object. pub fn finish(mut self) -> io::Result<(W, u64)> {
/// Note that this method does not trigger any flushing. self.flush()?;
pub fn finish(self) -> W { Ok((self.underlying, self.written_bytes))
self.underlying
} }
} }
@@ -47,6 +46,7 @@ impl<W: Write> Write for CountingWriter<W> {
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> { impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> { fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token) self.underlying.terminate_ref(token)
} }
} }
@@ -63,9 +63,8 @@ mod test {
let mut counting_writer = CountingWriter::wrap(buffer); let mut counting_writer = CountingWriter::wrap(buffer);
let bytes = (0u8..10u8).collect::<Vec<u8>>(); let bytes = (0u8..10u8).collect::<Vec<u8>>();
counting_writer.write_all(&bytes).unwrap(); counting_writer.write_all(&bytes).unwrap();
let len = counting_writer.written_bytes(); let (w, len): (Vec<u8>, u64) = counting_writer.finish().unwrap();
let buffer_restituted: Vec<u8> = counting_writer.finish();
assert_eq!(len, 10u64); assert_eq!(len, 10u64);
assert_eq!(buffer_restituted.len(), 10); assert_eq!(w.len(), 10);
} }
} }
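
The change above turns `finish()` into a plain unwrap of the inner writer, with flushing handled by `terminate_ref` instead. As an illustration only (names are mine, not the crate's API), a counting-writer decorator boils down to:

use std::io::{self, Write};

struct Counting<W: Write> {
    inner: W,
    written: u64,
}

impl<W: Write> Write for Counting<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.written += written as u64; // count only the bytes actually accepted
        Ok(written)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

impl<W: Write> Counting<W> {
    fn written_bytes(&self) -> u64 {
        self.written
    }
    // Hand the inner writer back; callers must flush beforehand if they care.
    fn finish(self) -> W {
        self.inner
    }
}
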


@@ -10,9 +10,7 @@ pub(crate) use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite}; pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter; pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize}; pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{ pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
};
pub use byteorder::LittleEndian as Endianness; pub use byteorder::LittleEndian as Endianness;
/// Segment's max doc must be `< MAX_DOC_LIMIT`. /// Segment's max doc must be `< MAX_DOC_LIMIT`.
@@ -66,6 +64,10 @@ pub(crate) fn compute_num_bits(n: u64) -> u8 {
} }
} }
pub(crate) fn is_power_of_2(n: usize) -> bool {
(n > 0) && (n & (n - 1) == 0)
}
/// Has length trait /// Has length trait
pub trait HasLen { pub trait HasLen {
/// Return length /// Return length
@@ -115,16 +117,11 @@ pub fn u64_to_i64(val: u64) -> i64 {
/// For simplicity, tantivy internally handles `f64` as `u64`. /// For simplicity, tantivy internally handles `f64` as `u64`.
/// The mapping is defined by this function. /// The mapping is defined by this function.
/// ///
/// Maps `f64` to `u64` in a monotonic manner, so that byte-lexicographic order is preserved. /// Maps `f64` to `u64` so that lexical order is preserved.
/// ///
/// This is more suited than simply casting (`val as u64`) /// This is more suited than simply casting (`val as u64`)
/// which would truncate the result /// which would truncate the result
/// ///
/// # Reference
///
/// Daniel Lemire's [blog post](https://lemire.me/blog/2020/12/14/converting-floating-point-numbers-to-integers-while-preserving-order/)
/// explains the mapping in a clear manner.
///
/// # See also /// # See also
/// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html). /// The [reverse mapping is `u64_to_f64`](./fn.u64_to_f64.html).
#[inline(always)] #[inline(always)]
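
A sketch of the mapping described above (the classic order-preserving float trick; tantivy's actual implementation sits directly under this doc comment): positive numbers get their sign bit set so they sort above all negatives, and negative numbers are bit-flipped so that "more negative" sorts lower.

fn f64_to_u64_sketch(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        bits | (1u64 << 63) // positives land in the upper half of u64 space
    } else {
        !bits // flipping reverses the (descending) order of negative bit patterns
    }
}

// f64_to_u64_sketch(-2.0) < f64_to_u64_sketch(-1.0) < f64_to_u64_sketch(1.0)
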
@@ -153,7 +150,6 @@ pub(crate) mod test {
pub use super::minmax; pub use super::minmax;
pub use super::serialize::test::fixed_size_test; pub use super::serialize::test::fixed_size_test;
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
use proptest::prelude::*;
use std::f64; use std::f64;
fn test_i64_converter_helper(val: i64) { fn test_i64_converter_helper(val: i64) {
@@ -164,15 +160,6 @@ pub(crate) mod test {
assert_eq!(u64_to_f64(f64_to_u64(val)), val); assert_eq!(u64_to_f64(f64_to_u64(val)), val);
} }
proptest! {
#[test]
fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
let left_u64 = f64_to_u64(left);
let right_u64 = f64_to_u64(right);
assert_eq!(left_u64 < right_u64, left < right);
}
}
#[test] #[test]
fn test_i64_converter() { fn test_i64_converter() {
assert_eq!(i64_to_u64(i64::min_value()), u64::min_value()); assert_eq!(i64_to_u64(i64::min_value()), u64::min_value());


@@ -89,19 +89,6 @@ impl FixedSize for u64 {
const SIZE_IN_BYTES: usize = 8; const SIZE_IN_BYTES: usize = 8;
} }
impl BinarySerializable for f32 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_f32::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_f32::<Endianness>()
}
}
impl FixedSize for f32 {
const SIZE_IN_BYTES: usize = 4;
}
impl BinarySerializable for i64 { impl BinarySerializable for i64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_i64::<Endianness>(*self) writer.write_i64::<Endianness>(*self)


@@ -5,12 +5,12 @@ use std::io::Read;
use std::io::Write; use std::io::Write;
/// Wrapper over a `u64` that serializes as a variable int. /// Wrapper over a `u64` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)] #[derive(Debug, Eq, PartialEq)]
pub struct VInt(pub u64); pub struct VInt(pub u64);
const STOP_BIT: u8 = 128; const STOP_BIT: u8 = 128;
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] { pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
const START_2: u64 = 1 << 7; const START_2: u64 = 1 << 7;
const START_3: u64 = 1 << 14; const START_3: u64 = 1 << 14;
const START_4: u64 = 1 << 21; const START_4: u64 = 1 << 21;
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
let val = u64::from(val); let val = u64::from(val);
const STOP_BIT: u64 = 128u64; const STOP_BIT: u64 = 128u64;
let (res, num_bytes) = match val { match val {
0..=STOP_1 => (val | STOP_BIT, 1), 0..=STOP_1 => (val | STOP_BIT, 1),
START_2..=STOP_2 => ( START_2..=STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)), (val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,9 +56,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
| (STOP_BIT << (8 * 4)), | (STOP_BIT << (8 * 4)),
5, 5,
), ),
}; }
LittleEndian::write_u64(&mut buf[..], res);
&buf[0..num_bytes]
} }
/// Returns the number of bytes covered by a /// Returns the number of bytes covered by a
@@ -87,26 +85,23 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start with a valid /// If the buffer does not start with a valid
/// vint payload /// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 { pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
let (result, vlen) = read_u32_vint_no_advance(*data); let vlen = vint_len(*data);
*data = &data[vlen..];
result
}
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
let vlen = vint_len(data);
let mut result = 0u32; let mut result = 0u32;
let mut shift = 0u64; let mut shift = 0u64;
for &b in &data[..vlen] { for &b in &data[..vlen] {
result |= u32::from(b & 127u8) << shift; result |= u32::from(b & 127u8) << shift;
shift += 7; shift += 7;
} }
(result, vlen) *data = &data[vlen..];
result
} }
/// Write a `u32` as a vint payload. /// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> { pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
let mut buf = [0u8; 8]; let (val, num_bytes) = serialize_vint_u32(val);
let data = serialize_vint_u32(val, &mut buf); let mut buffer = [0u8; 8];
writer.write_all(&data) LittleEndian::write_u64(&mut buffer, val);
writer.write_all(&buffer[..num_bytes])
} }
impl VInt { impl VInt {
@@ -177,6 +172,7 @@ mod tests {
use super::serialize_vint_u32; use super::serialize_vint_u32;
use super::VInt; use super::VInt;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
fn aux_test_vint(val: u64) { fn aux_test_vint(val: u64) {
let mut v = [14u8; 10]; let mut v = [14u8; 10];
@@ -212,10 +208,12 @@ mod tests {
fn aux_test_serialize_vint_u32(val: u32) { fn aux_test_serialize_vint_u32(val: u32) {
let mut buffer = [0u8; 10]; let mut buffer = [0u8; 10];
let mut buffer2 = [0u8; 8]; let mut buffer2 = [0u8; 10];
let len_vint = VInt(val as u64).serialize_into(&mut buffer); let len_vint = VInt(val as u64).serialize_into(&mut buffer);
let res2 = serialize_vint_u32(val, &mut buffer2); let (vint, len) = serialize_vint_u32(val);
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val); assert_eq!(len, len_vint, "len wrong for val {}", val);
LittleEndian::write_u64(&mut buffer2, vint);
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
} }
#[test] #[test]

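For reference, the scheme implemented by `serialize_vint_u32`/`read_u32_vint` can be sketched in a few lines (a simplification; the real code assembles the whole encoded value in one u64 for speed): 7 payload bits per byte, least-significant group first, with the stop bit 0x80 set on the final byte, unlike LEB128's continuation-bit convention.

const STOP_BIT: u8 = 128;

fn write_vint_u32(mut val: u32, out: &mut Vec<u8>) {
    loop {
        let byte = (val & 127) as u8;
        val >>= 7;
        if val == 0 {
            out.push(byte | STOP_BIT); // final byte: stop bit set
            return;
        }
        out.push(byte);
    }
}

fn read_vint_u32(data: &[u8]) -> (u32, usize) {
    let mut result = 0u32;
    // A u32 vint spans at most 5 bytes.
    for (i, &byte) in data.iter().take(5).enumerate() {
        result |= u32::from(byte & 127) << (7 * i as u32);
        if byte & STOP_BIT != 0 {
            return (result, i + 1); // decoded value and bytes consumed
        }
    }
    panic!("unterminated vint");
}

// Example: 300 = 0b10_0101100 encodes as [44, 130] (44 = low 7 bits,
// 130 = 0b10 | STOP_BIT).
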

@@ -5,7 +5,6 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SegmentMetaInventory; use crate::core::SegmentMetaInventory;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::error::OpenReadError;
use crate::directory::ManagedDirectory; use crate::directory::ManagedDirectory;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use crate::directory::MmapDirectory; use crate::directory::MmapDirectory;
@@ -22,6 +21,7 @@ use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::tokenizer::{TextAnalyzer, TokenizerManager}; use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter; use crate::IndexWriter;
use std::borrow::BorrowMut;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt; use std::fmt;
@@ -57,10 +57,8 @@ pub struct Index {
} }
impl Index { impl Index {
/// Examines the directory to see if it contains an index. /// Examines the director to see if it contains an index
/// pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
/// Effectively, it only checks for the presence of the `meta.json` file.
pub fn exists<Dir: Directory>(dir: &Dir) -> Result<bool, OpenReadError> {
dir.exists(&META_FILEPATH) dir.exists(&META_FILEPATH)
} }
@@ -107,7 +105,7 @@ impl Index {
schema: Schema, schema: Schema,
) -> crate::Result<Index> { ) -> crate::Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?; let mmap_directory = MmapDirectory::open(directory_path)?;
if Index::exists(&mmap_directory)? { if Index::exists(&mmap_directory) {
return Err(TantivyError::IndexAlreadyExists); return Err(TantivyError::IndexAlreadyExists);
} }
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
@@ -115,7 +113,7 @@ impl Index {
/// Opens or creates a new index in the provided directory /// Opens or creates a new index in the provided directory
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> { pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
if !Index::exists(&dir)? { if !Index::exists(&dir) {
return Index::create(dir, schema); return Index::create(dir, schema);
} }
let index = Index::open(dir)?; let index = Index::open(dir)?;
@@ -142,9 +140,7 @@ impl Index {
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
} }
/// Creates a new index given an implementation of the trait `Directory`. /// Creates a new index given an implementation of the trait `Directory`
///
/// If a directory previously existed, it will be erased.
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> { pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
let directory = ManagedDirectory::wrap(dir)?; let directory = ManagedDirectory::wrap(dir)?;
Index::from_directory(directory, schema) Index::from_directory(directory, schema)
@@ -153,8 +149,8 @@ impl Index {
/// Create a new index from a directory. /// Create a new index from a directory.
/// ///
/// This will overwrite existing meta.json /// This will overwrite existing meta.json
fn from_directory(directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> { fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
save_new_metas(schema.clone(), &directory)?; save_new_metas(schema.clone(), directory.borrow_mut())?;
let metas = IndexMeta::with_schema(schema); let metas = IndexMeta::with_schema(schema);
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default()) Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
} }
@@ -304,15 +300,6 @@ impl Index {
) )
} }
/// Helper to create an index writer for tests.
///
/// That index writer only simply has a single thread and a heap of 5 MB.
/// Using a single thread gives us a deterministic allocation of DocId.
#[cfg(test)]
pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
self.writer_with_num_threads(1, 10_000_000)
}
/// Creates a multithreaded writer /// Creates a multithreaded writer
/// ///
/// Tantivy will automatically define the number of threads to use. /// Tantivy will automatically define the number of threads to use.
@@ -400,7 +387,7 @@ impl fmt::Debug for Index {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::{RAMDirectory, WatchCallback}; use crate::directory::RAMDirectory;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::{Schema, INDEXED, TEXT}; use crate::schema::{Schema, INDEXED, TEXT};
use crate::IndexReader; use crate::IndexReader;
@@ -424,24 +411,24 @@ mod tests {
#[test] #[test]
fn test_index_exists() { fn test_index_exists() {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
assert!(!Index::exists(&directory).unwrap()); assert!(!Index::exists(&directory));
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory).unwrap()); assert!(Index::exists(&directory));
} }
#[test] #[test]
fn open_or_create_should_create() { fn open_or_create_should_create() {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
assert!(!Index::exists(&directory).unwrap()); assert!(!Index::exists(&directory));
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory).unwrap()); assert!(Index::exists(&directory));
} }
#[test] #[test]
fn open_or_create_should_open() { fn open_or_create_should_open() {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory).unwrap()); assert!(Index::exists(&directory));
assert!(Index::open_or_create(directory, throw_away_schema()).is_ok()); assert!(Index::open_or_create(directory, throw_away_schema()).is_ok());
} }
@@ -449,7 +436,7 @@ mod tests {
fn create_should_wipeoff_existing() { fn create_should_wipeoff_existing() {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory).unwrap()); assert!(Index::exists(&directory));
assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok()); assert!(Index::create(directory.clone(), Schema::builder().build()).is_ok());
} }
@@ -457,7 +444,7 @@ mod tests {
fn open_or_create_exists_but_schema_does_not_match() { fn open_or_create_exists_but_schema_does_not_match() {
let directory = RAMDirectory::create(); let directory = RAMDirectory::create();
assert!(Index::create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
assert!(Index::exists(&directory).unwrap()); assert!(Index::exists(&directory));
assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok()); assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
let err = Index::open_or_create(directory, Schema::builder().build()); let err = Index::open_or_create(directory, Schema::builder().build());
assert_eq!( assert_eq!(
@@ -511,28 +498,28 @@ mod tests {
} }
#[test] #[test]
fn test_index_manual_policy_mmap() -> crate::Result<()> { fn test_index_manual_policy_mmap() {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let mut index = Index::create_from_tempdir(schema)?; let mut index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_for_tests()?; let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit()?; writer.commit().unwrap();
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into()?; .try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
let (sender, receiver) = crossbeam::channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = index.directory_mut().watch(WatchCallback::new(move || { let _handle = index.directory_mut().watch(Box::new(move || {
let _ = sender.send(()); let _ = sender.send(());
})); }));
writer.commit()?; writer.commit().unwrap();
assert!(receiver.recv().is_ok()); assert!(receiver.recv().is_ok());
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
reader.reload()?; reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 1); assert_eq!(reader.searcher().num_docs(), 1);
Ok(())
} }
#[test] #[test]
@@ -552,35 +539,23 @@ mod tests {
test_index_on_commit_reload_policy_aux(field, &write_index, &reader); test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
} }
} }
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) { fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
let mut reader_index = reader.index(); let mut reader_index = reader.index();
let (sender, receiver) = crossbeam::channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = reader_index let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
.directory_mut() let _ = sender.send(());
.watch(WatchCallback::new(move || { }));
let _ = sender.send(()); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
}));
let mut writer = index.writer_for_tests().unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
writer.commit().unwrap(); writer.commit().unwrap();
// We need a loop here because it is possible for notify to send more than assert!(receiver.recv().is_ok());
// one modify event. It was observed on CI on MacOS. assert_eq!(reader.searcher().num_docs(), 1);
loop {
assert!(receiver.recv().is_ok());
if reader.searcher().num_docs() == 1 {
break;
}
}
writer.add_document(doc!(field=>2u64)); writer.add_document(doc!(field=>2u64));
writer.commit().unwrap(); writer.commit().unwrap();
// ... Same as above assert!(receiver.recv().is_ok());
loop { assert_eq!(reader.searcher().num_docs(), 2);
assert!(receiver.recv().is_ok());
if reader.searcher().num_docs() == 2 {
break;
}
}
} }
// This test will not pass on windows, because windows // This test will not pass on windows, because windows
@@ -598,7 +573,7 @@ mod tests {
writer.add_document(doc!(field => i)); writer.add_document(doc!(field => i));
} }
let (sender, receiver) = crossbeam::channel::unbounded(); let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = directory.watch(WatchCallback::new(move || { let _handle = directory.watch(Box::new(move || {
let _ = sender.send(()); let _ = sender.send(());
})); }));
writer.commit().unwrap(); writer.commit().unwrap();
@@ -1,10 +1,9 @@
use std::io;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::positions::PositionReader; use crate::positions::PositionReader;
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::postings::{BlockSegmentPostings, SegmentPostings}; use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::FieldType;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
@@ -16,7 +15,7 @@ use crate::termdict::TermDictionary;
/// ///
/// It is safe to delete the segment associated to /// It is safe to delete the segment associated to
/// an `InvertedIndexReader`. As long as it is open, /// an `InvertedIndexReader`. As long as it is open,
/// the `FileSlice` it is relying on should /// the `ReadOnlySource` it is relying on should
/// stay available. /// stay available.
/// ///
/// ///
@@ -24,9 +23,9 @@ use crate::termdict::TermDictionary;
/// the `SegmentReader`'s [`.inverted_index(...)`] method /// the `SegmentReader`'s [`.inverted_index(...)`] method
pub struct InvertedIndexReader { pub struct InvertedIndexReader {
termdict: TermDictionary, termdict: TermDictionary,
postings_file_slice: FileSlice, postings_source: ReadOnlySource,
positions_file_slice: FileSlice, positions_source: ReadOnlySource,
positions_idx_file_slice: FileSlice, positions_idx_source: ReadOnlySource,
record_option: IndexRecordOption, record_option: IndexRecordOption,
total_num_tokens: u64, total_num_tokens: u64,
} }
@@ -35,38 +34,42 @@ impl InvertedIndexReader {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
pub(crate) fn new( pub(crate) fn new(
termdict: TermDictionary, termdict: TermDictionary,
postings_file_slice: FileSlice, postings_source: ReadOnlySource,
positions_file_slice: FileSlice, positions_source: ReadOnlySource,
positions_idx_file_slice: FileSlice, positions_idx_source: ReadOnlySource,
record_option: IndexRecordOption, record_option: IndexRecordOption,
) -> io::Result<InvertedIndexReader> { ) -> InvertedIndexReader {
let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8); let total_num_tokens_data = postings_source.slice(0, 8);
let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?; let mut total_num_tokens_cursor = total_num_tokens_data.as_slice();
Ok(InvertedIndexReader { let total_num_tokens = u64::deserialize(&mut total_num_tokens_cursor).unwrap_or(0u64);
InvertedIndexReader {
termdict, termdict,
postings_file_slice: postings_body, postings_source: postings_source.slice_from(8),
positions_file_slice, positions_source,
positions_idx_file_slice, positions_idx_source,
record_option, record_option,
total_num_tokens, total_num_tokens,
}) }
} }
/// Creates an empty `InvertedIndexReader` object, which /// Creates an empty `InvertedIndexReader` object, which
/// contains no terms at all. /// contains no terms at all.
pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader { pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
let record_option = field_type
.get_index_record_option()
.unwrap_or(IndexRecordOption::Basic);
InvertedIndexReader { InvertedIndexReader {
termdict: TermDictionary::empty(), termdict: TermDictionary::empty(),
postings_file_slice: FileSlice::empty(), postings_source: ReadOnlySource::empty(),
positions_file_slice: FileSlice::empty(), positions_source: ReadOnlySource::empty(),
positions_idx_file_slice: FileSlice::empty(), positions_idx_source: ReadOnlySource::empty(),
record_option, record_option,
total_num_tokens: 0u64, total_num_tokens: 0u64,
} }
} }
/// Returns the term info associated with the term. /// Returns the term info associated with the term.
pub fn get_term_info(&self, term: &Term) -> io::Result<Option<TermInfo>> { pub fn get_term_info(&self, term: &Term) -> Option<TermInfo> {
self.termdict.get(term.value_bytes()) self.termdict.get(term.value_bytes())
} }
@@ -89,12 +92,11 @@ impl InvertedIndexReader {
&self, &self,
term_info: &TermInfo, term_info: &TermInfo,
block_postings: &mut BlockSegmentPostings, block_postings: &mut BlockSegmentPostings,
) -> io::Result<()> { ) {
let start_offset = term_info.postings_start_offset as usize; let offset = term_info.postings_offset as usize;
let stop_offset = term_info.postings_stop_offset as usize; let end_source = self.postings_source.len();
let postings_slice = self.postings_file_slice.slice(start_offset, stop_offset); let postings_slice = self.postings_source.slice(offset, end_source);
block_postings.reset(term_info.doc_freq, postings_slice.read_bytes()?); block_postings.reset(term_info.doc_freq, postings_slice);
Ok(())
} }
/// Returns a block postings given a `Term`. /// Returns a block postings given a `Term`.
@@ -105,10 +107,9 @@ impl InvertedIndexReader {
&self, &self,
term: &Term, term: &Term,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<Option<BlockSegmentPostings>> { ) -> Option<BlockSegmentPostings> {
self.get_term_info(term)? self.get_term_info(term)
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option)) .map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
.transpose()
} }
/// Returns a block postings given a `term_info`. /// Returns a block postings given a `term_info`.
@@ -119,12 +120,10 @@ impl InvertedIndexReader {
&self, &self,
term_info: &TermInfo, term_info: &TermInfo,
requested_option: IndexRecordOption, requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> { ) -> BlockSegmentPostings {
let postings_data = self.postings_file_slice.slice( let offset = term_info.postings_offset as usize;
term_info.postings_start_offset as usize, let postings_data = self.postings_source.slice_from(offset);
term_info.postings_stop_offset as usize, BlockSegmentPostings::from_data(
);
BlockSegmentPostings::open(
term_info.doc_freq, term_info.doc_freq,
postings_data, postings_data,
self.record_option, self.record_option,
@@ -140,23 +139,20 @@ impl InvertedIndexReader {
&self, &self,
term_info: &TermInfo, term_info: &TermInfo,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<SegmentPostings> { ) -> SegmentPostings {
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?; let block_postings = self.read_block_postings_from_terminfo(term_info, option);
let position_stream = { let position_stream = {
if option.has_positions() { if option.has_positions() {
let position_reader = self.positions_file_slice.clone(); let position_reader = self.positions_source.clone();
let skip_reader = self.positions_idx_file_slice.clone(); let skip_reader = self.positions_idx_source.clone();
let position_reader = let position_reader =
PositionReader::new(position_reader, skip_reader, term_info.positions_idx)?; PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
Some(position_reader) Some(position_reader)
} else { } else {
None None
} }
}; };
Ok(SegmentPostings::from_block_postings( SegmentPostings::from_block_postings(block_postings, position_stream)
block_postings,
position_stream,
))
} }
/// Returns the total number of tokens recorded for all documents /// Returns the total number of tokens recorded for all documents
@@ -175,31 +171,24 @@ impl InvertedIndexReader {
/// For instance, requesting `IndexRecordOption::Freq` for a /// For instance, requesting `IndexRecordOption::Freq` for a
/// `TextIndexingOptions` that does not index position will return a `SegmentPostings` /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
/// with `DocId`s and frequencies. /// with `DocId`s and frequencies.
pub fn read_postings( pub fn read_postings(&self, term: &Term, option: IndexRecordOption) -> Option<SegmentPostings> {
&self, self.get_term_info(term)
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>> {
self.get_term_info(term)?
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option)) .map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
.transpose()
} }
pub(crate) fn read_postings_no_deletes( pub(crate) fn read_postings_no_deletes(
&self, &self,
term: &Term, term: &Term,
option: IndexRecordOption, option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>> { ) -> Option<SegmentPostings> {
self.get_term_info(term)? self.get_term_info(term)
.map(|term_info| self.read_postings_from_terminfo(&term_info, option)) .map(|term_info| self.read_postings_from_terminfo(&term_info, option))
.transpose()
} }
/// Returns the number of documents containing the term. /// Returns the number of documents containing the term.
pub fn doc_freq(&self, term: &Term) -> io::Result<u32> { pub fn doc_freq(&self, term: &Term) -> u32 {
Ok(self self.get_term_info(term)
.get_term_info(term)?
.map(|term_info| term_info.doc_freq) .map(|term_info| term_info.doc_freq)
.unwrap_or(0u32)) .unwrap_or(0u32)
} }
} }
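The hunk above threads `io::Result` through every inverted-index accessor. A hedged sketch of what a caller looks like under the left-hand signatures (`inverted_index` returning `crate::Result<Arc<_>>`, `read_postings` returning `io::Result<Option<_>>`); the field handle and the term value are illustrative:

```rust
use tantivy::postings::Postings;
use tantivy::schema::{Field, IndexRecordOption, Term};
use tantivy::{DocSet, Searcher, TERMINATED};

fn total_term_freq(searcher: &Searcher, field: Field) -> tantivy::Result<u64> {
    let term = Term::from_field_text(field, "tantivy");
    let mut total = 0u64;
    for segment_reader in searcher.segment_readers() {
        // Both calls can now fail with a real error instead of panicking.
        let inverted_index = segment_reader.inverted_index(field)?;
        if let Some(mut postings) =
            inverted_index.read_postings(&term, IndexRecordOption::WithFreqs)?
        {
            // Docsets start positioned on their first document.
            let mut doc = postings.doc();
            while doc != TERMINATED {
                total += u64::from(postings.term_freq());
                doc = postings.advance();
            }
        }
    }
    Ok(total)
}
```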
@@ -1,17 +1,18 @@
use crate::collector::Collector; use crate::collector::Collector;
use crate::core::Executor; use crate::core::Executor;
use crate::core::InvertedIndexReader;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::query::Query; use crate::query::Query;
use crate::schema::Document; use crate::schema::Document;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::Term; use crate::schema::{Field, Term};
use crate::space_usage::SearcherSpaceUsage; use crate::space_usage::SearcherSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermMerger;
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use std::fmt;
use std::{fmt, io}; use std::sync::Arc;
/// Holds a list of `SegmentReader`s ready for search. /// Holds a list of `SegmentReader`s ready for search.
/// ///
@@ -31,17 +32,17 @@ impl Searcher {
schema: Schema, schema: Schema,
index: Index, index: Index,
segment_readers: Vec<SegmentReader>, segment_readers: Vec<SegmentReader>,
) -> io::Result<Searcher> { ) -> Searcher {
let store_readers: Vec<StoreReader> = segment_readers let store_readers = segment_readers
.iter() .iter()
.map(SegmentReader::get_store_reader) .map(SegmentReader::get_store_reader)
.collect::<io::Result<Vec<_>>>()?; .collect();
Ok(Searcher { Searcher {
schema, schema,
index, index,
segment_readers, segment_readers,
store_readers, store_readers,
}) }
} }
/// Returns the `Index` associated to the `Searcher` /// Returns the `Index` associated to the `Searcher`
@@ -74,14 +75,13 @@ impl Searcher {
/// Return the overall number of documents containing /// Return the overall number of documents containing
/// the given term. /// the given term.
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> { pub fn doc_freq(&self, term: &Term) -> u64 {
let mut total_doc_freq = 0; self.segment_readers
for segment_reader in &self.segment_readers { .iter()
let inverted_index = segment_reader.inverted_index(term.field())?; .map(|segment_reader| {
let doc_freq = inverted_index.doc_freq(term)?; u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
total_doc_freq += u64::from(doc_freq); })
} .sum::<u64>()
Ok(total_doc_freq)
} }
/// Return the list of segment readers /// Return the list of segment readers
@@ -147,13 +147,44 @@ impl Searcher {
collector.merge_fruits(fruits) collector.merge_fruits(fruits)
} }
/// Return the field searcher associated to a `Field`.
pub fn field(&self, field: Field) -> FieldSearcher {
let inv_index_readers = self
.segment_readers
.iter()
.map(|segment_reader| segment_reader.inverted_index(field))
.collect::<Vec<_>>();
FieldSearcher::new(inv_index_readers)
}
/// Summarize total space usage of this searcher. /// Summarize total space usage of this searcher.
pub fn space_usage(&self) -> io::Result<SearcherSpaceUsage> { pub fn space_usage(&self) -> SearcherSpaceUsage {
let mut space_usage = SearcherSpaceUsage::new(); let mut space_usage = SearcherSpaceUsage::new();
for segment_reader in &self.segment_readers { for segment_reader in self.segment_readers.iter() {
space_usage.add_segment(segment_reader.space_usage()?); space_usage.add_segment(segment_reader.space_usage());
} }
Ok(space_usage) space_usage
}
}
pub struct FieldSearcher {
inv_index_readers: Vec<Arc<InvertedIndexReader>>,
}
impl FieldSearcher {
fn new(inv_index_readers: Vec<Arc<InvertedIndexReader>>) -> FieldSearcher {
FieldSearcher { inv_index_readers }
}
/// Returns a Stream over all of the sorted unique terms of
/// for the given field.
pub fn terms(&self) -> TermMerger<'_> {
let term_streamers: Vec<_> = self
.inv_index_readers
.iter()
.map(|inverted_index| inverted_index.terms().stream())
.collect();
TermMerger::new(term_streamers)
} }
} }
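`Searcher::doc_freq` becomes fallible for the same reason: it must now open each segment's inverted index. A small consumer sketch with an illustrative term; the BM25-style IDF is only there to show the value in use:

```rust
use tantivy::schema::{Field, Term};
use tantivy::Searcher;

fn inverse_document_frequency(searcher: &Searcher, field: Field) -> tantivy::Result<f64> {
    let term = Term::from_field_text(field, "horse");
    let doc_freq = searcher.doc_freq(&term)? as f64;
    let num_docs = searcher.num_docs() as f64;
    // BM25-style IDF; the +0.5 terms keep the ratio away from division by zero.
    Ok(((num_docs - doc_freq + 0.5) / (doc_freq + 0.5) + 1.0).ln())
}
```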
@@ -4,7 +4,7 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::{FileSlice, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
@@ -78,9 +78,10 @@ impl Segment {
} }
/// Open one of the component file for a *regular* read. /// Open one of the component file for a *regular* read.
pub fn open_read(&self, component: SegmentComponent) -> Result<FileSlice, OpenReadError> { pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component); let path = self.relative_path(component);
self.index.directory().open_read(&path) let source = self.index.directory().open_read(&path)?;
Ok(source)
} }
/// Open one of the component file for *regular* write. /// Open one of the component file for *regular* write.
@@ -20,7 +20,7 @@ pub enum SegmentComponent {
/// Dictionary associating `Term`s to `TermInfo`s which is /// Dictionary associating `Term`s to `TermInfo`s which is
/// simply an address into the `postings` file and the `positions` file. /// simply an address into the `postings` file and the `positions` file.
TERMS, TERMS,
/// Row-oriented, compressed storage of the documents. /// Row-oriented, LZ4-compressed storage of the documents.
/// Accessing a document from the store is relatively slow, as it /// Accessing a document from the store is relatively slow, as it
/// requires to decompress the entire block it belongs to. /// requires to decompress the entire block it belongs to.
STORE, STORE,
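Because the store is block-compressed whichever codec is in use, fetching one stored document decompresses the whole block around it; callers that need many documents may benefit from grouping lookups by `DocAddress`. The access path itself is a single call, sketched here with an `addr` assumed to come from an earlier search:

```rust
use tantivy::schema::Document;
use tantivy::{DocAddress, Searcher};

// Decompresses the block containing `addr`, then deserializes the document.
fn fetch(searcher: &Searcher, addr: DocAddress) -> tantivy::Result<Document> {
    searcher.doc(addr)
}
```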
@@ -1,26 +1,26 @@
use crate::common::CompositeFile;
use crate::common::HasLen; use crate::common::HasLen;
use crate::core::InvertedIndexReader; use crate::core::InvertedIndexReader;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader; use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders; use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders}; use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::Field;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, IndexRecordOption};
use crate::space_usage::SegmentSpaceUsage; use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use crate::DocId; use crate::DocId;
use crate::{common::CompositeFile, error::DataCorruption};
use fail::fail_point; use fail::fail_point;
use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::{collections::HashMap, io};
/// Entry point to access all of the datastructures of the `Segment` /// Entry point to access all of the datastructures of the `Segment`
/// ///
@@ -50,7 +50,7 @@ pub struct SegmentReader {
fast_fields_readers: Arc<FastFieldReaders>, fast_fields_readers: Arc<FastFieldReaders>,
fieldnorm_readers: FieldNormReaders, fieldnorm_readers: FieldNormReaders,
store_file: FileSlice, store_source: ReadOnlySource,
delete_bitset_opt: Option<DeleteBitSet>, delete_bitset_opt: Option<DeleteBitSet>,
schema: Schema, schema: Schema,
} }
@@ -106,26 +106,16 @@ impl SegmentReader {
} }
/// Accessor to the `FacetReader` associated to a given `Field`. /// Accessor to the `FacetReader` associated to a given `Field`.
pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> { pub fn facet_reader(&self, field: Field) -> Option<FacetReader> {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
if field_entry.field_type() != &FieldType::HierarchicalFacet { if field_entry.field_type() != &FieldType::HierarchicalFacet {
return Err(crate::TantivyError::InvalidArgument(format!( return None;
"Field {:?} is not a facet field.",
field_entry.name()
)));
} }
let term_ords_reader = self.fast_fields().u64s(field).ok_or_else(|| { let term_ords_reader = self.fast_fields().u64s(field)?;
DataCorruption::comment_only(format!( let termdict_source = self.termdict_composite.open_read(field)?;
"Cannot find data for hierarchical facet {:?}", let termdict = TermDictionary::from_source(&termdict_source);
field_entry.name() let facet_reader = FacetReader::new(term_ords_reader, termdict);
)) Some(facet_reader)
})?;
let termdict = self
.termdict_composite
.open_read(field)
.map(TermDictionary::open)
.unwrap_or_else(|| Ok(TermDictionary::empty()))?;
Ok(FacetReader::new(term_ords_reader, termdict))
} }
/// Accessor to the segment's `Field norms`'s reader. /// Accessor to the segment's `Field norms`'s reader.
@@ -135,45 +125,47 @@ impl SegmentReader {
/// ///
/// They are simply stored as a fast field, serialized in /// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment. /// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> { pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| { if let Some(fieldnorm_reader) = self.fieldnorm_readers.get_field(field) {
fieldnorm_reader
} else {
let field_name = self.schema.get_field_name(field); let field_name = self.schema.get_field_name(field);
let err_msg = format!( let err_msg = format!(
"Field norm not found for field {:?}. Was it marked as indexed during indexing?", "Field norm not found for field {:?}. Was it market as indexed during indexing.",
field_name field_name
); );
crate::TantivyError::SchemaError(err_msg) panic!(err_msg);
}) }
} }
/// Accessor to the segment's `StoreReader`. /// Accessor to the segment's `StoreReader`.
pub fn get_store_reader(&self) -> io::Result<StoreReader> { pub fn get_store_reader(&self) -> StoreReader {
StoreReader::open(self.store_file.clone()) StoreReader::from_source(self.store_source.clone())
} }
/// Open a new segment for reading. /// Open a new segment for reading.
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> { pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
let termdict_file = segment.open_read(SegmentComponent::TERMS)?; let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
let termdict_composite = CompositeFile::open(&termdict_file)?; let termdict_composite = CompositeFile::open(&termdict_source)?;
let store_file = segment.open_read(SegmentComponent::STORE)?; let store_source = segment.open_read(SegmentComponent::STORE)?;
fail_point!("SegmentReader::open#middle"); fail_point!("SegmentReader::open#middle");
let postings_file = segment.open_read(SegmentComponent::POSTINGS)?; let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
let postings_composite = CompositeFile::open(&postings_file)?; let postings_composite = CompositeFile::open(&postings_source)?;
let positions_composite = { let positions_composite = {
if let Ok(positions_file) = segment.open_read(SegmentComponent::POSITIONS) { if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
CompositeFile::open(&positions_file)? CompositeFile::open(&source)?
} else { } else {
CompositeFile::empty() CompositeFile::empty()
} }
}; };
let positions_idx_composite = { let positions_idx_composite = {
if let Ok(positions_skip_file) = segment.open_read(SegmentComponent::POSITIONSSKIP) { if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
CompositeFile::open(&positions_skip_file)? CompositeFile::open(&source)?
} else { } else {
CompositeFile::empty() CompositeFile::empty()
} }
@@ -187,18 +179,17 @@ impl SegmentReader {
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?); Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?; let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?; let fieldnorm_readers = FieldNormReaders::new(fieldnorm_data)?;
let delete_bitset_opt = if segment.meta().has_deletes() { let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?; let delete_data = segment.open_read(SegmentComponent::DELETE)?;
let delete_bitset = DeleteBitSet::open(delete_data)?; Some(DeleteBitSet::open(delete_data))
Some(delete_bitset)
} else { } else {
None None
}; };
Ok(SegmentReader { Ok(SegmentReader {
inv_idx_reader_cache: Default::default(), inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
max_doc: segment.meta().max_doc(), max_doc: segment.meta().max_doc(),
num_docs: segment.meta().num_docs(), num_docs: segment.meta().num_docs(),
termdict_composite, termdict_composite,
@@ -206,7 +197,7 @@ impl SegmentReader {
fast_fields_readers: fast_field_readers, fast_fields_readers: fast_field_readers,
fieldnorm_readers, fieldnorm_readers,
segment_id: segment.id(), segment_id: segment.id(),
store_file, store_source,
delete_bitset_opt, delete_bitset_opt,
positions_composite, positions_composite,
positions_idx_composite, positions_idx_composite,
@@ -221,64 +212,58 @@ impl SegmentReader {
/// The field reader is in charge of iterating through the /// The field reader is in charge of iterating through the
/// term dictionary associated to a specific field, /// term dictionary associated to a specific field,
/// and opening the posting list associated to any term. /// and opening the posting list associated to any term.
/// pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
/// If the field is not marked as indexed, a warning is logged and an empty `InvertedIndexReader`
/// is returned.
/// Similarly, if the field is marked as indexed but no term has been indexed for the given
/// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
if let Some(inv_idx_reader) = self if let Some(inv_idx_reader) = self
.inv_idx_reader_cache .inv_idx_reader_cache
.read() .read()
.expect("Lock poisoned. This should never happen") .expect("Lock poisoned. This should never happen")
.get(&field) .get(&field)
{ {
return Ok(Arc::clone(inv_idx_reader)); return Arc::clone(inv_idx_reader);
} }
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
let record_option_opt = field_type.get_index_record_option(); let record_option_opt = field_type.get_index_record_option();
if record_option_opt.is_none() { if record_option_opt.is_none() {
warn!("Field {:?} does not seem indexed.", field_entry.name()); panic!("Field {:?} does not seem indexed.", field_entry.name());
} }
let postings_file_opt = self.postings_composite.open_read(field); let record_option = record_option_opt.unwrap();
if postings_file_opt.is_none() || record_option_opt.is_none() { let postings_source_opt = self.postings_composite.open_read(field);
if postings_source_opt.is_none() {
// no documents in the segment contained this field. // no documents in the segment contained this field.
// As a result, no data is associated to the inverted index. // As a result, no data is associated to the inverted index.
// //
// Returns an empty inverted index. // Returns an empty inverted index.
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic); return Arc::new(InvertedIndexReader::empty(field_type));
return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
} }
let record_option = record_option_opt.unwrap(); let postings_source = postings_source_opt.unwrap();
let postings_file = postings_file_opt.unwrap();
let termdict_file: FileSlice = self.termdict_composite.open_read(field) let termdict_source = self.termdict_composite.open_read(field).expect(
.ok_or_else(|| "Failed to open field term dictionary in composite file. Is the field indexed?",
DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name())) );
)?;
let positions_file = self let positions_source = self
.positions_composite .positions_composite
.open_read(field) .open_read(field)
.expect("Index corrupted. Failed to open field positions in composite file."); .expect("Index corrupted. Failed to open field positions in composite file.");
let positions_idx_file = self let positions_idx_source = self
.positions_idx_composite .positions_idx_composite
.open_read(field) .open_read(field)
.expect("Index corrupted. Failed to open field positions in composite file."); .expect("Index corrupted. Failed to open field positions in composite file.");
let inv_idx_reader = Arc::new(InvertedIndexReader::new( let inv_idx_reader = Arc::new(InvertedIndexReader::new(
TermDictionary::open(termdict_file)?, TermDictionary::from_source(&termdict_source),
postings_file, postings_source,
positions_file, positions_source,
positions_idx_file, positions_idx_source,
record_option, record_option,
)?); ));
// by releasing the lock in between, we may end up opening the inverting index // by releasing the lock in between, we may end up opening the inverting index
// twice, but this is fine. // twice, but this is fine.
@@ -287,7 +272,7 @@ impl SegmentReader {
.expect("Field reader cache lock poisoned. This should never happen.") .expect("Field reader cache lock poisoned. This should never happen.")
.insert(field, Arc::clone(&inv_idx_reader)); .insert(field, Arc::clone(&inv_idx_reader));
Ok(inv_idx_reader) inv_idx_reader
} }
/// Returns the segment id /// Returns the segment id
@@ -315,8 +300,8 @@ impl SegmentReader {
} }
/// Summarize total space usage of this segment. /// Summarize total space usage of this segment.
pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> { pub fn space_usage(&self) -> SegmentSpaceUsage {
Ok(SegmentSpaceUsage::new( SegmentSpaceUsage::new(
self.num_docs(), self.num_docs(),
self.termdict_composite.space_usage(), self.termdict_composite.space_usage(),
self.postings_composite.space_usage(), self.postings_composite.space_usage(),
@@ -324,12 +309,12 @@ impl SegmentReader {
self.positions_idx_composite.space_usage(), self.positions_idx_composite.space_usage(),
self.fast_fields_readers.space_usage(), self.fast_fields_readers.space_usage(),
self.fieldnorm_readers.space_usage(), self.fieldnorm_readers.space_usage(),
self.get_store_reader()?.space_usage(), self.get_store_reader().space_usage(),
self.delete_bitset_opt self.delete_bitset_opt
.as_ref() .as_ref()
.map(DeleteBitSet::space_usage) .map(DeleteBitSet::space_usage)
.unwrap_or(0), .unwrap_or(0),
)) )
} }
} }
@@ -346,7 +331,7 @@ mod test {
use crate::DocId; use crate::DocId;
#[test] #[test]
fn test_alive_docs_iterator() -> crate::Result<()> { fn test_alive_docs_iterator() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -354,26 +339,26 @@ mod test {
let name = schema.get_field("name").unwrap(); let name = schema.get_field("name").unwrap();
{ {
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "tantivy")); index_writer.add_document(doc!(name => "tantivy"));
index_writer.add_document(doc!(name => "horse")); index_writer.add_document(doc!(name => "horse"));
index_writer.add_document(doc!(name => "jockey")); index_writer.add_document(doc!(name => "jockey"));
index_writer.add_document(doc!(name => "cap")); index_writer.add_document(doc!(name => "cap"));
// we should now have one segment with two docs // we should now have one segment with two docs
index_writer.commit()?; index_writer.commit().unwrap();
} }
{ {
let mut index_writer2 = index.writer(50_000_000)?; let mut index_writer2 = index.writer(50_000_000).unwrap();
index_writer2.delete_term(Term::from_field_text(name, "horse")); index_writer2.delete_term(Term::from_field_text(name, "horse"));
index_writer2.delete_term(Term::from_field_text(name, "cap")); index_writer2.delete_term(Term::from_field_text(name, "cap"));
// ok, now we should have a deleted doc // ok, now we should have a deleted doc
index_writer2.commit()?; index_writer2.commit().unwrap();
} }
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect(); let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
assert_eq!(vec![0u32, 2u32], docs); assert_eq!(vec![0u32, 2u32], docs);
Ok(())
} }
} }
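The same Option-or-panic to `Result` migration runs through this file: `facet_reader`, `get_fieldnorms_reader`, and `inverted_index` all gain typed errors on the left. A hedged sketch of a caller under those signatures; both field handles are illustrative, and `facet_ords` is assumed to take `&mut self` as in this era of the crate:

```rust
use tantivy::schema::Field;
use tantivy::{DocId, SegmentReader};

fn inspect_doc(
    segment_reader: &SegmentReader,
    facet_field: Field,
    text_field: Field,
    doc: DocId,
) -> tantivy::Result<()> {
    // Now returns InvalidArgument for non-facet fields instead of None.
    let mut facet_reader = segment_reader.facet_reader(facet_field)?;
    let mut facet_ords = Vec::new();
    facet_reader.facet_ords(doc, &mut facet_ords);

    // Now reports missing fieldnorms as a SchemaError instead of panicking.
    let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
    println!(
        "doc {}: {} facet(s), fieldnorm {}",
        doc,
        facet_ords.len(),
        fieldnorm_reader.fieldnorm(doc)
    );
    Ok(())
}
```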
@@ -1,9 +1,9 @@
use crate::directory::directory_lock::Lock; use crate::directory::directory_lock::Lock;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchCallback;
use crate::directory::WatchHandle; use crate::directory::WatchHandle;
use crate::directory::{FileHandle, WatchCallback}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{FileSlice, WritePtr};
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -11,6 +11,7 @@ use std::marker::Send;
use std::marker::Sync; use std::marker::Sync;
use std::path::Path; use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
use std::result;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
@@ -79,7 +80,7 @@ fn try_acquire_lock(
) -> Result<DirectoryLock, TryAcquireLockError> { ) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e { let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists, OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error), OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
})?; })?;
write.flush().map_err(TryAcquireLockError::IOError)?; write.flush().map_err(TryAcquireLockError::IOError)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard { Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
@@ -108,43 +109,37 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
/// should be your default choice. /// should be your default choice.
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which /// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests. /// should be used mostly for tests.
///
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static { pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// Opens a file and returns a boxed `FileHandle`. /// Opens a virtual file for read.
/// ///
/// Users of `Directory` should typically call `Directory::open_read(...)`,
/// while `Directory` implementor should implement `get_file_handle()`.
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError>;
/// Once a virtual file is open, its data may not /// Once a virtual file is open, its data may not
/// change. /// change.
/// ///
/// Specifically, subsequent writes or flushes should /// Specifically, subsequent writes or flushes should
/// have no effect on the returned `FileSlice` object. /// have no effect on the returned `ReadOnlySource` object.
/// ///
/// You should only use this to read files create with [Directory::open_write]. /// You should only use this to read files create with [Directory::open_write].
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
let file_handle = self.get_file_handle(path)?;
Ok(FileSlice::new(file_handle))
}
/// Removes a file /// Removes a file
/// ///
/// Removing a file will not affect an eventual /// Removing a file will not affect an eventual
/// existing FileSlice pointing to it. /// existing ReadOnlySource pointing to it.
/// ///
/// Removing a nonexistent file yields a /// Removing a nonexistent file yields a
/// `DeleteError::FileDoesNotExist`. /// `DeleteError::FileDoesNotExist`.
fn delete(&self, path: &Path) -> Result<(), DeleteError>; fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
/// Returns true iff the file exists /// Returns true iff the file exists
fn exists(&self, path: &Path) -> Result<bool, OpenReadError>; fn exists(&self, path: &Path) -> bool;
/// Opens a writer for the *virtual file* associated with /// Opens a writer for the *virtual file* associated with
/// a Path. /// a Path.
/// ///
/// Right after this call, the file should be created /// Right after this call, the file should be created
/// and any subsequent call to `open_read` for the /// and any subsequent call to `open_read` for the
/// same path should return a `FileSlice`. /// same path should return a `ReadOnlySource`.
/// ///
/// Write operations may be aggressively buffered. /// Write operations may be aggressively buffered.
/// The client of this trait is responsible for calling flush /// The client of this trait is responsible for calling flush
@@ -158,7 +153,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// was not called. /// was not called.
/// ///
/// The file may not previously exist. /// The file may not previously exist.
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError>; fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
/// Reads the full content file that has been written using /// Reads the full content file that has been written using
/// atomic_write. /// atomic_write.
@@ -174,7 +169,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// a partially written file. /// a partially written file.
/// ///
/// The file may or may not previously exist. /// The file may or may not previously exist.
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()>; fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;
/// Acquire a lock in the given directory. /// Acquire a lock in the given directory.
/// ///
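The trait now splits reading into two layers: implementors supply `get_file_handle`, while `open_read` is a provided method that merely wraps the handle into a `FileSlice`. A consumer-side sketch under those left-hand signatures (the file name is illustrative):

```rust
use std::path::Path;
use tantivy::directory::Directory;

fn read_whole_file(directory: &dyn Directory, name: &str) -> tantivy::Result<Vec<u8>> {
    // open_read is cheap: it only wraps the FileHandle. The actual read
    // (and any caching) is deferred to the FileHandle implementation.
    let file_slice = directory.open_read(Path::new(name))?;
    let bytes = file_slice.read_bytes()?;
    Ok(bytes.as_slice().to_vec())
}
```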
@@ -1,68 +1,162 @@
use crate::Version; use crate::Version;
use std::error::Error as StdError;
use std::fmt; use std::fmt;
use std::io; use std::io;
use std::path::PathBuf; use std::path::PathBuf;
/// Error while trying to acquire a directory lock. /// Error while trying to acquire a directory lock.
#[derive(Debug, Error)] #[derive(Debug, Fail)]
pub enum LockError { pub enum LockError {
/// Failed to acquire a lock as it is already held by another /// Failed to acquire a lock as it is already held by another
/// client. /// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period. /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call. /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
#[error("Could not acquire lock as it is already held, possibly by a different process.")] #[fail(
display = "Could not acquire lock as it is already held, possibly by a different process."
)]
LockBusy, LockBusy,
/// Trying to acquire a lock failed with an `IOError` /// Trying to acquire a lock failed with an `IOError`
#[error("Failed to acquire the lock due to an io:Error.")] #[fail(display = "Failed to acquire the lock due to an io:Error.")]
IOError(io::Error), IOError(io::Error),
} }
/// General IO error with an optional path to the offending file.
#[derive(Debug)]
pub struct IOError {
path: Option<PathBuf>,
err: io::Error,
}
impl Into<io::Error> for IOError {
fn into(self) -> io::Error {
self.err
}
}
impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err),
}
}
}
impl StdError for IOError {
fn description(&self) -> &str {
"io error occurred"
}
fn cause(&self) -> Option<&dyn StdError> {
Some(&self.err)
}
}
impl IOError {
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
IOError {
path: Some(path),
err,
}
}
}
impl From<io::Error> for IOError {
fn from(err: io::Error) -> IOError {
IOError { path: None, err }
}
}
/// Error that may occur when opening a directory /// Error that may occur when opening a directory
#[derive(Debug, Error)] #[derive(Debug)]
pub enum OpenDirectoryError { pub enum OpenDirectoryError {
/// The underlying directory does not exist. /// The underlying directory does not exist.
#[error("Directory does not exist: '{0}'.")]
DoesNotExist(PathBuf), DoesNotExist(PathBuf),
/// The path exists but is not a directory. /// The path exists but is not a directory.
#[error("Path exists but is not a directory: '{0}'.")]
NotADirectory(PathBuf), NotADirectory(PathBuf),
/// Failed to create a temp directory.
#[error("Failed to create a temporary directory: '{0}'.")]
FailedToCreateTempDir(io::Error),
/// IoError /// IoError
#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")] IoError(io::Error),
IoError { }
/// underlying io Error.
io_error: io::Error, impl From<io::Error> for OpenDirectoryError {
/// directory we tried to open. fn from(io_err: io::Error) -> Self {
directory_path: PathBuf, OpenDirectoryError::IoError(io_err)
}, }
}
impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path)
}
OpenDirectoryError::NotADirectory(ref path) => {
write!(f, "the path '{:?}' exists but is not a directory", path)
}
OpenDirectoryError::IoError(ref err) => write!(
f,
"IOError while trying to open/create the directory. {:?}",
err
),
}
}
}
impl StdError for OpenDirectoryError {
fn description(&self) -> &str {
"error occurred while opening a directory"
}
fn cause(&self) -> Option<&dyn StdError> {
None
}
} }
/// Error that may occur when starting to write in a file /// Error that may occur when starting to write in a file
#[derive(Debug, Error)] #[derive(Debug)]
pub enum OpenWriteError { pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden. /// Our directory is WORM, writing an existing file is forbidden.
/// Check out the `Directory` documentation. /// Check out the `Directory` documentation.
#[error("File already exists: '{0}'")]
FileAlreadyExists(PathBuf), FileAlreadyExists(PathBuf),
/// Any kind of IO error that happens when /// Any kind of IO error that happens when
/// writing in the underlying IO device. /// writing in the underlying IO device.
#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")] IOError(IOError),
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to open for write.
filepath: PathBuf,
},
} }
impl OpenWriteError { impl From<IOError> for OpenWriteError {
/// Wraps an io error. fn from(err: IOError) -> OpenWriteError {
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self { OpenWriteError::IOError(err)
Self::IOError { io_error, filepath }
} }
} }
impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path)
}
OpenWriteError::IOError(ref err) => write!(
f,
"an io error occurred while opening a file for writing: '{}'",
err
),
}
}
}
impl StdError for OpenWriteError {
fn description(&self) -> &str {
"error occurred while opening a file for writing"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err),
}
}
}
/// Type of index incompatibility between the library and the index found on disk /// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue /// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility { pub enum Incompatibility {
@@ -123,47 +217,55 @@ impl fmt::Debug for Incompatibility {
} }
/// Error that may occur when opening a file for reading /// Error that may occur when opening a file for reading
#[derive(Debug, Error)] #[derive(Debug)]
pub enum OpenReadError { pub enum OpenReadError {
/// The file does not exist. /// The file does not exist.
#[error("File does not exist: {0:?}")]
FileDoesNotExist(PathBuf),
/// Any kind of io::Error.
#[error(
"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
)]
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to open for read.
filepath: PathBuf,
},
/// This library does not support the index version found in file footer.
#[error("Index version unsupported: {0:?}")]
IncompatibleIndex(Incompatibility),
}
impl OpenReadError {
/// Wraps an io error.
pub fn wrap_io_error(io_error: io::Error, filepath: PathBuf) -> Self {
Self::IOError { io_error, filepath }
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug, Error)]
pub enum DeleteError {
/// The file does not exist.
#[error("File does not exist: '{0}'.")]
FileDoesNotExist(PathBuf), FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when /// Any kind of IO error that happens when
/// interacting with the underlying IO device. /// interacting with the underlying IO device.
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")] IOError(IOError),
IOError { /// This library doesn't support the index version found on disk
/// The underlying `io::Error`. IncompatibleIndex(Incompatibility),
io_error: io::Error, }
/// File path of the file that tantivy failed to delete.
filepath: PathBuf, impl From<IOError> for OpenReadError {
}, fn from(err: IOError) -> OpenReadError {
OpenReadError::IOError(err)
}
}
impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
OpenReadError::IOError(ref err) => write!(
f,
"an io error occurred while opening a file for reading: '{}'",
err
),
OpenReadError::IncompatibleIndex(ref footer) => {
write!(f, "Incompatible index format: {:?}", footer)
}
}
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug)]
pub enum DeleteError {
/// The file does not exist.
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
}
impl From<IOError> for DeleteError {
fn from(err: IOError) -> DeleteError {
DeleteError::IOError(err)
}
} }
impl From<Incompatibility> for OpenReadError { impl From<Incompatibility> for OpenReadError {
@@ -171,3 +273,29 @@ impl From<Incompatibility> for OpenReadError {
OpenReadError::IncompatibleIndex(incompatibility) OpenReadError::IncompatibleIndex(incompatibility)
} }
} }
impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
DeleteError::IOError(ref err) => {
write!(f, "an io error occurred while deleting a file: '{}'", err)
}
}
}
}
impl StdError for DeleteError {
fn description(&self) -> &str {
"error occurred while deleting a file"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
DeleteError::FileDoesNotExist(_) => None,
DeleteError::IOError(ref err) => Some(err),
}
}
}
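With the move to `thiserror`-style enums on the left, the io variants become struct variants carrying the offending path, so match sites get both pieces without the old `IOError` wrapper type. A sketch of exhaustively matching the reworked `OpenReadError`, using exactly the variants shown in this hunk:

```rust
use tantivy::directory::error::OpenReadError;

fn describe(err: &OpenReadError) -> String {
    match err {
        OpenReadError::FileDoesNotExist(path) => {
            format!("missing file: {:?}", path)
        }
        OpenReadError::IOError { io_error, filepath } => {
            format!("io error on {:?}: {}", filepath, io_error)
        }
        OpenReadError::IncompatibleIndex(incompatibility) => {
            format!("incompatible index: {:?}", incompatibility)
        }
    }
}
```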
@@ -1,247 +0,0 @@
use stable_deref_trait::StableDeref;
use crate::common::HasLen;
use crate::directory::OwnedBytes;
use std::sync::{Arc, Weak};
use std::{io, ops::Deref};
pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Objects that represent file sections in tantivy.
///
/// By contract, whatever happens to the directory file, as long as a FileHandle
/// is alive, the data associated with it cannot be altered or destroyed.
///
/// The underlying behavior is therefore specific to the `Directory` that created it.
/// Despite its name, a `FileSlice` may or may not directly map to an actual file
/// on the filesystem.
pub trait FileHandle: 'static + Send + Sync + HasLen {
/// Reads a slice of bytes.
///
/// This method may panic if the range requested is invalid.
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes>;
}
impl FileHandle for &'static [u8] {
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
let bytes = &self[from..to];
Ok(OwnedBytes::new(bytes))
}
}
impl<T: Deref<Target = [u8]>> HasLen for T {
fn len(&self) -> usize {
self.as_ref().len()
}
}
impl<B> From<B> for FileSlice
where
B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
{
fn from(bytes: B) -> FileSlice {
FileSlice::new(Box::new(OwnedBytes::new(bytes)))
}
}
/// Logical slice of a read-only file in tantivy.
///
/// It can be cloned and sliced cheaply.
///
#[derive(Clone)]
pub struct FileSlice {
data: Arc<dyn FileHandle>,
start: usize,
stop: usize,
}
impl FileSlice {
/// Wraps a FileHandle.
pub fn new(file_handle: Box<dyn FileHandle>) -> Self {
let num_bytes = file_handle.len();
FileSlice::new_with_num_bytes(file_handle, num_bytes)
}
/// Wraps a FileHandle.
#[doc(hidden)]
pub fn new_with_num_bytes(file_handle: Box<dyn FileHandle>, num_bytes: usize) -> Self {
FileSlice {
data: Arc::from(file_handle),
start: 0,
stop: num_bytes,
}
}
/// Creates a fileslice that is just a view over a slice of the data.
///
/// # Panics
///
/// Panics if `to < from` or if `to` exceeds the filesize.
pub fn slice(&self, from: usize, to: usize) -> FileSlice {
assert!(to <= self.len());
assert!(to >= from);
FileSlice {
data: self.data.clone(),
start: self.start + from,
stop: self.start + to,
}
}
/// Creates an empty FileSlice
pub fn empty() -> FileSlice {
const EMPTY_SLICE: &[u8] = &[];
FileSlice::from(EMPTY_SLICE)
}
/// Returns a `OwnedBytes` with all of the data in the `FileSlice`.
///
/// The behavior is strongly dependent on the implementation of the underlying
/// `Directory` and the `FileHandle` it creates.
/// In particular, it is up to the `Directory` implementation
/// to handle caching if needed.
pub fn read_bytes(&self) -> io::Result<OwnedBytes> {
self.data.read_bytes(self.start, self.stop)
}
/// Reads a specific slice of data.
///
/// This is equivalent to running `file_slice.slice(from, to).read_bytes()`.
pub fn read_bytes_slice(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
assert!(from <= to);
assert!(
self.start + to <= self.stop,
"`to` exceeds the fileslice length"
);
self.data.read_bytes(self.start + from, self.start + to)
}
/// Splits the FileSlice at the given offset and return two file slices.
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
///
/// This operation is cheap and must not copy any underlying data.
pub fn split(self, left_len: usize) -> (FileSlice, FileSlice) {
let left = self.slice_to(left_len);
let right = self.slice_from(left_len);
(left, right)
}
/// Splits the file slice at the given offset and return two file slices.
/// `file_slice[..split_offset]` and `file_slice[split_offset..]`.
pub fn split_from_end(self, right_len: usize) -> (FileSlice, FileSlice) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Like `.slice(...)` but enforcing only the `from`
/// boundary.
///
/// Equivalent to `.slice(from_offset, self.len())`
pub fn slice_from(&self, from_offset: usize) -> FileSlice {
self.slice(from_offset, self.len())
}
/// Like `.slice(...)` but enforcing only the `to`
/// boundary.
///
/// Equivalent to `.slice(0, to_offset)`
pub fn slice_to(&self, to_offset: usize) -> FileSlice {
self.slice(0, to_offset)
}
}
impl FileHandle for FileSlice {
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
self.read_bytes_slice(from, to)
}
}
impl HasLen for FileSlice {
fn len(&self) -> usize {
self.stop - self.start
}
}
#[cfg(test)]
mod tests {
use super::{FileHandle, FileSlice};
use crate::common::HasLen;
use std::io;
#[test]
fn test_file_slice() -> io::Result<()> {
let file_slice = FileSlice::new(Box::new(b"abcdef".as_ref()));
assert_eq!(file_slice.len(), 6);
assert_eq!(file_slice.slice_from(2).read_bytes()?.as_slice(), b"cdef");
assert_eq!(file_slice.slice_to(2).read_bytes()?.as_slice(), b"ab");
assert_eq!(
file_slice
.slice_from(1)
.slice_to(2)
.read_bytes()?
.as_slice(),
b"bc"
);
{
let (left, right) = file_slice.clone().split(0);
assert_eq!(left.read_bytes()?.as_slice(), b"");
assert_eq!(right.read_bytes()?.as_slice(), b"abcdef");
}
{
let (left, right) = file_slice.clone().split(2);
assert_eq!(left.read_bytes()?.as_slice(), b"ab");
assert_eq!(right.read_bytes()?.as_slice(), b"cdef");
}
{
let (left, right) = file_slice.clone().split_from_end(0);
assert_eq!(left.read_bytes()?.as_slice(), b"abcdef");
assert_eq!(right.read_bytes()?.as_slice(), b"");
}
{
let (left, right) = file_slice.clone().split_from_end(2);
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
}
Ok(())
}
#[test]
fn test_file_slice_trait_slice_len() {
let blop: &'static [u8] = b"abc";
let owned_bytes: Box<dyn FileHandle> = Box::new(blop);
assert_eq!(owned_bytes.len(), 3);
}
#[test]
fn test_slice_simple_read() -> io::Result<()> {
let slice = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!(slice.len(), 6);
assert_eq!(slice.read_bytes()?.as_ref(), b"abcdef");
assert_eq!(slice.slice(1, 4).read_bytes()?.as_ref(), b"bcd");
Ok(())
}
#[test]
fn test_slice_read_slice() -> io::Result<()> {
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!(slice_deref.read_bytes_slice(1, 4)?.as_ref(), b"bcd");
Ok(())
}
#[test]
#[should_panic(expected = "assertion failed: from <= to")]
fn test_slice_read_slice_invalid_range() {
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!(slice_deref.read_bytes_slice(1, 0).unwrap().as_ref(), b"bcd");
}
#[test]
#[should_panic(expected = "`to` exceeds the fileslice length")]
fn test_slice_read_slice_invalid_range_exceeds() {
let slice_deref = FileSlice::new(Box::new(&b"abcdef"[..]));
assert_eq!(
slice_deref.read_bytes_slice(0, 10).unwrap().as_ref(),
b"bcd"
);
}
}
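`split` and `split_from_end` are the primitives that `Footer::extract_footer`, further down in this diff, is built on: the last four bytes of a file hold a little-endian footer length. A sketch of that pattern against an in-memory `FileSlice`; the payload bytes are illustrative:

```rust
use std::convert::TryInto;
use std::io;
use tantivy::directory::FileSlice;

/// Splits `file` into (body, footer), reading the trailing
/// little-endian u32 that records the footer's length.
fn split_off_footer(file: FileSlice) -> io::Result<(FileSlice, FileSlice)> {
    let (body_and_footer, len_slice) = file.split_from_end(4);
    let len_bytes = len_slice.read_bytes()?;
    let footer_len = u32::from_le_bytes(len_bytes.as_slice().try_into().unwrap()) as usize;
    Ok(body_and_footer.split_from_end(footer_len))
}

fn main() -> io::Result<()> {
    // "BODY" + "FOOT" + the footer length (4) as little-endian u32.
    let mut raw = b"BODYFOOT".to_vec();
    raw.extend_from_slice(&4u32.to_le_bytes());
    let (body, footer) = split_off_footer(FileSlice::from(raw))?;
    assert_eq!(body.read_bytes()?.as_slice(), b"BODY");
    assert_eq!(footer.read_bytes()?.as_slice(), b"FOOT");
    Ok(())
}
```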
@@ -1,178 +0,0 @@
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
use crc32fast::Hasher;
use std::fs;
use std::io;
use std::io::BufRead;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
// Watches a file and executes registered callbacks when the file is modified.
pub struct FileWatcher {
path: Arc<Path>,
callbacks: Arc<WatchCallbackList>,
state: Arc<AtomicUsize>, // 0: new, 1: runnable, 2: terminated
}
impl FileWatcher {
pub fn new(path: &Path) -> FileWatcher {
FileWatcher {
path: Arc::from(path),
callbacks: Default::default(),
state: Default::default(),
}
}
pub fn spawn(&self) {
if self.state.compare_and_swap(0, 1, Ordering::SeqCst) > 0 {
return;
}
let path = self.path.clone();
let callbacks = self.callbacks.clone();
let state = self.state.clone();
thread::Builder::new()
.name("thread-tantivy-meta-file-watcher".to_string())
.spawn(move || {
let mut current_checksum = None;
while state.load(Ordering::SeqCst) == 1 {
if let Ok(checksum) = FileWatcher::compute_checksum(&path) {
// `None.unwrap_or_else(|| !checksum) != checksum` evaluates to `true`
if current_checksum.unwrap_or_else(|| !checksum) != checksum {
info!("Meta file {:?} was modified", path);
current_checksum = Some(checksum);
futures::executor::block_on(callbacks.broadcast());
}
}
thread::sleep(POLLING_INTERVAL);
}
})
.expect("Failed to spawn meta file watcher thread");
}
pub fn watch(&self, callback: WatchCallback) -> WatchHandle {
let handle = self.callbacks.subscribe(callback);
self.spawn();
handle
}
fn compute_checksum(path: &Path) -> Result<u32, io::Error> {
let reader = match fs::File::open(path) {
Ok(f) => io::BufReader::new(f),
Err(e) => {
warn!("Failed to open meta file {:?}: {:?}", path, e);
return Err(e);
}
};
let mut hasher = Hasher::new();
for line in reader.lines() {
hasher.update(line?.as_bytes())
}
Ok(hasher.finalize())
}
}
impl Drop for FileWatcher {
fn drop(&mut self) {
self.state.store(2, Ordering::SeqCst);
}
}
#[cfg(test)]
mod tests {
use std::mem;
use crate::directory::mmap_directory::atomic_write;
use super::*;
#[test]
fn test_file_watcher_drop_watcher() -> crate::Result<()> {
let tmp_dir = tempfile::TempDir::new()?;
let tmp_file = tmp_dir.path().join("watched.txt");
let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam::channel::unbounded();
let timeout = Duration::from_millis(100);
let watcher = FileWatcher::new(&tmp_file);
let state = watcher.state.clone();
assert_eq!(state.load(Ordering::SeqCst), 0);
let counter_clone = counter.clone();
let _handle = watcher.watch(WatchCallback::new(move || {
let val = counter_clone.fetch_add(1, Ordering::SeqCst);
tx.send(val + 1).unwrap();
}));
assert_eq!(counter.load(Ordering::SeqCst), 0);
assert_eq!(state.load(Ordering::SeqCst), 1);
atomic_write(&tmp_file, b"foo")?;
assert_eq!(rx.recv_timeout(timeout), Ok(1));
atomic_write(&tmp_file, b"foo")?;
assert!(rx.recv_timeout(timeout).is_err());
atomic_write(&tmp_file, b"bar")?;
assert_eq!(rx.recv_timeout(timeout), Ok(2));
mem::drop(watcher);
atomic_write(&tmp_file, b"qux")?;
thread::sleep(Duration::from_millis(10));
assert_eq!(counter.load(Ordering::SeqCst), 2);
assert_eq!(state.load(Ordering::SeqCst), 2);
Ok(())
}
#[test]
fn test_file_watcher_drop_handle() -> crate::Result<()> {
let tmp_dir = tempfile::TempDir::new()?;
let tmp_file = tmp_dir.path().join("watched.txt");
let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam::channel::unbounded();
let timeout = Duration::from_millis(100);
let watcher = FileWatcher::new(&tmp_file);
let state = watcher.state.clone();
assert_eq!(state.load(Ordering::SeqCst), 0);
let counter_clone = counter.clone();
let handle = watcher.watch(WatchCallback::new(move || {
let val = counter_clone.fetch_add(1, Ordering::SeqCst);
tx.send(val + 1).unwrap();
}));
assert_eq!(counter.load(Ordering::SeqCst), 0);
assert_eq!(state.load(Ordering::SeqCst), 1);
atomic_write(&tmp_file, b"foo")?;
assert_eq!(rx.recv_timeout(timeout), Ok(1));
mem::drop(handle);
atomic_write(&tmp_file, b"qux")?;
assert_eq!(counter.load(Ordering::SeqCst), 1);
assert_eq!(state.load(Ordering::SeqCst), 1);
Ok(())
}
}

View File

@@ -1,8 +1,9 @@
use crate::common::{BinarySerializable, CountingWriter, FixedSize, HasLen, VInt}; use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility; use crate::directory::error::Incompatibility;
use crate::directory::FileSlice; use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite}; use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version; use crate::Version;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher; use crc32fast::Hasher;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -63,26 +64,26 @@ impl Footer {
let mut counting_write = CountingWriter::wrap(&mut write); let mut counting_write = CountingWriter::wrap(&mut write);
self.serialize(&mut counting_write)?; self.serialize(&mut counting_write)?;
let written_len = counting_write.written_bytes(); let written_len = counting_write.written_bytes();
(written_len as u32).serialize(write)?; write.write_u32::<LittleEndian>(written_len as u32)?;
Ok(()) Ok(())
} }
pub fn extract_footer(file: FileSlice) -> io::Result<(Footer, FileSlice)> { pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
if file.len() < 4 { if source.len() < 4 {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::UnexpectedEof, io::ErrorKind::UnexpectedEof,
format!( format!(
"File corrupted. The file is smaller than 4 bytes (len={}).", "File corrupted. The file is smaller than 4 bytes (len={}).",
file.len() source.len()
), ),
)); ));
} }
let (body_footer, footer_len_file) = file.split_from_end(u32::SIZE_IN_BYTES); let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
let mut footer_len_bytes = footer_len_file.read_bytes()?; let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
let footer_len = u32::deserialize(&mut footer_len_bytes)? as usize; let body_len = body_footer.len() - footer_len;
let (body, footer) = body_footer.split_from_end(footer_len); let (body, footer_data) = body_footer.split(body_len);
let mut footer_bytes = footer.read_bytes()?; let mut cursor = footer_data.as_slice();
let footer = Footer::deserialize(&mut footer_bytes)?; let footer = Footer::deserialize(&mut cursor)?;
Ok((footer, body)) Ok((footer, body))
} }
@@ -93,36 +94,12 @@ impl Footer {
match &self.versioned_footer { match &self.versioned_footer {
VersionedFooter::V1 { VersionedFooter::V1 {
crc32: _crc, crc32: _crc,
store_compression, store_compression: compression,
} => { } => {
if &library_version.store_compression != store_compression { if &library_version.store_compression != compression {
return Err(Incompatibility::CompressionMismatch { return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(), library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(), index_compression_format: compression.to_string(),
});
}
Ok(())
}
VersionedFooter::V2 {
crc32: _crc,
store_compression,
} => {
if &library_version.store_compression != store_compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
});
}
Ok(())
}
VersionedFooter::V3 {
crc32: _crc,
store_compression,
} => {
if &library_version.store_compression != store_compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
}); });
} }
Ok(()) Ok(())
@@ -143,36 +120,24 @@ pub enum VersionedFooter {
crc32: CrcHashU32, crc32: CrcHashU32,
store_compression: String, store_compression: String,
}, },
// Introduction of the Block WAND information.
V2 {
crc32: CrcHashU32,
store_compression: String,
},
// Block WAND max term frequency encoded on 1 byte
V3 {
crc32: CrcHashU32,
store_compression: String,
},
} }
impl BinarySerializable for VersionedFooter { impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new(); let mut buf = Vec::new();
match self { match self {
VersionedFooter::V3 { VersionedFooter::V1 {
crc32, crc32,
store_compression: compression, store_compression: compression,
} => { } => {
// Serializes a valid `VersionedFooter`, or returns an error if the version is unknown // Serializes a valid `VersionedFooter`, or returns an error if the version is unknown
// [ version | crc_hash | compression_mode ] // [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ] // [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&3u32, &mut buf)?; BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?; BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?; BinarySerializable::serialize(compression, &mut buf)?;
} }
VersionedFooter::V2 { .. } VersionedFooter::UnknownVersion => {
| VersionedFooter::V1 { .. }
| VersionedFooter::UnknownVersion => {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::InvalidInput, io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ", "Cannot serialize an unknown versioned footer ",
@@ -201,36 +166,22 @@ impl BinarySerializable for VersionedFooter {
reader.read_exact(&mut buf[..])?; reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..]; let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?; let version = u32::deserialize(&mut cursor)?;
if version > 3 { if version == 1 {
return Ok(VersionedFooter::UnknownVersion); let crc32 = u32::deserialize(&mut cursor)?;
} let compression = String::deserialize(&mut cursor)?;
let crc32 = u32::deserialize(&mut cursor)?; Ok(VersionedFooter::V1 {
let store_compression = String::deserialize(&mut cursor)?;
Ok(if version == 1 {
VersionedFooter::V1 {
crc32, crc32,
store_compression, store_compression: compression,
} })
} else if version == 2 {
VersionedFooter::V2 {
crc32,
store_compression,
}
} else { } else {
assert_eq!(version, 3); Ok(VersionedFooter::UnknownVersion)
VersionedFooter::V3 { }
crc32,
store_compression,
}
})
} }
} }
impl VersionedFooter { impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> { pub fn crc(&self) -> Option<CrcHashU32> {
match self { match self {
VersionedFooter::V3 { crc32, .. } => Some(*crc32),
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
VersionedFooter::V1 { crc32, .. } => Some(*crc32), VersionedFooter::V1 { crc32, .. } => Some(*crc32),
VersionedFooter::UnknownVersion { .. } => None, VersionedFooter::UnknownVersion { .. } => None,
} }
@@ -268,7 +219,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> { impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> { fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize(); let crc32 = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V3 { let footer = Footer::new(VersionedFooter::V1 {
crc32, crc32,
store_compression: crate::store::COMPRESSION.to_string(), store_compression: crate::store::COMPRESSION.to_string(),
}); });
@@ -295,17 +246,17 @@ mod tests {
let mut vec = Vec::new(); let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec); let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok()); assert!(footer_proxy.terminate().is_ok());
if crate::store::COMPRESSION == "lz4" { assert_eq!(vec.len(), 167);
assert_eq!(vec.len(), 158);
} else {
assert_eq!(vec.len(), 167);
}
let footer = Footer::deserialize(&mut &vec[..]).unwrap(); let footer = Footer::deserialize(&mut &vec[..]).unwrap();
assert!(matches!( if let VersionedFooter::V1 {
footer.versioned_footer, crc32: _,
VersionedFooter::V3 { store_compression, .. } store_compression,
if store_compression == crate::store::COMPRESSION } = footer.versioned_footer
)); {
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version()); assert_eq!(&footer.version, crate::version());
} }
@@ -313,7 +264,7 @@ mod tests {
fn test_serialize_deserialize_footer() { fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
let crc32 = 123456u32; let crc32 = 123456u32;
let footer: Footer = Footer::new(VersionedFooter::V3 { let footer: Footer = Footer::new(VersionedFooter::V1 {
crc32, crc32,
store_compression: "lz4".to_string(), store_compression: "lz4".to_string(),
}); });
@@ -325,7 +276,7 @@ mod tests {
#[test] #[test]
fn footer_length() { fn footer_length() {
let crc32 = 1111111u32; let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V3 { let versioned_footer = VersionedFooter::V1 {
crc32, crc32,
store_compression: "lz4".to_string(), store_compression: "lz4".to_string(),
}; };
@@ -346,7 +297,7 @@ mod tests {
// versioned footer length // versioned footer length
12 | 128, 12 | 128,
// index format version // index format version
3, 1,
0, 0,
0, 0,
0, 0,
@@ -365,7 +316,7 @@ mod tests {
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap(); let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty()); assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32; let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V3 { let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc, crc32: expected_crc,
store_compression: "lz4".to_string(), store_compression: "lz4".to_string(),
}; };
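Pulling the byte literals in these tests together (an editor's reading; the VInt stop bit on the final byte is inferred from the `12 | 128` literal): the versioned footer is written as a one-byte VInt length followed by its payload,

[ len = 12 (VInt: 12 | 128) | format version: u32 LE (4 bytes) | crc32: u32 LE (4 bytes) | compression "lz4": VInt len (1 byte) + UTF-8 (3 bytes) ]

and 4 + 4 + 1 + 3 = 12 bytes of payload, which is why the expected CRC is read out of `v_footer_bytes[5..9]`. At the file level, `extract_footer` then peels the trailing 4 bytes off as a little-endian u32 giving the footer length, and splits the footer from the body.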

View File

@@ -1,11 +1,11 @@
use crate::core::{MANAGED_FILEPATH, META_FILEPATH}; use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult; use crate::directory::GarbageCollectionResult;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
use crate::directory::{DirectoryLock, FileHandle}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{FileSlice, WritePtr};
use crate::directory::{WatchCallback, WatchHandle}; use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
@@ -53,7 +53,7 @@ struct MetaInformation {
/// Saves the file containing the list of existing files /// Saves the file containing the list of existing files
/// that were created by tantivy. /// that were created by tantivy.
fn save_managed_paths( fn save_managed_paths(
directory: &dyn Directory, directory: &mut dyn Directory,
wlock: &RwLockWriteGuard<'_, MetaInformation>, wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> { ) -> io::Result<()> {
let mut w = serde_json::to_vec(&wlock.managed_paths)?; let mut w = serde_json::to_vec(&wlock.managed_paths)?;
@@ -86,7 +86,7 @@ impl ManagedDirectory {
directory: Box::new(directory), directory: Box::new(directory),
meta_informations: Arc::default(), meta_informations: Arc::default(),
}), }),
io_err @ Err(OpenReadError::IOError { .. }) => Err(io_err.err().unwrap().into()), Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => { Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json` // For the moment, this should never happen: `meta.json`
// does not have a footer, so incompatibility cannot be detected. // does not have a footer, so incompatibility cannot be detected.
@@ -168,7 +168,7 @@ impl ManagedDirectory {
DeleteError::FileDoesNotExist(_) => { DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete.clone()); deleted_files.push(file_to_delete.clone());
} }
DeleteError::IOError { .. } => { DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone()); failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") { if !cfg!(target_os = "windows") {
// On Windows, delete is expected to fail if the file // On Windows, delete is expected to fail if the file
@@ -212,7 +212,7 @@ impl ManagedDirectory {
/// Files starting with "." are reserved for locks. /// Files starting with "." are reserved for locks.
/// They are not managed and cannot be subjected /// They are not managed and cannot be subjected
/// to garbage collection. /// to garbage collection.
fn register_file_as_managed(&self, filepath: &Path) -> io::Result<()> { fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
// Files starting with "." (e.g. lock files) are not managed. // Files starting with "." (e.g. lock files) are not managed.
if !is_managed(filepath) { if !is_managed(filepath) {
return Ok(()); return Ok(());
@@ -223,7 +223,7 @@ impl ManagedDirectory {
.expect("Managed file lock poisoned"); .expect("Managed file lock poisoned");
let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned()); let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
if has_changed { if has_changed {
save_managed_paths(self.directory.as_ref(), &meta_wlock)?; save_managed_paths(self.directory.as_mut(), &meta_wlock)?;
} }
Ok(()) Ok(())
} }
@@ -231,19 +231,10 @@ impl ManagedDirectory {
/// Verify checksum of a managed file /// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> { pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?; let reader = self.directory.open_read(path)?;
let (footer, data) = let (footer, data) = Footer::extract_footer(reader)
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError { .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
io_error,
filepath: path.to_path_buf(),
})?;
let bytes = data
.read_bytes()
.map_err(|io_error| OpenReadError::IOError {
filepath: path.to_path_buf(),
io_error,
})?;
let mut hasher = Hasher::new(); let mut hasher = Hasher::new();
hasher.update(bytes.as_slice()); hasher.update(data.as_slice());
let crc = hasher.finalize(); let crc = hasher.finalize();
Ok(footer Ok(footer
.versioned_footer .versioned_footer
@@ -254,42 +245,35 @@ impl ManagedDirectory {
/// List files for which checksum does not match content /// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> { pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut managed_paths = self let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations .meta_informations
.read() .read()
.expect("Managed directory rlock poisoned in list damaged.") .expect("Managed directory rlock poisoned in list damaged.")
.managed_paths .managed_paths
.clone(); .clone();
managed_paths.remove(*META_FILEPATH); for path in managed_paths.into_iter() {
let mut damaged_files = HashSet::new();
for path in managed_paths {
if !self.validate_checksum(&path)? { if !self.validate_checksum(&path)? {
damaged_files.insert(path); hashset.insert(path);
} }
} }
Ok(damaged_files) Ok(hashset)
} }
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let file_slice = self.open_read(path)?; let read_only_source = self.directory.open_read(path)?;
Ok(Box::new(file_slice)) let (footer, reader) = Footer::extract_footer(read_only_source)
} .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
let file_slice = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(file_slice)
.map_err(|io_error| OpenReadError::wrap_io_error(io_error, path.to_path_buf()))?;
footer.is_compatible()?; footer.is_compatible()?;
Ok(reader) Ok(reader)
} }
fn open_write(&self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new( Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory self.directory
.open_write(path)? .open_write(path)?
@@ -299,7 +283,7 @@ impl Directory for ManagedDirectory {
)))) ))))
} }
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
self.register_file_as_managed(path)?; self.register_file_as_managed(path)?;
self.directory.atomic_write(path, data) self.directory.atomic_write(path, data)
} }
@@ -312,7 +296,7 @@ impl Directory for ManagedDirectory {
self.directory.delete(path) self.directory.delete(path)
} }
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> { fn exists(&self, path: &Path) -> bool {
self.directory.exists(path) self.directory.exists(path)
} }
@@ -360,22 +344,22 @@ mod tests_mmap_specific {
managed_directory managed_directory
.atomic_write(test_path2, &[0u8, 1u8]) .atomic_write(test_path2, &[0u8, 1u8])
.unwrap(); .unwrap();
assert!(managed_directory.exists(test_path1).unwrap()); assert!(managed_directory.exists(test_path1));
assert!(managed_directory.exists(test_path2).unwrap()); assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect(); let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(managed_directory.exists(test_path1).unwrap()); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2).unwrap()); assert!(!managed_directory.exists(test_path2));
} }
{ {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
assert!(managed_directory.exists(test_path1).unwrap()); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2).unwrap()); assert!(!managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = HashSet::new(); let living_files: HashSet<PathBuf> = HashSet::new();
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(!managed_directory.exists(test_path1).unwrap()); assert!(!managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2).unwrap()); assert!(!managed_directory.exists(test_path2));
} }
} }
@@ -392,7 +376,7 @@ mod tests_mmap_specific {
let mut write = managed_directory.open_write(test_path1).unwrap(); let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap(); write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap(); write.terminate().unwrap();
assert!(managed_directory.exists(test_path1).unwrap()); assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap(); let _mmap_read = managed_directory.open_read(test_path1).unwrap();
assert!(managed_directory assert!(managed_directory
@@ -400,50 +384,52 @@ mod tests_mmap_specific {
.is_ok()); .is_ok());
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
// On Windows, gc should try and fail the file as it is mmapped. // On Windows, gc should try and fail the file as it is mmapped.
assert!(managed_directory.exists(test_path1).unwrap()); assert!(managed_directory.exists(test_path1));
// unmap should happen here. // unmap should happen here.
drop(_mmap_read); drop(_mmap_read);
// The file should still be in the list of managed file and // The file should still be in the list of managed file and
// eventually be deleted once mmap is released. // eventually be deleted once mmap is released.
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(!managed_directory.exists(test_path1).unwrap()); assert!(!managed_directory.exists(test_path1));
} else { } else {
assert!(!managed_directory.exists(test_path1).unwrap()); assert!(!managed_directory.exists(test_path1));
} }
} }
#[test] #[test]
fn test_checksum() -> crate::Result<()> { fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test"); let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path"); let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path)?; let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let managed_directory = ManagedDirectory::wrap(mmap_directory)?; let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1)?; let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8])?; write.write_all(&[0u8, 1u8]).unwrap();
write.terminate()?; write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2)?; let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8])?; write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate()?; write.terminate().unwrap();
let read_file = managed_directory.open_read(test_path2)?.read_bytes()?; let read_source = managed_directory.open_read(test_path2).unwrap();
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]); assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty()); assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone(); let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2); corrupted_path.push(test_path2);
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?; let mut file = OpenOptions::new()
file.write_all(&[255u8])?; .write(true)
file.flush()?; .open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file); drop(file);
let damaged = managed_directory.list_damaged()?; let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1); assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2)); assert!(damaged.contains(test_path2));
Ok(())
} }
} }

View File

@@ -1,19 +1,25 @@
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError}; use crate::directory::error::{
use crate::directory::file_watcher::FileWatcher; DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback; use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle; use crate::directory::WatchHandle;
use crate::directory::{AntiCallToken, FileHandle, OwnedBytes};
use crate::directory::{ArcBytes, WeakArcBytes};
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::{TerminatingWrite, WritePtr};
use fs2::FileExt; use fs2::FileExt;
use memmap::Mmap; use memmap::Mmap;
use notify::RawEvent;
use notify::RecursiveMode;
use notify::Watcher;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use stable_deref_trait::StableDeref; use std::collections::HashMap;
use std::convert::From; use std::convert::From;
use std::fmt; use std::fmt;
use std::fs::OpenOptions; use std::fs::OpenOptions;
@@ -22,9 +28,12 @@ use std::io::{self, Seek, SeekFrom};
use std::io::{BufWriter, Read, Write}; use std::io::{BufWriter, Read, Write};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::result; use std::result;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc; use std::sync::Arc;
use std::sync::Mutex;
use std::sync::RwLock; use std::sync::RwLock;
use std::{collections::HashMap, ops::Deref}; use std::sync::Weak;
use std::thread;
use tempfile::TempDir; use tempfile::TempDir;
/// Create a default io error given a string. /// Create a default io error given a string.
@@ -35,17 +44,17 @@ pub(crate) fn make_io_err(msg: String) -> io::Error {
/// Returns None iff the file exists, can be read, but is empty (and hence /// Returns None iff the file exists, can be read, but is empty (and hence
/// cannot be mmapped) /// cannot be mmapped)
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> { fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
let file = File::open(full_path).map_err(|io_err| { let file = File::open(full_path).map_err(|e| {
if io_err.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
OpenReadError::FileDoesNotExist(full_path.to_path_buf()) OpenReadError::FileDoesNotExist(full_path.to_owned())
} else { } else {
OpenReadError::wrap_io_error(io_err, full_path.to_path_buf()) OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
} }
})?; })?;
let meta_data = file let meta_data = file
.metadata() .metadata()
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_owned()))?; .map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
if meta_data.len() == 0 { if meta_data.len() == 0 {
// if the file size is 0, it will not be possible // if the file size is 0, it will not be possible
// to mmap the file, so we return None // to mmap the file, so we return None
@@ -55,7 +64,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
unsafe { unsafe {
memmap::Mmap::map(&file) memmap::Mmap::map(&file)
.map(Some) .map(Some)
.map_err(|io_err| OpenReadError::wrap_io_error(io_err, full_path.to_path_buf())) .map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
} }
} }
@@ -76,7 +85,7 @@ pub struct CacheInfo {
struct MmapCache { struct MmapCache {
counters: CacheCounters, counters: CacheCounters,
cache: HashMap<PathBuf, WeakArcBytes>, cache: HashMap<PathBuf, Weak<BoxedData>>,
} }
impl Default for MmapCache { impl Default for MmapCache {
@@ -110,7 +119,7 @@ impl MmapCache {
} }
// Returns None if the file exists but has a len of 0 (and hence is not mmappable). // Returns None if the file exists but has a len of 0 (and hence is not mmappable).
fn get_mmap(&mut self, full_path: &Path) -> Result<Option<ArcBytes>, OpenReadError> { fn get_mmap(&mut self, full_path: &Path) -> Result<Option<Arc<BoxedData>>, OpenReadError> {
if let Some(mmap_weak) = self.cache.get(full_path) { if let Some(mmap_weak) = self.cache.get(full_path) {
if let Some(mmap_arc) = mmap_weak.upgrade() { if let Some(mmap_arc) = mmap_weak.upgrade() {
self.counters.hit += 1; self.counters.hit += 1;
@@ -121,7 +130,7 @@ impl MmapCache {
self.counters.miss += 1; self.counters.miss += 1;
let mmap_opt = open_mmap(full_path)?; let mmap_opt = open_mmap(full_path)?;
Ok(mmap_opt.map(|mmap| { Ok(mmap_opt.map(|mmap| {
let mmap_arc: ArcBytes = Arc::new(mmap); let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
let mmap_weak = Arc::downgrade(&mmap_arc); let mmap_weak = Arc::downgrade(&mmap_arc);
self.cache.insert(full_path.to_owned(), mmap_weak); self.cache.insert(full_path.to_owned(), mmap_weak);
mmap_arc mmap_arc
@@ -129,6 +138,63 @@ impl MmapCache {
} }
} }
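The caching trick above, in isolation (an editor's sketch; `WeakCache` and `get_or_build` are hypothetical names): holding only `Weak` handles lets the cache reuse a mapping for as long as some reader keeps it alive, without the cache itself pinning the mmap in memory.

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Weak};

struct WeakCache<T>(HashMap<PathBuf, Weak<T>>);

impl<T> WeakCache<T> {
    fn get_or_build(&mut self, key: &Path, build: impl FnOnce() -> T) -> Arc<T> {
        if let Some(weak) = self.0.get(key) {
            if let Some(arc) = weak.upgrade() {
                // Hit: a reader still holds the value; hand out another handle.
                return arc;
            }
        }
        // Miss, or the value has since been dropped: rebuild it and keep only
        // a weak handle, so the entry never prevents the value from being freed.
        let arc = Arc::new(build());
        self.0.insert(key.to_owned(), Arc::downgrade(&arc));
        arc
    }
}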
struct WatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>,
}
impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the watcher and start watching the directory before returning.
let watcher = notify::raw_watcher(tx)
.and_then(|mut watcher| {
watcher.watch(path, RecursiveMode::Recursive)?;
Ok(watcher)
})
.map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting to watch directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
thread::Builder::new()
.name("meta-file-watch-thread".to_string())
.spawn(move || {
loop {
match watcher_recv.recv().map(|evt| evt.path) {
Ok(Some(changed_path)) => {
// ... Actually subject to false positives.
// We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH {
let _ = watcher_router_clone.broadcast();
}
}
}
Ok(None) => {
// not an event we are interested in.
}
Err(_e) => {
// the watch send channel was dropped
break;
}
}
}
})?;
Ok(WatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router,
})
}
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.watcher_router.subscribe(watch_callback)
}
}
/// Directory storing data in files, read via mmap. /// Directory storing data in files, read via mmap.
/// ///
/// The Mmap objects are cached to limit the /// The Mmap objects are cached to limit the
@@ -150,21 +216,40 @@ struct MmapDirectoryInner {
root_path: PathBuf, root_path: PathBuf,
mmap_cache: RwLock<MmapCache>, mmap_cache: RwLock<MmapCache>,
_temp_directory: Option<TempDir>, _temp_directory: Option<TempDir>,
watcher: FileWatcher, watcher: RwLock<Option<WatcherWrapper>>,
} }
impl MmapDirectoryInner { impl MmapDirectoryInner {
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner { fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
MmapDirectoryInner { MmapDirectoryInner {
root_path,
mmap_cache: Default::default(), mmap_cache: Default::default(),
_temp_directory: temp_directory, _temp_directory: temp_directory,
watcher: FileWatcher::new(&root_path.join(*META_FILEPATH)), watcher: RwLock::new(None),
root_path,
} }
} }
fn watch(&self, callback: WatchCallback) -> crate::Result<WatchHandle> { fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
Ok(self.watcher.watch(callback)) // a lot of juggling here, to ensure we don't do anything that panics
// while the rwlock is held. That way we ensure that the rwlock cannot
// be poisoned.
//
// The downside is that we might create a watch wrapper that is not useful.
let need_initialization = self.watcher.read().unwrap().is_none();
if need_initialization {
let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
let mut watch_wlock = self.watcher.write().unwrap();
// the watcher could have been initialized by another thread while the
// lock was released, and we do not want to lose the callbacks it holds.
if watch_wlock.is_none() {
*watch_wlock = Some(watch_wrapper);
}
}
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
Ok(watch_wrapper.watch(watch_callback))
} else {
unreachable!("At this point, watch wrapper is supposed to be initialized");
}
} }
} }
@@ -187,11 +272,9 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself. /// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RAMDirectory. /// For your unit tests, prefer the RAMDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> { pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?; let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
Ok(MmapDirectory::new( let tempdir_path = PathBuf::from(tempdir.path());
tempdir.path().to_path_buf(), Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
Some(tempdir),
))
} }
/// Opens a MmapDirectory in a directory. /// Opens a MmapDirectory in a directory.
@@ -313,38 +396,8 @@ impl TerminatingWrite for SafeFileWriter {
} }
} }
#[derive(Clone)]
struct MmapArc(Arc<dyn Deref<Target = [u8]> + Send + Sync>);
impl Deref for MmapArc {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.0.deref()
}
}
unsafe impl StableDeref for MmapArc {}
/// Writes a file in an atomic manner.
pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
// We create the temporary file in the same directory as the target file.
// The canonical temp directory and the target file might sit on different
// filesystems, and persisting the temp file below is a rename, which is
// atomic only within a single filesystem.
let parent_path = path.parent().ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("Path {:?} does not have a parent directory.", path),
)
})?;
let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
tempfile.write_all(content)?;
tempfile.flush()?;
tempfile.into_temp_path().persist(path)?;
Ok(())
}
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn get_file_handle(&self, path: &Path) -> result::Result<Box<dyn FileHandle>, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path); debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -354,19 +407,12 @@ impl Directory for MmapDirectory {
on mmap cache while reading {:?}", on mmap cache while reading {:?}",
path path
); );
let io_err = make_io_err(msg); IOError::with_path(path.to_owned(), make_io_err(msg))
OpenReadError::wrap_io_error(io_err, path.to_path_buf())
})?; })?;
Ok(mmap_cache
let owned_bytes = mmap_cache
.get_mmap(&full_path)? .get_mmap(&full_path)?
.map(|mmap_arc| { .map(ReadOnlySource::from)
let mmap_arc_obj = MmapArc(mmap_arc); .unwrap_or_else(ReadOnlySource::empty))
OwnedBytes::new(mmap_arc_obj)
})
.unwrap_or_else(OwnedBytes::empty);
Ok(Box::new(owned_bytes))
} }
/// Any entry associated with the path in the mmap will be /// Any entry associated with the path in the mmap will be
@@ -374,29 +420,25 @@ impl Directory for MmapDirectory {
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
match fs::remove_file(&full_path) { match fs::remove_file(&full_path) {
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError { Ok(_) => self
io_error: e, .sync_directory()
filepath: path.to_path_buf(), .map_err(|e| IOError::with_path(path.to_owned(), e).into()),
}),
Err(e) => { Err(e) => {
if e.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
Err(DeleteError::FileDoesNotExist(path.to_owned())) Err(DeleteError::FileDoesNotExist(path.to_owned()))
} else { } else {
Err(DeleteError::IOError { Err(IOError::with_path(path.to_owned(), e).into())
io_error: e,
filepath: path.to_path_buf(),
})
} }
} }
} }
} }
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> { fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
Ok(full_path.exists()) full_path.exists()
} }
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path); debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -405,22 +447,22 @@ impl Directory for MmapDirectory {
.create_new(true) .create_new(true)
.open(full_path); .open(full_path);
let mut file = open_res.map_err(|io_err| { let mut file = open_res.map_err(|err| {
if io_err.kind() == io::ErrorKind::AlreadyExists { if err.kind() == io::ErrorKind::AlreadyExists {
OpenWriteError::FileAlreadyExists(path.to_path_buf()) OpenWriteError::FileAlreadyExists(path.to_owned())
} else { } else {
OpenWriteError::wrap_io_error(io_err, path.to_path_buf()) IOError::with_path(path.to_owned(), err).into()
} }
})?; })?;
// making sure the file is created. // making sure the file is created.
file.flush() file.flush()
.map_err(|io_error| OpenWriteError::wrap_io_error(io_error, path.to_path_buf()))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
// Apparently, on some filesystems, syncing the parent // Apparently, on some filesystems, syncing the parent
// directory is required. // directory is required.
self.sync_directory() self.sync_directory()
.map_err(|io_err| OpenWriteError::wrap_io_error(io_err, path.to_path_buf()))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
let writer = SafeFileWriter::new(file); let writer = SafeFileWriter::new(file);
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
@@ -431,26 +473,26 @@ impl Directory for MmapDirectory {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
match File::open(&full_path) { match File::open(&full_path) {
Ok(mut file) => { Ok(mut file) => {
file.read_to_end(&mut buffer).map_err(|io_error| { file.read_to_end(&mut buffer)
OpenReadError::wrap_io_error(io_error, path.to_path_buf()) .map_err(|e| IOError::with_path(path.to_owned(), e))?;
})?;
Ok(buffer) Ok(buffer)
} }
Err(io_error) => { Err(e) => {
if io_error.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned())) Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else { } else {
Err(OpenReadError::wrap_io_error(io_error, path.to_path_buf())) Err(IOError::with_path(path.to_owned(), e).into())
} }
} }
} }
} }
fn atomic_write(&self, path: &Path, content: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path); debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
atomic_write(&full_path, content)?; let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
self.sync_directory() meta_file.write(|f| f.write_all(data))?;
Ok(())
} }
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> { fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
@@ -485,10 +527,12 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::{common::HasLen, indexer::LogMergePolicy}; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering};
#[test] #[test]
fn test_open_non_existent_path() { fn test_open_non_existent_path() {
@@ -501,7 +545,7 @@ mod tests {
// cannot be mmapped. // cannot be mmapped.
// //
// In that case the directory returns a SharedVecSlice. // In that case the directory returns a SharedVecSlice.
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap(); let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
let path = PathBuf::from("test"); let path = PathBuf::from("test");
{ {
let mut w = mmap_directory.open_write(&path).unwrap(); let mut w = mmap_directory.open_write(&path).unwrap();
@@ -517,7 +561,7 @@ mod tests {
// here we test if the cache releases // here we test if the cache releases
// mmaps correctly. // mmaps correctly.
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap(); let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
let num_paths = 10; let num_paths = 10;
let paths: Vec<PathBuf> = (0..num_paths) let paths: Vec<PathBuf> = (0..num_paths)
.map(|i| PathBuf::from(&*format!("file_{}", i))) .map(|i| PathBuf::from(&*format!("file_{}", i)))
@@ -577,6 +621,27 @@ mod tests {
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0); assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
} }
#[test]
fn test_watch_wrapper() {
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join(*META_FILEPATH);
let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
}));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle2 = watch_wrapper.watch(Box::new(move || {
let _ = sender.send(());
}));
assert_eq!(counter.load(Ordering::SeqCst), 0);
fs::write(&tmp_file, b"whateverwilldo").unwrap();
assert!(receiver.recv().is_ok());
assert!(counter.load(Ordering::SeqCst) >= 1);
}
#[test] #[test]
fn test_mmap_released() { fn test_mmap_released() {
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap(); let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
@@ -587,7 +652,7 @@ mod tests {
{ {
let index = Index::create(mmap_directory.clone(), schema).unwrap(); let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut log_merge_policy = LogMergePolicy::default(); let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3); log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy)); index_writer.set_merge_policy(Box::new(log_merge_policy));

View File

@@ -9,12 +9,10 @@ mod mmap_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod file_slice;
mod file_watcher;
mod footer; mod footer;
mod managed_directory; mod managed_directory;
mod owned_bytes;
mod ram_directory; mod ram_directory;
mod read_only_source;
mod watch_event_router; mod watch_event_router;
/// Errors specific to the directory module. /// Errors specific to the directory module.
@@ -23,14 +21,11 @@ pub mod error;
pub use self::directory::DirectoryLock; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone}; pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
pub use self::file_slice::{FileHandle, FileSlice};
pub use self::owned_bytes::OwnedBytes;
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle}; pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{self, BufWriter, Write};
use std::path::PathBuf; use std::path::PathBuf;
/// Outcome of the Garbage collection /// Outcome of the Garbage collection
pub struct GarbageCollectionResult { pub struct GarbageCollectionResult {
/// List of files that were deleted in this cycle /// List of files that were deleted in this cycle

View File

@@ -1,290 +0,0 @@
use crate::directory::FileHandle;
use stable_deref_trait::StableDeref;
use std::convert::TryInto;
use std::mem;
use std::ops::Deref;
use std::sync::Arc;
use std::{fmt, io};
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
pub struct OwnedBytes {
data: &'static [u8],
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
}
impl FileHandle for OwnedBytes {
fn read_bytes(&self, from: usize, to: usize) -> io::Result<OwnedBytes> {
Ok(self.slice(from, to))
}
}
impl OwnedBytes {
/// Creates an empty `OwnedBytes`.
pub fn empty() -> OwnedBytes {
OwnedBytes::new(&[][..])
}
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T,
) -> OwnedBytes {
let box_stable_deref = Arc::new(data_holder);
let bytes: &[u8] = box_stable_deref.as_ref();
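// This transmute is sound only because `T: StableDeref` guarantees the
// slice keeps its address when the holder moves, and the `Arc` above keeps
// the holder alive for as long as this `OwnedBytes` (or any clone or
// sub-slice of it) exists, so the fabricated `'static` slice cannot dangle.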
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
OwnedBytes {
box_stable_deref,
data,
}
}
/// Creates an `OwnedBytes` that is just a view over a slice of the data.
pub fn slice(&self, from: usize, to: usize) -> Self {
OwnedBytes {
data: &self.data[from..to],
box_stable_deref: self.box_stable_deref.clone(),
}
}
/// Returns the underlying slice of data.
/// `Deref` and `AsRef` are also available.
#[inline(always)]
pub fn as_slice(&self) -> &[u8] {
self.data
}
/// Returns the len of the slice.
#[inline(always)]
pub fn len(&self) -> usize {
self.data.len()
}
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
///
/// Left will hold `split_len` bytes.
///
/// This operation is cheap and does not require to copy any memory.
/// On the other hand, both `left` and `right` retain a handle over
/// the entire slice of memory. In other words, the memory will only
/// be released when both left and right are dropped.
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
let right_box_stable_deref = self.box_stable_deref.clone();
let left = OwnedBytes {
data: &self.data[..split_len],
box_stable_deref: self.box_stable_deref,
};
let right = OwnedBytes {
data: &self.data[split_len..],
box_stable_deref: right_box_stable_deref,
};
(left, right)
}
/// Returns true iff this `OwnedBytes` is empty.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.as_slice().is_empty()
}
/// Drops the leftmost `advance_len` bytes.
///
/// See also [.clip(clip_len: usize)](#method.clip).
#[inline(always)]
pub fn advance(&mut self, advance_len: usize) {
self.data = &self.data[advance_len..]
}
/// Reads a `u8` from the `OwnedBytes` and advances by one byte.
pub fn read_u8(&mut self) -> u8 {
assert!(!self.is_empty());
let byte = self.as_slice()[0];
self.advance(1);
byte
}
/// Reads a `u64` encoded as little-endian from the `OwnedBytes` and advances by 8 bytes.
pub fn read_u64(&mut self) -> u64 {
assert!(self.len() > 7);
let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
self.advance(8);
u64::from_le_bytes(octlet)
}
}
impl fmt::Debug for OwnedBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// We truncate the bytes in order to make sure the debug string
// is not too long.
let bytes_truncated: &[u8] = if self.len() > 10 {
&self.as_slice()[..10]
} else {
self.as_slice()
};
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
}
}
impl Deref for OwnedBytes {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl io::Read for OwnedBytes {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
if data.len() >= buf.len() {
let buf_len = buf.len();
buf.copy_from_slice(&data[..buf_len]);
buf.len()
} else {
let data_len = data.len();
buf[..data_len].copy_from_slice(data);
data_len
}
};
self.advance(read_len);
Ok(read_len)
}
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
buf.extend(data);
data.len()
};
self.advance(read_len);
Ok(read_len)
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let read_len = self.read(buf)?;
if read_len != buf.len() {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer",
));
}
Ok(())
}
}
impl AsRef<[u8]> for OwnedBytes {
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
#[cfg(test)]
mod tests {
use std::io::{self, Read};
use super::OwnedBytes;
#[test]
fn test_owned_bytes_debug() {
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
assert_eq!(
format!("{:?}", short_bytes),
"OwnedBytes([97, 98, 99, 100], len=4)"
);
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
assert_eq!(
format!("{:?}", long_bytes),
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
);
}
#[test]
fn test_owned_bytes_read() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
{
let mut buf = [0u8; 5];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
}
{
let mut buf = [0u8; 2];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"fg");
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
}
Ok(())
}
#[test]
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 5];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
assert_eq!(&buf, b"abcde");
Ok(())
}
#[test]
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 7];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf[..5], b"abcde");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_to_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = Vec::new();
bytes.read_to_end(&mut buf)?;
assert_eq!(buf.as_slice(), b"abcde".as_ref());
Ok(())
}
#[test]
fn test_owned_bytes_read_u8() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
assert_eq!(bytes.read_u8(), 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_u64() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
assert_eq!(bytes.read_u64(), u64::MAX - 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_split() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
let (left, right) = bytes.split(3);
assert_eq!(left.as_slice(), b"abc");
assert_eq!(right.as_slice(), b"defghi");
}
#[test]
fn test_owned_bytes_split_boundary() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
{
let (left, right) = bytes.clone().split(0);
assert_eq!(left.as_slice(), b"");
assert_eq!(right.as_slice(), b"abcdefghi");
}
{
let (left, right) = bytes.split(9);
assert_eq!(left.as_slice(), b"abcdefghi");
assert_eq!(right.as_slice(), b"");
}
}
}

View File

@@ -1,9 +1,9 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle}; use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::{TerminatingWrite, WritePtr};
use crate::{common::HasLen, core::META_FILEPATH};
use fail::fail_point; use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
@@ -12,8 +12,6 @@ use std::path::{Path, PathBuf};
use std::result; use std::result;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use super::FileHandle;
/// Writer associated with the `RAMDirectory` /// Writer associated with the `RAMDirectory`
/// ///
/// The Writer just writes a buffer. /// The Writer just writes a buffer.
@@ -44,12 +42,12 @@ impl VecWriter {
impl Drop for VecWriter { impl Drop for VecWriter {
fn drop(&mut self) { fn drop(&mut self) {
// if !self.is_flushed { if !self.is_flushed {
// panic!( panic!(
// "You forgot to flush {:?} before its writter got Drop. Do not rely on drop.", "You forgot to flush {:?} before its writter got Drop. Do not rely on drop.",
// self.path self.path
// ) )
// } }
} }
} }
@@ -82,17 +80,17 @@ impl TerminatingWrite for VecWriter {
#[derive(Default)] #[derive(Default)]
struct InnerDirectory { struct InnerDirectory {
fs: HashMap<PathBuf, FileSlice>, fs: HashMap<PathBuf, ReadOnlySource>,
watch_router: WatchCallbackList, watch_router: WatchCallbackList,
} }
impl InnerDirectory { impl InnerDirectory {
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool { fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
let data = FileSlice::from(data.to_vec()); let data = ReadOnlySource::new(Vec::from(data));
self.fs.insert(path, data).is_some() self.fs.insert(path, data).is_some()
} }
fn open_read(&self, path: &Path) -> Result<FileSlice, OpenReadError> { fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.fs self.fs
.get(path) .get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path))) .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
@@ -153,11 +151,11 @@ impl RAMDirectory {
/// written using the `atomic_write` api. /// written using the `atomic_write` api.
/// ///
/// If an error is encountered, files may be persisted partially. /// If an error is encountered, files may be persisted partially.
pub fn persist(&self, dest: &dyn Directory) -> crate::Result<()> { pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
let wlock = self.fs.write().unwrap(); let wlock = self.fs.write().unwrap();
for (path, file) in wlock.fs.iter() { for (path, source) in wlock.fs.iter() {
let mut dest_wrt = dest.open_write(path)?; let mut dest_wrt = dest.open_write(path)?;
dest_wrt.write_all(file.read_bytes()?.as_slice())?; dest_wrt.write_all(source.as_slice())?;
dest_wrt.terminate()?; dest_wrt.terminate()?;
} }
Ok(()) Ok(())
@@ -165,37 +163,24 @@ impl RAMDirectory {
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
fn get_file_handle(&self, path: &Path) -> Result<Box<dyn FileHandle>, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let file_slice = self.open_read(path)?;
Ok(Box::new(file_slice))
}
fn open_read(&self, path: &Path) -> result::Result<FileSlice, OpenReadError> {
self.fs.read().unwrap().open_read(path) self.fs.read().unwrap().open_read(path)
} }
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| { fail_point!("RAMDirectory::delete", |_| {
Err(DeleteError::IOError { use crate::directory::error::IOError;
io_error: io::Error::from(io::ErrorKind::Other), let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
filepath: path.to_path_buf(), Err(DeleteError::from(io_error))
})
}); });
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
fn exists(&self, path: &Path) -> Result<bool, OpenReadError> { fn exists(&self, path: &Path) -> bool {
Ok(self self.fs.read().unwrap().exists(path)
.fs
.read()
.map_err(|e| OpenReadError::IOError {
io_error: io::Error::new(io::ErrorKind::Other, e.to_string()),
filepath: path.to_path_buf(),
})?
.exists(path))
} }
fn open_write(&self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap(); let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
let vec_writer = VecWriter::new(path_buf.clone(), self.clone()); let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
@@ -209,17 +194,10 @@ impl Directory for RAMDirectory {
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> { fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let bytes = Ok(self.open_read(path)?.as_slice().to_owned())
self.open_read(path)?
.read_bytes()
.map_err(|io_error| OpenReadError::IOError {
io_error,
filepath: path.to_path_buf(),
})?;
Ok(bytes.as_slice().to_owned())
} }
fn atomic_write(&self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
msg.unwrap_or_else(|| "Undefined".to_string()) msg.unwrap_or_else(|| "Undefined".to_string())
@@ -256,13 +234,13 @@ mod tests {
let msg_seq: &'static [u8] = b"sequential is the way"; let msg_seq: &'static [u8] = b"sequential is the way";
let path_atomic: &'static Path = Path::new("atomic"); let path_atomic: &'static Path = Path::new("atomic");
let path_seq: &'static Path = Path::new("seq"); let path_seq: &'static Path = Path::new("seq");
let directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok()); assert!(directory.atomic_write(path_atomic, msg_atomic).is_ok());
let mut wrt = directory.open_write(path_seq).unwrap(); let mut wrt = directory.open_write(path_seq).unwrap();
assert!(wrt.write_all(msg_seq).is_ok()); assert!(wrt.write_all(msg_seq).is_ok());
assert!(wrt.flush().is_ok()); assert!(wrt.flush().is_ok());
let directory_copy = RAMDirectory::create(); let mut directory_copy = RAMDirectory::create();
assert!(directory.persist(&directory_copy).is_ok()); assert!(directory.persist(&mut directory_copy).is_ok());
assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic); assert_eq!(directory_copy.atomic_read(path_atomic).unwrap(), msg_atomic);
assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq); assert_eq!(directory_copy.atomic_read(path_seq).unwrap(), msg_seq);
} }
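A minimal usage sketch of the newer (left-hand) API, assuming the `tantivy::directory` re-exports seen in these hunks; it illustrates the flush discipline that the Drop panic at the top of this file enforces: writers must be flushed or terminated before they are dropped.

use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory, TerminatingWrite};

fn write_then_terminate() -> tantivy::Result<()> {
    let directory = RAMDirectory::create();
    let mut wrt = directory.open_write(Path::new("data"))?;
    wrt.write_all(b"hello")?;
    // Dropping an unflushed writer panics (see the Drop impl above),
    // so flush and close explicitly via `terminate`.
    wrt.terminate()?;
    Ok(())
}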


@@ -0,0 +1,137 @@
use crate::common::HasLen;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;
pub type BoxedData = Box<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
/// Read object that represents files in tantivy.
///
/// These read objects are only in charge of delivering
/// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data
/// held by this object should never be altered or destroyed.
pub struct ReadOnlySource {
data: Arc<BoxedData>,
start: usize,
stop: usize,
}
unsafe impl StableDeref for ReadOnlySource {}
unsafe impl CloneStableDeref for ReadOnlySource {}
impl Deref for ReadOnlySource {
type Target = [u8];
fn deref(&self) -> &[u8] {
self.as_slice()
}
}
impl From<Arc<BoxedData>> for ReadOnlySource {
fn from(data: Arc<BoxedData>) -> Self {
let len = data.len();
ReadOnlySource {
data,
start: 0,
stop: len,
}
}
}
impl ReadOnlySource {
pub(crate) fn new<D>(data: D) -> ReadOnlySource
where
D: Deref<Target = [u8]> + Send + Sync + 'static,
{
let len = data.len();
ReadOnlySource {
data: Arc::new(Box::new(data)),
start: 0,
stop: len,
}
}
/// Creates an empty ReadOnlySource
pub fn empty() -> ReadOnlySource {
ReadOnlySource::new(&[][..])
}
/// Returns the data underlying the ReadOnlySource object.
pub fn as_slice(&self) -> &[u8] {
&self.data[self.start..self.stop]
}
/// Splits into two `ReadOnlySource`s, at the offset given
/// as an argument.
pub fn split(self, addr: usize) -> (ReadOnlySource, ReadOnlySource) {
let left = self.slice(0, addr);
let right = self.slice_from(addr);
(left, right)
}
/// Splits into two `ReadOnlySource`s, at the offset `end - right_len`.
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Creates a ReadOnlySource that is just a
/// view over a slice of the data.
///
/// Keep in mind that any living slice extends
/// the lifetime of the original ReadOnlySource.
///
/// For instance, if a `ReadOnlySource` wraps 500MB
/// worth of data in anonymous memory, and only a
/// 1KB slice of it remains, the whole 500MB
/// is retained in memory.
pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
assert!(
start <= stop,
"Requested negative slice [{}..{}]",
start,
stop
);
assert!(stop <= self.len());
ReadOnlySource {
data: self.data.clone(),
start: self.start + start,
stop: self.start + stop,
}
}
/// Like `.slice(...)` but enforcing only the `from`
/// boundary.
///
/// Equivalent to `.slice(from_offset, self.len())`
pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
self.slice(from_offset, self.len())
}
/// Like `.slice(...)` but enforcing only the `to`
/// boundary.
///
/// Equivalent to `.slice(0, to_offset)`
pub fn slice_to(&self, to_offset: usize) -> ReadOnlySource {
self.slice(0, to_offset)
}
}
impl HasLen for ReadOnlySource {
fn len(&self) -> usize {
self.stop - self.start
}
}
impl Clone for ReadOnlySource {
fn clone(&self) -> Self {
self.slice_from(0)
}
}
impl From<Vec<u8>> for ReadOnlySource {
fn from(data: Vec<u8>) -> ReadOnlySource {
ReadOnlySource::new(data)
}
}
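A short illustration of the slicing API above; this is a sketch only, and it assumes `ReadOnlySource` is reachable as `tantivy::directory::ReadOnlySource`, as in the imports elsewhere in this diff.

use tantivy::directory::ReadOnlySource;

fn demo_slices() {
    let source = ReadOnlySource::from(vec![1u8, 2, 3, 4, 5]);
    // `split` consumes the source and yields two views over the same buffer.
    let (left, right) = source.clone().split(2);
    assert_eq!(left.as_slice(), &[1u8, 2]);
    assert_eq!(right.as_slice(), &[3u8, 4, 5]);
    // As the doc comment on `slice` warns, any living slice keeps the
    // entire underlying buffer alive, however small the slice is.
    let tail = source.slice_from(4);
    assert_eq!(tail.as_slice(), &[5u8]);
}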


@@ -20,47 +20,45 @@ mod mmap_directory_tests {
} }
#[test] #[test]
fn test_simple() -> crate::Result<()> { fn test_simple() {
let directory = make_directory(); let mut directory = make_directory();
super::test_simple(&directory) super::test_simple(&mut directory);
} }
#[test] #[test]
fn test_write_create_the_file() { fn test_write_create_the_file() {
let directory = make_directory(); let mut directory = make_directory();
super::test_write_create_the_file(&directory); super::test_write_create_the_file(&mut directory);
} }
#[test] #[test]
fn test_rewrite_forbidden() -> crate::Result<()> { fn test_rewrite_forbidden() {
let directory = make_directory(); let mut directory = make_directory();
super::test_rewrite_forbidden(&directory)?; super::test_rewrite_forbidden(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_directory_delete() -> crate::Result<()> { fn test_directory_delete() {
let directory = make_directory(); let mut directory = make_directory();
super::test_directory_delete(&directory)?; super::test_directory_delete(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_lock_non_blocking() { fn test_lock_non_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_non_blocking(&directory); super::test_lock_non_blocking(&mut directory);
} }
#[test] #[test]
fn test_lock_blocking() { fn test_lock_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_blocking(&directory); super::test_lock_blocking(&mut directory);
} }
#[test] #[test]
fn test_watch() { fn test_watch() {
let directory = make_directory(); let mut directory = make_directory();
super::test_watch(&directory); super::test_watch(&mut directory);
} }
} }
@@ -74,47 +72,45 @@ mod ram_directory_tests {
} }
#[test] #[test]
fn test_simple() -> crate::Result<()> { fn test_simple() {
let directory = make_directory(); let mut directory = make_directory();
super::test_simple(&directory) super::test_simple(&mut directory);
} }
#[test] #[test]
fn test_write_create_the_file() { fn test_write_create_the_file() {
let directory = make_directory(); let mut directory = make_directory();
super::test_write_create_the_file(&directory); super::test_write_create_the_file(&mut directory);
} }
#[test] #[test]
fn test_rewrite_forbidden() -> crate::Result<()> { fn test_rewrite_forbidden() {
let directory = make_directory(); let mut directory = make_directory();
super::test_rewrite_forbidden(&directory)?; super::test_rewrite_forbidden(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_directory_delete() -> crate::Result<()> { fn test_directory_delete() {
let directory = make_directory(); let mut directory = make_directory();
super::test_directory_delete(&directory)?; super::test_directory_delete(&mut directory);
Ok(())
} }
#[test] #[test]
fn test_lock_non_blocking() { fn test_lock_non_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_non_blocking(&directory); super::test_lock_non_blocking(&mut directory);
} }
#[test] #[test]
fn test_lock_blocking() { fn test_lock_blocking() {
let directory = make_directory(); let mut directory = make_directory();
super::test_lock_blocking(&directory); super::test_lock_blocking(&mut directory);
} }
#[test] #[test]
fn test_watch() { fn test_watch() {
let directory = make_directory(); let mut directory = make_directory();
super::test_watch(&directory); super::test_watch(&mut directory);
} }
} }
@@ -122,61 +118,68 @@ mod ram_directory_tests {
#[should_panic] #[should_panic]
fn ram_directory_panics_if_flush_forgotten() { fn ram_directory_panics_if_flush_forgotten() {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
let ram_directory = RAMDirectory::create(); let mut ram_directory = RAMDirectory::create();
let mut write_file = ram_directory.open_write(test_path).unwrap(); let mut write_file = ram_directory.open_write(test_path).unwrap();
assert!(write_file.write_all(&[4]).is_ok()); assert!(write_file.write_all(&[4]).is_ok());
} }
fn test_simple(directory: &dyn Directory) -> crate::Result<()> { fn test_simple(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
let mut write_file = directory.open_write(test_path)?; {
assert!(directory.exists(test_path).unwrap()); let mut write_file = directory.open_write(test_path).unwrap();
write_file.write_all(&[4])?; assert!(directory.exists(test_path));
write_file.write_all(&[3])?; write_file.write_all(&[4]).unwrap();
write_file.write_all(&[7, 3, 5])?; write_file.write_all(&[3]).unwrap();
write_file.flush()?; write_file.write_all(&[7, 3, 5]).unwrap();
let read_file = directory.open_read(test_path)?.read_bytes()?; write_file.flush().unwrap();
assert_eq!(read_file.as_slice(), &[4u8, 3u8, 7u8, 3u8, 5u8]); }
mem::drop(read_file); {
let read_file = directory.open_read(test_path).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
}
assert!(directory.delete(test_path).is_ok()); assert!(directory.delete(test_path).is_ok());
assert!(!directory.exists(test_path).unwrap()); assert!(!directory.exists(test_path));
Ok(())
} }
fn test_rewrite_forbidden(directory: &dyn Directory) -> crate::Result<()> { fn test_rewrite_forbidden(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
directory.open_write(test_path)?; {
assert!(directory.exists(test_path).unwrap()); directory.open_write(test_path).unwrap();
assert!(directory.open_write(test_path).is_err()); assert!(directory.exists(test_path));
}
{
assert!(directory.open_write(test_path).is_err());
}
assert!(directory.delete(test_path).is_ok()); assert!(directory.delete(test_path).is_ok());
Ok(())
} }
fn test_write_create_the_file(directory: &dyn Directory) { fn test_write_create_the_file(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
{ {
assert!(directory.open_read(test_path).is_err()); assert!(directory.open_read(test_path).is_err());
let _w = directory.open_write(test_path).unwrap(); let _w = directory.open_write(test_path).unwrap();
assert!(directory.exists(test_path).unwrap()); assert!(directory.exists(test_path));
assert!(directory.open_read(test_path).is_ok()); assert!(directory.open_read(test_path).is_ok());
assert!(directory.delete(test_path).is_ok()); assert!(directory.delete(test_path).is_ok());
} }
} }
fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> { fn test_directory_delete(directory: &mut dyn Directory) {
let test_path: &'static Path = Path::new("some_path_for_test"); let test_path: &'static Path = Path::new("some_path_for_test");
assert!(directory.open_read(test_path).is_err()); assert!(directory.open_read(test_path).is_err());
let mut write_file = directory.open_write(&test_path)?; let mut write_file = directory.open_write(&test_path).unwrap();
write_file.write_all(&[1, 2, 3, 4])?; write_file.write_all(&[1, 2, 3, 4]).unwrap();
write_file.flush()?; write_file.flush().unwrap();
{ {
let read_handle = directory.open_read(&test_path)?.read_bytes()?; let read_handle = directory.open_read(&test_path).unwrap();
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]); assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
// Mapped files can't be deleted on Windows // Mapped files can't be deleted on Windows
if !cfg!(windows) { if !cfg!(windows) {
assert!(directory.delete(&test_path).is_ok()); assert!(directory.delete(&test_path).is_ok());
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]); assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
} }
assert!(directory.delete(Path::new("SomeOtherPath")).is_err()); assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
} }
@@ -186,40 +189,44 @@ fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
assert!(directory.open_read(&test_path).is_err()); assert!(directory.open_read(&test_path).is_err());
assert!(directory.delete(&test_path).is_err()); assert!(directory.delete(&test_path).is_err());
Ok(())
} }
fn test_watch(directory: &dyn Directory) { fn test_watch(directory: &mut dyn Directory) {
let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let (tx, rx) = crossbeam::channel::unbounded(); let counter_clone = counter.clone();
let timeout = Duration::from_millis(500); let (sender, receiver) = crossbeam::channel::unbounded();
let watch_callback = Box::new(move || {
let handle = directory counter_clone.fetch_add(1, SeqCst);
.watch(WatchCallback::new(move || { });
let val = counter.fetch_add(1, SeqCst); // This callback is used to synchronize watching in our unit test.
tx.send(val + 1).unwrap(); // We bind it to a variable because the callback is removed when that
// handle is dropped.
let watch_handle = directory.watch(watch_callback).unwrap();
let _progress_listener = directory
.watch(Box::new(move || {
let val = num_progress.fetch_add(1, SeqCst);
let _ = sender.send(val);
})) }))
.unwrap(); .unwrap();
for i in 0..10 {
assert_eq!(i, counter.load(SeqCst));
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
assert_eq!(i + 1, counter.load(SeqCst));
}
mem::drop(watch_handle);
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"foo") .atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok()); .is_ok());
assert_eq!(rx.recv_timeout(timeout), Ok(1)); assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
assert_eq!(10, counter.load(SeqCst));
assert!(directory
.atomic_write(Path::new("meta.json"), b"bar")
.is_ok());
assert_eq!(rx.recv_timeout(timeout), Ok(2));
mem::drop(handle);
assert!(directory
.atomic_write(Path::new("meta.json"), b"qux")
.is_ok());
assert!(rx.recv_timeout(timeout).is_err());
} }
fn test_lock_non_blocking(directory: &dyn Directory) { fn test_lock_non_blocking(directory: &mut dyn Directory) {
{ {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
@@ -244,7 +251,7 @@ fn test_lock_non_blocking(directory: &dyn Directory) {
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
} }
fn test_lock_blocking(directory: &dyn Directory) { fn test_lock_blocking(directory: &mut dyn Directory) {
let lock_a_res = directory.acquire_lock(&Lock { let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,
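A sketch of the lock API these tests exercise, written against the newer (left-hand) `&dyn Directory` signatures; that `Lock` is publicly reachable from `tantivy::directory` is an assumption made for illustration.

use std::path::PathBuf;
use tantivy::directory::{Directory, Lock, RAMDirectory};

fn lock_example() {
    let directory = RAMDirectory::create();
    // A non-blocking acquisition fails fast if the lock is already held.
    let guard = directory
        .acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),
            is_blocking: false,
        })
        .expect("lock should be free");
    // The lock is released when the guard is dropped.
    drop(guard);
}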


@@ -4,20 +4,8 @@ use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
/// Cloneable wrapper for callbacks registered when watching files of a `Directory`. /// Type alias for callbacks registered when watching files of a `Directory`.
#[derive(Clone)] pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
pub struct WatchCallback(Arc<dyn Fn() + Sync + Send>);
impl WatchCallback {
/// Wraps a `Fn()` to create a WatchCallback.
pub fn new<F: Fn() + Sync + Send + 'static>(op: F) -> Self {
WatchCallback(Arc::new(op))
}
fn call(&self) {
self.0()
}
}
/// Helper struct to implement the watch method in `Directory` implementations. /// Helper struct to implement the watch method in `Directory` implementations.
/// ///
@@ -41,17 +29,10 @@ impl WatchHandle {
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle { pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
WatchHandle(watch_callback) WatchHandle(watch_callback)
} }
/// Returns an empty watch handle.
///
/// This function is only useful when implementing a readonly directory.
pub fn empty() -> WatchHandle {
WatchHandle::new(Arc::new(WatchCallback::new(|| {})))
}
} }
impl WatchCallbackList { impl WatchCallbackList {
/// Subscribes a new callback and returns a handle that controls the lifetime of the callback. /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle { pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback); let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc); let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
@@ -59,13 +40,13 @@ impl WatchCallbackList {
WatchHandle::new(watch_callback_arc) WatchHandle::new(watch_callback_arc)
} }
fn list_callback(&self) -> Vec<WatchCallback> { fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
let mut callbacks: Vec<WatchCallback> = vec![]; let mut callbacks = vec![];
let mut router_wlock = self.router.write().unwrap(); let mut router_wlock = self.router.write().unwrap();
let mut i = 0; let mut i = 0;
while i < router_wlock.len() { while i < router_wlock.len() {
if let Some(watch) = router_wlock[i].upgrade() { if let Some(watch) = router_wlock[i].upgrade() {
callbacks.push(watch.as_ref().clone()); callbacks.push(watch);
i += 1; i += 1;
} else { } else {
router_wlock.swap_remove(i); router_wlock.swap_remove(i);
@@ -87,7 +68,7 @@ impl WatchCallbackList {
.name("watch-callbacks".to_string()) .name("watch-callbacks".to_string())
.spawn(move || { .spawn(move || {
for callback in callbacks { for callback in callbacks {
callback.call(); callback();
} }
let _ = sender.send(()); let _ = sender.send(());
}); });
@@ -103,7 +84,7 @@ impl WatchCallbackList {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::{WatchCallback, WatchCallbackList}; use crate::directory::WatchCallbackList;
use futures::executor::block_on; use futures::executor::block_on;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
@@ -114,7 +95,7 @@ mod tests {
let watch_event_router = WatchCallbackList::default(); let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let inc_callback = WatchCallback::new(move || { let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
block_on(watch_event_router.broadcast()); block_on(watch_event_router.broadcast());
@@ -142,7 +123,7 @@ mod tests {
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let inc_callback = |inc: usize| { let inc_callback = |inc: usize| {
let counter_clone = counter.clone(); let counter_clone = counter.clone();
WatchCallback::new(move || { Box::new(move || {
counter_clone.fetch_add(inc, Ordering::SeqCst); counter_clone.fetch_add(inc, Ordering::SeqCst);
}) })
}; };
@@ -170,7 +151,7 @@ mod tests {
let watch_event_router = WatchCallbackList::default(); let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let inc_callback = WatchCallback::new(move || { let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
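In the same spirit as the unit tests above, a minimal round trip with the newer `WatchCallback` wrapper; inside the crate the list lives at `crate::directory::WatchCallbackList`, and its public reachability under `tantivy::directory` is assumed here.

use futures::executor::block_on;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tantivy::directory::{WatchCallback, WatchCallbackList};

fn watch_roundtrip() {
    let watch_list = WatchCallbackList::default();
    let counter: Arc<AtomicUsize> = Default::default();
    let counter_clone = counter.clone();
    // The returned handle owns the callback: dropping it unsubscribes.
    let handle = watch_list.subscribe(WatchCallback::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    }));
    block_on(watch_list.broadcast());
    assert_eq!(counter.load(Ordering::SeqCst), 1);
    drop(handle);
    block_on(watch_list.broadcast());
    // The callback was unsubscribed, so the counter did not move.
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}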


@@ -10,7 +10,7 @@ use std::borrow::BorrowMut;
pub const TERMINATED: DocId = std::i32::MAX as u32; pub const TERMINATED: DocId = std::i32::MAX as u32;
/// Represents an iterable set of sorted doc ids. /// Represents an iterable set of sorted doc ids.
pub trait DocSet: Send { pub trait DocSet {
/// Goes to the next element. /// Goes to the next element.
/// ///
/// The DocId of the next element is returned. /// The DocId of the next element is returned.
@@ -129,14 +129,6 @@ impl<'a> DocSet for &'a mut dyn DocSet {
fn size_hint(&self) -> u32 { fn size_hint(&self) -> u32 {
(**self).size_hint() (**self).size_hint()
} }
fn count(&mut self, delete_bitset: &DeleteBitSet) -> u32 {
(**self).count(delete_bitset)
}
fn count_including_deleted(&mut self) -> u32 {
(**self).count_including_deleted()
}
} }
impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> { impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {


@@ -2,27 +2,21 @@
use std::io; use std::io;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError}; use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError; use crate::fastfield::FastFieldNotAvailableError;
use crate::query; use crate::query;
use crate::{ use crate::schema;
directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
schema,
};
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::PoisonError; use std::sync::PoisonError;
/// Represents a `DataCorruption` error.
///
/// When facing data corruption, tantivy either panics or returns this error.
pub struct DataCorruption { pub struct DataCorruption {
filepath: Option<PathBuf>, filepath: Option<PathBuf>,
comment: String, comment: String,
} }
impl DataCorruption { impl DataCorruption {
/// Creates a `DataCorruption` Error.
pub fn new(filepath: PathBuf, comment: String) -> DataCorruption { pub fn new(filepath: PathBuf, comment: String) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: Some(filepath), filepath: Some(filepath),
@@ -30,11 +24,10 @@ impl DataCorruption {
} }
} }
/// Creates a `DataCorruption` Error when the filepath is irrelevant. pub fn comment_only(comment: String) -> DataCorruption {
pub fn comment_only<TStr: ToString>(comment: TStr) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: None, filepath: None,
comment: comment.to_string(), comment,
} }
} }
} }
@@ -50,47 +43,44 @@ impl fmt::Debug for DataCorruption {
} }
} }
/// The library's error enum /// The library's `failure`-based error enum
#[derive(Debug, Error)] #[derive(Debug, Fail)]
pub enum TantivyError { pub enum TantivyError {
/// Failed to open the directory. /// Path does not exist.
#[error("Failed to open the directory: '{0:?}'")] #[fail(display = "Path does not exist: '{:?}'", _0)]
OpenDirectoryError(#[from] OpenDirectoryError), PathDoesNotExist(PathBuf),
/// Failed to open a file for read. /// File already exists; this is a problem when we try to write into a new file.
#[error("Failed to open file for read: '{0:?}'")] #[fail(display = "File already exists: '{:?}'", _0)]
OpenReadError(#[from] OpenReadError), FileAlreadyExists(PathBuf),
/// Failed to open a file for write.
#[error("Failed to open file for write: '{0:?}'")]
OpenWriteError(#[from] OpenWriteError),
/// Index already exists in this directory /// Index already exists in this directory
#[error("Index already exists")] #[fail(display = "Index already exists")]
IndexAlreadyExists, IndexAlreadyExists,
/// Failed to acquire file lock /// Failed to acquire file lock
#[error("Failed to acquire Lockfile: {0:?}. {1:?}")] #[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
LockFailure(LockError, Option<String>), LockFailure(LockError, Option<String>),
/// IO Error. /// IO Error.
#[error("An IO error occurred: '{0}'")] #[fail(display = "An IO error occurred: '{}'", _0)]
IOError(#[from] io::Error), IOError(#[cause] IOError),
/// Data corruption. /// Data corruption.
#[error("Data corrupted: '{0:?}'")] #[fail(display = "{:?}", _0)]
DataCorruption(DataCorruption), DataCorruption(DataCorruption),
/// A thread holding the lock panicked and poisoned the lock. /// A thread holding the lock panicked and poisoned the lock.
#[error("A thread holding the lock panicked and poisoned the lock")] #[fail(display = "A thread holding the lock panicked and poisoned the lock")]
Poisoned, Poisoned,
/// Invalid argument was passed by the user. /// Invalid argument was passed by the user.
#[error("An invalid argument was passed: '{0}'")] #[fail(display = "An invalid argument was passed: '{}'", _0)]
InvalidArgument(String), InvalidArgument(String),
/// An error happened in one of the threads. /// An error happened in one of the threads.
#[error("An error occurred in a thread: '{0}'")] #[fail(display = "An error occurred in a thread: '{}'", _0)]
ErrorInThread(String), ErrorInThread(String),
/// An error related to the schema occurred. /// An error related to the schema occurred.
#[error("Schema error: '{0}'")] #[fail(display = "Schema error: '{}'", _0)]
SchemaError(String), SchemaError(String),
/// System error (e.g., we failed to spawn a new thread). /// System error (e.g., we failed to spawn a new thread).
#[error("System error.'{0}'")] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
/// Index incompatible with current version of tantivy /// Index incompatible with current version of tantivy
#[error("{0:?}")] #[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility), IncompatibleIndex(Incompatibility),
} }
@@ -99,17 +89,31 @@ impl From<DataCorruption> for TantivyError {
TantivyError::DataCorruption(data_corruption) TantivyError::DataCorruption(data_corruption)
} }
} }
impl From<FastFieldNotAvailableError> for TantivyError { impl From<FastFieldNotAvailableError> for TantivyError {
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError { fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
TantivyError::SchemaError(format!("{}", fastfield_error)) TantivyError::SchemaError(format!("{}", fastfield_error))
} }
} }
impl From<LockError> for TantivyError { impl From<LockError> for TantivyError {
fn from(lock_error: LockError) -> TantivyError { fn from(lock_error: LockError) -> TantivyError {
TantivyError::LockFailure(lock_error, None) TantivyError::LockFailure(lock_error, None)
} }
} }
impl From<IOError> for TantivyError {
fn from(io_error: IOError) -> TantivyError {
TantivyError::IOError(io_error)
}
}
impl From<io::Error> for TantivyError {
fn from(io_error: io::Error) -> TantivyError {
TantivyError::IOError(io_error.into())
}
}
impl From<query::QueryParserError> for TantivyError { impl From<query::QueryParserError> for TantivyError {
fn from(parsing_error: query::QueryParserError) -> TantivyError { fn from(parsing_error: query::QueryParserError) -> TantivyError {
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error)) TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
@@ -122,9 +126,15 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
} }
} }
impl From<chrono::ParseError> for TantivyError { impl From<OpenReadError> for TantivyError {
fn from(err: chrono::ParseError) -> TantivyError { fn from(error: OpenReadError) -> TantivyError {
TantivyError::InvalidArgument(err.to_string()) match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
}
} }
} }
@@ -134,9 +144,35 @@ impl From<schema::DocParsingError> for TantivyError {
} }
} }
impl From<OpenWriteError> for TantivyError {
fn from(error: OpenWriteError) -> TantivyError {
match error {
OpenWriteError::FileAlreadyExists(filepath) => {
TantivyError::FileAlreadyExists(filepath)
}
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
}
}
}
impl From<OpenDirectoryError> for TantivyError {
fn from(error: OpenDirectoryError) -> TantivyError {
match error {
OpenDirectoryError::DoesNotExist(directory_path) => {
TantivyError::PathDoesNotExist(directory_path)
}
OpenDirectoryError::NotADirectory(directory_path) => {
TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
}
OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
}
}
}
impl From<serde_json::Error> for TantivyError { impl From<serde_json::Error> for TantivyError {
fn from(error: serde_json::Error) -> TantivyError { fn from(error: serde_json::Error) -> TantivyError {
TantivyError::IOError(error.into()) let io_err = io::Error::from(error);
TantivyError::IOError(io_err.into())
} }
} }
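For readers unfamiliar with `thiserror`: the `#[from]` attributes in the left-hand column generate the same `From` impls that the right-hand `failure`-based column wrote by hand. A minimal standalone sketch of the pattern (`MyError` and `read_file` are illustrative names, not tantivy API):

use thiserror::Error;

#[derive(Debug, Error)]
enum MyError {
    // `#[error]` derives the Display message, and `#[from]` derives
    // `impl From<std::io::Error> for MyError`.
    #[error("An IO error occurred: '{0}'")]
    Io(#[from] std::io::Error),
}

fn read_file(path: &std::path::Path) -> Result<Vec<u8>, MyError> {
    // `?` converts io::Error into MyError through the derived From impl.
    Ok(std::fs::read(path)?)
}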


@@ -6,114 +6,31 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value}; use crate::schema::Schema;
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED}; use crate::Index;
use crate::{DocAddress, DocSet, Index, Searcher, Term};
use std::ops::Deref;
#[test] #[test]
fn test_bytes() -> crate::Result<()> { fn test_bytes() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let bytes_field = schema_builder.add_bytes_field("bytesfield", FAST); let field = schema_builder.add_bytes_field("bytesfield");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(bytes_field=>vec![0u8, 1, 2, 3])); index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
index_writer.add_document(doc!(bytes_field=>vec![])); index_writer.add_document(doc!(field=>vec![]));
index_writer.add_document(doc!(bytes_field=>vec![255u8])); index_writer.add_document(doc!(field=>vec![255u8]));
index_writer.add_document(doc!(bytes_field=>vec![1u8, 3, 5, 7, 9])); index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
index_writer.add_document(doc!(bytes_field=>vec![0u8; 1000])); index_writer.add_document(doc!(field=>vec![0u8; 1000]));
index_writer.commit()?; assert!(index_writer.commit().is_ok());
let searcher = index.reader()?.searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let bytes_reader = segment_reader.fast_fields().bytes(bytes_field).unwrap(); let bytes_reader = segment_reader.fast_fields().bytes(field).unwrap();
assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]); assert_eq!(bytes_reader.get_bytes(0), &[0u8, 1, 2, 3]);
assert!(bytes_reader.get_bytes(1).is_empty()); assert!(bytes_reader.get_bytes(1).is_empty());
assert_eq!(bytes_reader.get_bytes(2), &[255u8]); assert_eq!(bytes_reader.get_bytes(2), &[255u8]);
assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]); assert_eq!(bytes_reader.get_bytes(3), &[1u8, 3, 5, 7, 9]);
let long = vec![0u8; 1000]; let long = vec![0u8; 1000];
assert_eq!(bytes_reader.get_bytes(4), long.as_slice()); assert_eq!(bytes_reader.get_bytes(4), long.as_slice());
Ok(())
}
fn create_index_for_test<T: Into<BytesOptions>>(
byte_options: T,
) -> crate::Result<impl Deref<Target = Searcher>> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_bytes_field("string_bytes", byte_options.into());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
field => b"tantivy".as_ref(),
field => b"lucene".as_ref()
));
index_writer.commit()?;
Ok(index.reader()?.searcher())
}
#[test]
fn test_stored_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(STORED)?;
assert_eq!(searcher.num_docs(), 1);
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
let field = searcher.schema().get_field("string_bytes").unwrap();
let values: Vec<&Value> = retrieved_doc.get_all(field).collect();
assert_eq!(values.len(), 2);
let values_bytes: Vec<&[u8]> = values
.into_iter()
.flat_map(|value| value.bytes_value())
.collect();
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
Ok(())
}
#[test]
fn test_non_stored_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(INDEXED)?;
assert_eq!(searcher.num_docs(), 1);
let retrieved_doc = searcher.doc(DocAddress(0u32, 0u32))?;
let field = searcher.schema().get_field("string_bytes").unwrap();
assert!(retrieved_doc.get_first(field).is_none());
Ok(())
}
#[test]
fn test_index_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(INDEXED)?;
assert_eq!(searcher.num_docs(), 1);
let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight = term_query.specialized_weight(&searcher, true)?;
let term_scorer = term_weight.specialized_scorer(searcher.segment_reader(0), 1.0)?;
assert_eq!(term_scorer.doc(), 0u32);
Ok(())
}
#[test]
fn test_non_index_bytes() -> crate::Result<()> {
let searcher = create_index_for_test(STORED)?;
assert_eq!(searcher.num_docs(), 1);
let field = searcher.schema().get_field("string_bytes").unwrap();
let term = Term::from_field_bytes(field, b"lucene".as_ref());
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
let term_weight_err = term_query.specialized_weight(&searcher, false);
assert!(matches!(
term_weight_err,
Err(crate::TantivyError::SchemaError(_))
));
Ok(())
}
#[test]
fn test_fast_bytes_multivalue_value() -> crate::Result<()> {
let searcher = create_index_for_test(FAST)?;
assert_eq!(searcher.num_docs(), 1);
let fast_fields = searcher.segment_reader(0u32).fast_fields();
let field = searcher.schema().get_field("string_bytes").unwrap();
let fast_field_reader = fast_fields.bytes(field).unwrap();
assert_eq!(fast_field_reader.get_bytes(0u32), b"tantivy");
Ok(())
} }
} }


@@ -1,5 +1,6 @@
use crate::directory::FileSlice; use owning_ref::OwningRef;
use crate::directory::OwnedBytes;
use crate::directory::ReadOnlySource;
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::DocId; use crate::DocId;
@@ -16,16 +17,16 @@ use crate::DocId;
#[derive(Clone)] #[derive(Clone)]
pub struct BytesFastFieldReader { pub struct BytesFastFieldReader {
idx_reader: FastFieldReader<u64>, idx_reader: FastFieldReader<u64>,
values: OwnedBytes, values: OwningRef<ReadOnlySource, [u8]>,
} }
impl BytesFastFieldReader { impl BytesFastFieldReader {
pub(crate) fn open( pub(crate) fn open(
idx_reader: FastFieldReader<u64>, idx_reader: FastFieldReader<u64>,
values_file: FileSlice, values_source: ReadOnlySource,
) -> crate::Result<BytesFastFieldReader> { ) -> BytesFastFieldReader {
let values = values_file.read_bytes()?; let values = OwningRef::new(values_source).map(|source| &source[..]);
Ok(BytesFastFieldReader { idx_reader, values }) BytesFastFieldReader { idx_reader, values }
} }
fn range(&self, doc: DocId) -> (usize, usize) { fn range(&self, doc: DocId) -> (usize, usize) {
@@ -37,7 +38,7 @@ impl BytesFastFieldReader {
/// Returns the bytes associated with the given `doc` /// Returns the bytes associated with the given `doc`
pub fn get_bytes(&self, doc: DocId) -> &[u8] { pub fn get_bytes(&self, doc: DocId) -> &[u8] {
let (start, stop) = self.range(doc); let (start, stop) = self.range(doc);
&self.values.as_slice()[start..stop] &self.values[start..stop]
} }
/// Returns the overall number of bytes in this bytes fast field. /// Returns the overall number of bytes in this bytes fast field.


@@ -49,10 +49,16 @@ impl BytesFastFieldWriter {
/// matching field values present in the document. /// matching field values present in the document.
pub fn add_document(&mut self, doc: &Document) { pub fn add_document(&mut self, doc: &Document) {
self.next_doc(); self.next_doc();
for field_value in doc.get_all(self.field) { for field_value in doc.field_values() {
if let Value::Bytes(ref bytes) = field_value { if field_value.field() == self.field {
self.vals.extend_from_slice(bytes); if let Value::Bytes(ref bytes) = *field_value.value() {
return; self.vals.extend_from_slice(bytes);
} else {
panic!(
"Bytes field contained non-Bytes Value!. Field {:?} = {:?}",
self.field, field_value
);
}
} }
} }
} }
@@ -70,18 +76,21 @@ impl BytesFastFieldWriter {
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`. /// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> { pub fn serialize(&self, serializer: &mut FastFieldSerializer) -> io::Result<()> {
// writing the offset index {
let mut doc_index_serializer = // writing the offset index
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?; let mut doc_index_serializer =
for &offset in &self.doc_index { serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
doc_index_serializer.add_val(offset)?; for &offset in &self.doc_index {
doc_index_serializer.add_val(offset)?;
}
doc_index_serializer.add_val(self.vals.len() as u64)?;
doc_index_serializer.close_field()?;
}
{
// writing the values themselves
let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1)?;
value_serializer.write_all(&self.vals)?;
} }
doc_index_serializer.add_val(self.vals.len() as u64)?;
doc_index_serializer.close_field()?;
// writing the values themselves
serializer
.new_bytes_fast_field_with_idx(self.field, 1)?
.write_all(&self.vals)?;
Ok(()) Ok(())
} }
} }


@@ -1,6 +1,5 @@
use crate::common::{BitSet, HasLen}; use crate::common::{BitSet, HasLen};
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::space_usage::ByteCount; use crate::space_usage::ByteCount;
use crate::DocId; use crate::DocId;
@@ -10,8 +9,6 @@ use std::io::Write;
/// Write a delete `BitSet` /// Write a delete `BitSet`
/// ///
/// where `delete_bitset` is the set of deleted `DocId`. /// where `delete_bitset` is the set of deleted `DocId`.
/// Warning: this function does not call terminate. The caller is in charge of
/// closing the writer properly.
pub fn write_delete_bitset( pub fn write_delete_bitset(
delete_bitset: &BitSet, delete_bitset: &BitSet,
max_doc: u32, max_doc: u32,
@@ -40,41 +37,22 @@ pub fn write_delete_bitset(
/// Set of deleted `DocId`s. /// Set of deleted `DocId`s.
#[derive(Clone)] #[derive(Clone)]
pub struct DeleteBitSet { pub struct DeleteBitSet {
data: OwnedBytes, data: ReadOnlySource,
len: usize, len: usize,
} }
impl DeleteBitSet { impl DeleteBitSet {
#[cfg(test)] /// Opens a delete bitset given its data source.
pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet { pub fn open(data: ReadOnlySource) -> DeleteBitSet {
use crate::directory::{Directory, RAMDirectory, TerminatingWrite}; let num_deleted: usize = data
use std::path::Path;
assert!(docs.iter().all(|&doc| doc < max_doc));
let mut bitset = BitSet::with_max_value(max_doc);
for &doc in docs {
bitset.insert(doc);
}
let directory = RAMDirectory::create();
let path = Path::new("dummydeletebitset");
let mut wrt = directory.open_write(path).unwrap();
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
wrt.terminate().unwrap();
let file = directory.open_read(path).unwrap();
Self::open(file).unwrap()
}
/// Opens a delete bitset given its file.
pub fn open(file: FileSlice) -> crate::Result<DeleteBitSet> {
let bytes = file.read_bytes()?;
let num_deleted: usize = bytes
.as_slice() .as_slice()
.iter() .iter()
.map(|b| b.count_ones() as usize) .map(|b| b.count_ones() as usize)
.sum(); .sum();
Ok(DeleteBitSet { DeleteBitSet {
data: bytes, data,
len: num_deleted, len: num_deleted,
}) }
} }
/// Returns true iff the document is still "alive". In other words, if it has not been deleted. /// Returns true iff the document is still "alive". In other words, if it has not been deleted.
@@ -86,7 +64,7 @@ impl DeleteBitSet {
#[inline(always)] #[inline(always)]
pub fn is_deleted(&self, doc: DocId) -> bool { pub fn is_deleted(&self, doc: DocId) -> bool {
let byte_offset = doc / 8u32; let byte_offset = doc / 8u32;
let b: u8 = self.data.as_slice()[byte_offset as usize]; let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8; let shift = (doc & 7u32) as u8;
b & (1u8 << shift) != 0 b & (1u8 << shift) != 0
} }
@@ -105,35 +83,42 @@ impl HasLen for DeleteBitSet {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::DeleteBitSet; use super::*;
use crate::common::HasLen; use crate::directory::*;
use std::path::PathBuf;
#[test] fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
fn test_delete_bitset_empty() { let test_path = PathBuf::from("test");
let delete_bitset = DeleteBitSet::for_test(&[], 10); let mut directory = RAMDirectory::create();
for doc in 0..10 { {
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc)); let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
writer.terminate().unwrap();
} }
assert_eq!(delete_bitset.len(), 0); let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
for doc in 0..max_doc {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
} }
#[test] #[test]
fn test_delete_bitset() { fn test_delete_bitset() {
let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10); {
assert!(delete_bitset.is_alive(0)); let mut bitset = BitSet::with_max_value(10);
assert!(delete_bitset.is_deleted(1)); bitset.insert(1);
assert!(delete_bitset.is_alive(2)); bitset.insert(9);
assert!(delete_bitset.is_alive(3)); test_delete_bitset_helper(&bitset, 10);
assert!(delete_bitset.is_alive(4)); }
assert!(delete_bitset.is_alive(5)); {
assert!(delete_bitset.is_alive(6)); let mut bitset = BitSet::with_max_value(8);
assert!(delete_bitset.is_alive(6)); bitset.insert(1);
assert!(delete_bitset.is_alive(7)); bitset.insert(2);
assert!(delete_bitset.is_alive(8)); bitset.insert(3);
assert!(delete_bitset.is_deleted(9)); bitset.insert(5);
for doc in 0..10 { bitset.insert(7);
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc)); test_delete_bitset_helper(&bitset, 8);
} }
assert_eq!(delete_bitset.len(), 2);
} }
} }
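To make the bit layout behind `is_deleted` concrete: one bit per document, eight documents per byte, least significant bit first. A small self-contained sketch of the same addressing (plain Rust for illustration, not tantivy API):

fn is_deleted(bitset_bytes: &[u8], doc: u32) -> bool {
    let byte_offset = (doc / 8) as usize; // doc 9 -> byte 1
    let shift = (doc & 7) as u8; // doc 9 -> bit 1
    bitset_bytes[byte_offset] & (1u8 << shift) != 0
}

fn main() {
    // Docs 1 and 9 deleted: bit 1 of byte 0 and bit 1 of byte 1 are set.
    let bytes = [0b0000_0010u8, 0b0000_0010u8];
    assert!(is_deleted(&bytes, 1));
    assert!(is_deleted(&bytes, 9));
    assert!(!is_deleted(&bytes, 0));
}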


@@ -4,8 +4,8 @@ use std::result;
/// `FastFieldNotAvailableError` is returned when the /// `FastFieldNotAvailableError` is returned when the
/// user requested a fast field reader, and the field was not /// user requested a fast field reader, and the field was not
/// defined in the schema as a fast field. /// defined in the schema as a fast field.
#[derive(Debug, Error)] #[derive(Debug, Fail)]
#[error("Fast field not available: '{field_name:?}'")] #[fail(display = "Fast field not available: '{:?}'", field_name)]
pub struct FastFieldNotAvailableError { pub struct FastFieldNotAvailableError {
field_name: String, field_name: String,
} }


@@ -1,5 +1,4 @@
use super::MultiValueIntFastFieldReader; use super::MultiValueIntFastFieldReader;
use crate::error::DataCorruption;
use crate::schema::Facet; use crate::schema::Facet;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
@@ -63,73 +62,18 @@ impl FacetReader {
&mut self, &mut self,
facet_ord: TermOrdinal, facet_ord: TermOrdinal,
output: &mut Facet, output: &mut Facet,
) -> crate::Result<()> { ) -> Result<(), str::Utf8Error> {
let found_term = self let found_term = self
.term_dict .term_dict
.ord_to_term(facet_ord as u64, &mut self.buffer)?; .ord_to_term(facet_ord as u64, &mut self.buffer);
assert!(found_term, "Term ordinal {} not found.", facet_ord); assert!(found_term, "Term ordinal {} not found.", facet_ord);
let facet_str = str::from_utf8(&self.buffer[..]) let facet_str = str::from_utf8(&self.buffer[..])?;
.map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
output.set_facet_str(facet_str); output.set_facet_str(facet_str);
Ok(()) Ok(())
} }
/// Returns the list of facet ordinals associated with a document. /// Returns the list of facet ordinals associated with a document.
pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) { pub fn facet_ords(&mut self, doc: DocId, output: &mut Vec<u64>) {
self.term_ords.get_vals(doc, output); self.term_ords.get_vals(doc, output);
} }
} }
#[cfg(test)]
mod tests {
use crate::Index;
use crate::{
schema::{Facet, SchemaBuilder},
Document,
};
#[test]
fn test_facet_not_populated_for_all_docs() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::default();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(facet_field=>Facet::from_text("/a/b")));
index_writer.add_document(Document::default());
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let facet_reader = searcher
.segment_reader(0u32)
.facet_reader(facet_field)
.unwrap();
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert_eq!(&facet_ords, &[2u64]);
facet_reader.facet_ords(1u32, &mut facet_ords);
assert!(facet_ords.is_empty());
Ok(())
}
#[test]
fn test_facet_not_populated_for_any_docs() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::default();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(Document::default());
index_writer.add_document(Document::default());
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let facet_reader = searcher
.segment_reader(0u32)
.facet_reader(facet_field)
.unwrap();
let mut facet_ords = Vec::new();
facet_reader.facet_ords(0u32, &mut facet_ords);
assert!(facet_ords.is_empty());
facet_reader.facet_ords(1u32, &mut facet_ords);
assert!(facet_ords.is_empty());
Ok(())
}
}


@@ -33,14 +33,11 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common; use crate::common;
use crate::schema::Cardinality; use crate::schema::Cardinality;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Value; use crate::schema::Value;
use crate::{
chrono::{NaiveDateTime, Utc},
schema::Type,
};
mod bytes; mod bytes;
mod delete; mod delete;
@@ -79,9 +76,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
fn make_zero() -> Self { fn make_zero() -> Self {
Self::from_u64(0i64.to_u64()) Self::from_u64(0i64.to_u64())
} }
/// Returns the `schema::Type` for this FastValue.
fn to_type() -> Type;
} }
impl FastValue for u64 { impl FastValue for u64 {
@@ -104,10 +98,6 @@ impl FastValue for u64 {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
*self *self
} }
fn to_type() -> Type {
Type::U64
}
} }
impl FastValue for i64 { impl FastValue for i64 {
@@ -129,10 +119,6 @@ impl FastValue for i64 {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
*self as u64 *self as u64
} }
fn to_type() -> Type {
Type::I64
}
} }
impl FastValue for f64 { impl FastValue for f64 {
@@ -154,10 +140,6 @@ impl FastValue for f64 {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
self.to_bits() self.to_bits()
} }
fn to_type() -> Type {
Type::F64
}
} }
impl FastValue for crate::DateTime { impl FastValue for crate::DateTime {
@@ -180,10 +162,6 @@ impl FastValue for crate::DateTime {
fn as_u64(&self) -> u64 { fn as_u64(&self) -> u64 {
self.timestamp().as_u64() self.timestamp().as_u64()
} }
fn to_type() -> Type {
Type::Date
}
} }
fn value_to_u64(value: &Value) -> u64 { fn value_to_u64(value: &Value) -> u64 {
@@ -209,7 +187,6 @@ mod tests {
use crate::schema::FAST; use crate::schema::FAST;
use crate::schema::{Document, IntOptions}; use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader}; use crate::{Index, SegmentId, SegmentReader};
use common::HasLen;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -240,9 +217,9 @@ mod tests {
} }
#[test] #[test]
fn test_intfastfield_small() -> crate::Result<()> { fn test_intfastfield_small() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test")).unwrap(); let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap(); let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -255,24 +232,27 @@ mod tests {
.unwrap(); .unwrap();
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(&path).unwrap(); let source = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 36 as usize); {
let composite_file = CompositeFile::open(&file)?; assert_eq!(source.len(), 36 as usize);
let file = composite_file.open_read(*FIELD).unwrap(); }
let fast_field_reader = FastFieldReader::<u64>::open(file)?; {
assert_eq!(fast_field_reader.get(0), 13u64); let composite_file = CompositeFile::open(&source).unwrap();
assert_eq!(fast_field_reader.get(1), 14u64); let field_source = composite_file.open_read(*FIELD).unwrap();
assert_eq!(fast_field_reader.get(2), 2u64); let fast_field_reader = FastFieldReader::<u64>::open(field_source);
Ok(()) assert_eq!(fast_field_reader.get(0), 13u64);
assert_eq!(fast_field_reader.get(1), 14u64);
assert_eq!(fast_field_reader.get(2), 2u64);
}
} }
#[test] #[test]
fn test_intfastfield_large() -> crate::Result<()> { fn test_intfastfield_large() {
let path = Path::new("test"); let path = Path::new("test");
let directory: RAMDirectory = RAMDirectory::create(); let mut directory: RAMDirectory = RAMDirectory::create();
{ {
let write: WritePtr = directory.open_write(Path::new("test"))?; let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write)?; let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA); let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
fast_field_writers.add_document(&doc!(*FIELD=>4u64)); fast_field_writers.add_document(&doc!(*FIELD=>4u64));
fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64)); fast_field_writers.add_document(&doc!(*FIELD=>14_082_001u64));
@@ -283,15 +263,19 @@ mod tests {
fast_field_writers.add_document(&doc!(*FIELD=>1_002u64)); fast_field_writers.add_document(&doc!(*FIELD=>1_002u64));
fast_field_writers.add_document(&doc!(*FIELD=>1_501u64)); fast_field_writers.add_document(&doc!(*FIELD=>1_501u64));
fast_field_writers.add_document(&doc!(*FIELD=>215u64)); fast_field_writers.add_document(&doc!(*FIELD=>215u64));
fast_field_writers.serialize(&mut serializer, &HashMap::new())?; fast_field_writers
serializer.close()?; .serialize(&mut serializer, &HashMap::new())
.unwrap();
serializer.close().unwrap();
} }
-        let file = directory.open_read(&path)?;
-        assert_eq!(file.len(), 61 as usize);
+        let source = directory.open_read(&path).unwrap();
         {
-            let fast_fields_composite = CompositeFile::open(&file)?;
+            assert_eq!(source.len(), 61 as usize);
+        }
+        {
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
             let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
+            let fast_field_reader = FastFieldReader::<u64>::open(data);
             assert_eq!(fast_field_reader.get(0), 4u64);
             assert_eq!(fast_field_reader.get(1), 14_082_001u64);
             assert_eq!(fast_field_reader.get(2), 3_052u64);
@@ -302,13 +286,12 @@ mod tests {
             assert_eq!(fast_field_reader.get(7), 1_501u64);
             assert_eq!(fast_field_reader.get(8), 215u64);
         }
-        Ok(())
     }
     #[test]
-    fn test_intfastfield_null_amplitude() -> crate::Result<()> {
+    fn test_intfastfield_null_amplitude() {
         let path = Path::new("test");
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
         {
             let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
@@ -322,23 +305,24 @@ mod tests {
                 .unwrap();
             serializer.close().unwrap();
         }
-        let file = directory.open_read(&path).unwrap();
-        assert_eq!(file.len(), 34 as usize);
+        let source = directory.open_read(&path).unwrap();
         {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            assert_eq!(source.len(), 34 as usize);
+        }
+        {
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
             let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
+            let fast_field_reader = FastFieldReader::<u64>::open(data);
             for doc in 0..10_000 {
                 assert_eq!(fast_field_reader.get(doc), 100_000u64);
             }
         }
-        Ok(())
     }
     #[test]
-    fn test_intfastfield_large_numbers() -> crate::Result<()> {
+    fn test_intfastfield_large_numbers() {
         let path = Path::new("test");
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
         {
             let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
@@ -354,12 +338,14 @@ mod tests {
                 .unwrap();
             serializer.close().unwrap();
         }
-        let file = directory.open_read(&path).unwrap();
-        assert_eq!(file.len(), 80042 as usize);
+        let source = directory.open_read(&path).unwrap();
         {
-            let fast_fields_composite = CompositeFile::open(&file)?;
+            assert_eq!(source.len(), 80042 as usize);
+        }
+        {
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
             let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
+            let fast_field_reader = FastFieldReader::<u64>::open(data);
             assert_eq!(fast_field_reader.get(0), 0u64);
             for doc in 1..10_001 {
                 assert_eq!(
@@ -368,13 +354,12 @@ mod tests {
                 );
             }
         }
-        Ok(())
     }
     #[test]
-    fn test_signed_intfastfield() -> crate::Result<()> {
+    fn test_signed_intfastfield() {
         let path = Path::new("test");
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
         let mut schema_builder = Schema::builder();
         let i64_field = schema_builder.add_i64_field("field", FAST);
@@ -393,12 +378,14 @@ mod tests {
                 .unwrap();
             serializer.close().unwrap();
         }
-        let file = directory.open_read(&path).unwrap();
-        assert_eq!(file.len(), 17709 as usize);
+        let source = directory.open_read(&path).unwrap();
         {
-            let fast_fields_composite = CompositeFile::open(&file)?;
+            assert_eq!(source.len(), 17709 as usize);
+        }
+        {
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
             let data = fast_fields_composite.open_read(i64_field).unwrap();
-            let fast_field_reader = FastFieldReader::<i64>::open(data)?;
+            let fast_field_reader = FastFieldReader::<i64>::open(data);
             assert_eq!(fast_field_reader.min_value(), -100i64);
             assert_eq!(fast_field_reader.max_value(), 9_999i64);
@@ -411,13 +398,12 @@ mod tests {
                 assert_eq!(buffer[i], -100i64 + 53i64 + i as i64);
             }
         }
-        Ok(())
     }
     #[test]
-    fn test_signed_intfastfield_default_val() -> crate::Result<()> {
+    fn test_signed_intfastfield_default_val() {
         let path = Path::new("test");
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
         let mut schema_builder = Schema::builder();
         let i64_field = schema_builder.add_i64_field("field", FAST);
         let schema = schema_builder.build();
@@ -434,14 +420,13 @@ mod tests {
             serializer.close().unwrap();
         }
-        let file = directory.open_read(&path).unwrap();
+        let source = directory.open_read(&path).unwrap();
         {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
             let data = fast_fields_composite.open_read(i64_field).unwrap();
-            let fast_field_reader = FastFieldReader::<i64>::open(data)?;
+            let fast_field_reader = FastFieldReader::<i64>::open(data);
             assert_eq!(fast_field_reader.get(0u32), 0i64);
         }
-        Ok(())
     }
     // Warning: this generates the same permutation at each call
@@ -452,26 +437,28 @@ mod tests {
     }
     #[test]
-    fn test_intfastfield_permutation() -> crate::Result<()> {
+    fn test_intfastfield_permutation() {
         let path = Path::new("test");
         let permutation = generate_permutation();
         let n = permutation.len();
-        let directory = RAMDirectory::create();
+        let mut directory = RAMDirectory::create();
         {
-            let write: WritePtr = directory.open_write(Path::new("test"))?;
-            let mut serializer = FastFieldSerializer::from_write(write)?;
+            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
+            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
             let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
             for &x in &permutation {
                 fast_field_writers.add_document(&doc!(*FIELD=>x));
             }
-            fast_field_writers.serialize(&mut serializer, &HashMap::new())?;
-            serializer.close()?;
+            fast_field_writers
+                .serialize(&mut serializer, &HashMap::new())
+                .unwrap();
+            serializer.close().unwrap();
         }
-        let file = directory.open_read(&path)?;
+        let source = directory.open_read(&path).unwrap();
         {
-            let fast_fields_composite = CompositeFile::open(&file)?;
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
             let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data)?;
+            let fast_field_reader = FastFieldReader::<u64>::open(data);
             let mut a = 0u64;
             for _ in 0..n {
@@ -479,7 +466,6 @@ mod tests {
                 a = fast_field_reader.get(a as u32);
             }
         }
-        Ok(())
     }
     #[test]
@@ -488,7 +474,7 @@ mod tests {
        let date_field = schema_builder.add_date_field("date", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
        index_writer.commit().unwrap();
@@ -525,7 +511,7 @@ mod tests {
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(
            date_field => crate::DateTime::from_u64(1i64.to_u64()),
@@ -612,7 +598,7 @@ mod bench {
    fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
        let path = Path::new("test");
        let permutation = generate_permutation();
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
        {
            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -625,11 +611,11 @@ mod bench {
                .unwrap();
            serializer.close().unwrap();
        }
-        let file = directory.open_read(&path).unwrap();
+        let source = directory.open_read(&path).unwrap();
        {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
+            let fast_field_reader = FastFieldReader::<u64>::open(data);
            b.iter(|| {
                let n = test::black_box(7000u32);
@@ -646,7 +632,7 @@ mod bench {
    fn bench_intfastfield_fflookup(b: &mut Bencher) {
        let path = Path::new("test");
        let permutation = generate_permutation();
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
        {
            let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
            let mut serializer = FastFieldSerializer::from_write(write).unwrap();
@@ -659,11 +645,11 @@ mod bench {
                .unwrap();
            serializer.close().unwrap();
        }
-        let file = directory.open_read(&path).unwrap();
+        let source = directory.open_read(&path).unwrap();
        {
-            let fast_fields_composite = CompositeFile::open(&file).unwrap();
+            let fast_fields_composite = CompositeFile::open(&source).unwrap();
            let data = fast_fields_composite.open_read(*FIELD).unwrap();
-            let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
+            let fast_field_reader = FastFieldReader::<u64>::open(data);
            b.iter(|| {
                let n = test::black_box(1000u32);
@@ -25,7 +25,7 @@ mod tests {
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(field=>1u64, field=>3u64));
         index_writer.add_document(doc!());
         index_writer.add_document(doc!(field=>4u64));
@@ -64,7 +64,7 @@ mod tests {
         schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let first_time_stamp = chrono::Utc::now();
         index_writer.add_document(
             doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
@@ -100,7 +100,6 @@ mod tests {
                 .get_first(date_field)
                 .expect("cannot find value")
                 .date_value()
-                .unwrap()
                 .timestamp(),
             first_time_stamp.timestamp()
         );
@@ -109,7 +108,7 @@ mod tests {
                 .get_first(time_i)
                 .expect("cannot find value")
                 .i64_value(),
-            Some(1i64)
+            1i64
         );
     }
 }
@@ -132,7 +131,6 @@ mod tests {
                 .get_first(date_field)
                 .expect("cannot find value")
                 .date_value()
-                .unwrap()
                 .timestamp(),
             two_secs_ahead.timestamp()
         );
@@ -141,7 +139,7 @@ mod tests {
                 .get_first(time_i)
                 .expect("cannot find value")
                 .i64_value(),
-            Some(3i64)
+            3i64
         );
     }
 }
@@ -188,7 +186,7 @@ mod tests {
         );
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(field=> 1i64, field => 3i64));
         index_writer.add_document(doc!());
         index_writer.add_document(doc!(field=> -4i64));
@@ -199,14 +197,22 @@ mod tests {
         let segment_reader = searcher.segment_reader(0);
         let mut vals = Vec::new();
         let multi_value_reader = segment_reader.fast_fields().i64s(field).unwrap();
-        multi_value_reader.get_vals(2, &mut vals);
-        assert_eq!(&vals, &[-4i64]);
-        multi_value_reader.get_vals(0, &mut vals);
-        assert_eq!(&vals, &[1i64, 3i64]);
-        multi_value_reader.get_vals(1, &mut vals);
-        assert!(vals.is_empty());
-        multi_value_reader.get_vals(3, &mut vals);
-        assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
+        {
+            multi_value_reader.get_vals(2, &mut vals);
+            assert_eq!(&vals, &[-4i64]);
+        }
+        {
+            multi_value_reader.get_vals(0, &mut vals);
+            assert_eq!(&vals, &[1i64, 3i64]);
+        }
+        {
+            multi_value_reader.get_vals(1, &mut vals);
+            assert!(vals.is_empty());
+        }
+        {
+            multi_value_reader.get_vals(3, &mut vals);
+            assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
+        }
     }
     #[test]
     #[ignore]
@@ -215,7 +221,7 @@ mod tests {
         let field = schema_builder.add_facet_field("facetfield");
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         for i in 0..100_000 {
             index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
         }
@@ -74,7 +74,7 @@ mod tests {
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index
-            .writer_for_tests()
+            .writer_with_num_threads(1, 30_000_000)
             .expect("Failed to create index writer.");
         index_writer.add_document(doc!(
             facet_field => Facet::from("/category/cat2"),
@@ -143,7 +143,7 @@ impl MultiValueIntFastFieldWriter {
             .iter()
             .map(|val| *mapping.get(val).expect("Missing term ordinal"));
         doc_vals.extend(remapped_vals);
-        doc_vals.sort_unstable();
+        doc_vals.sort();
         for &val in &doc_vals {
             value_serializer.add_val(val)?;
         }
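Note on the left-hand change above: for a vector of primitive term ordinals, a stable sort buys nothing (equal integers are indistinguishable), and `sort_unstable` sorts in place without the temporary buffer that the stable merge sort allocates. A minimal illustration, with plain `u64`s standing in for `DeleteOperation`-era term ordinals:

    let mut doc_vals: Vec<u64> = vec![42, 7, 7, 13];
    doc_vals.sort_unstable(); // equal u64s are indistinguishable, so stability is irrelevant
    assert_eq!(doc_vals, [7, 7, 13, 42]);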
@@ -3,12 +3,13 @@ use crate::common::bitpacker::BitUnpacker;
 use crate::common::compute_num_bits;
 use crate::common::BinarySerializable;
 use crate::common::CompositeFile;
-use crate::directory::FileSlice;
+use crate::directory::ReadOnlySource;
 use crate::directory::{Directory, RAMDirectory, WritePtr};
 use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
 use crate::schema::Schema;
 use crate::schema::FAST;
 use crate::DocId;
+use owning_ref::OwningRef;
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::path::Path;
@@ -19,39 +20,37 @@ use std::path::Path;
 /// fast field is required.
 #[derive(Clone)]
 pub struct FastFieldReader<Item: FastValue> {
-    bit_unpacker: BitUnpacker,
+    bit_unpacker: BitUnpacker<OwningRef<ReadOnlySource, [u8]>>,
     min_value_u64: u64,
     max_value_u64: u64,
     _phantom: PhantomData<Item>,
 }
 impl<Item: FastValue> FastFieldReader<Item> {
-    /// Opens a fast field given a file.
-    pub fn open(file: FileSlice) -> crate::Result<Self> {
-        let mut bytes = file.read_bytes()?;
-        let min_value = u64::deserialize(&mut bytes)?;
-        let amplitude = u64::deserialize(&mut bytes)?;
+    /// Opens a fast field given a source.
+    pub fn open(data: ReadOnlySource) -> Self {
+        let min_value: u64;
+        let amplitude: u64;
+        {
+            let mut cursor = data.as_slice();
+            min_value =
+                u64::deserialize(&mut cursor).expect("Failed to read the min_value of fast field.");
+            amplitude =
+                u64::deserialize(&mut cursor).expect("Failed to read the amplitude of fast field.");
+        }
         let max_value = min_value + amplitude;
         let num_bits = compute_num_bits(amplitude);
-        let bit_unpacker = BitUnpacker::new(bytes, num_bits);
-        Ok(FastFieldReader {
+        let owning_ref = OwningRef::new(data).map(|data| &data[16..]);
+        let bit_unpacker = BitUnpacker::new(owning_ref, num_bits);
+        FastFieldReader {
             min_value_u64: min_value,
             max_value_u64: max_value,
             bit_unpacker,
             _phantom: PhantomData,
-        })
-    }
-
-    pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
-        FastFieldReader {
-            bit_unpacker: self.bit_unpacker,
-            min_value_u64: self.min_value_u64,
-            max_value_u64: self.max_value_u64,
-            _phantom: PhantomData,
         }
     }

-    pub(crate) fn cast<TFastValue: FastValue>(self) -> FastFieldReader<TFastValue> {
+    pub(crate) fn into_u64_reader(self) -> FastFieldReader<u64> {
         FastFieldReader {
             bit_unpacker: self.bit_unpacker,
             min_value_u64: self.min_value_u64,
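Note: both sides of the hunk above imply the same on-disk layout for a single-value fast field: a 16-byte header (`min_value` then `amplitude`, each a little-endian `u64`), followed by per-document deltas bit-packed at `compute_num_bits(amplitude)` bits each. A standalone sketch of the decode, under stated assumptions (hypothetical function name, `num_bits <= 56` so one 8-byte read always covers a value, and writer padding at the end of the column; the real `BitUnpacker` also handles wider values):

    use std::convert::TryInto;

    /// Sketch only: decode value `idx` from a column laid out as
    /// [min_value: u64 LE][amplitude: u64 LE][bit-packed deltas].
    fn get_fast_field_val(data: &[u8], idx: usize) -> u64 {
        let min_value = u64::from_le_bytes(data[0..8].try_into().unwrap());
        let amplitude = u64::from_le_bytes(data[8..16].try_into().unwrap());
        // Smallest width able to represent any delta in [0, amplitude],
        // mirroring compute_num_bits.
        let num_bits = (64 - amplitude.leading_zeros()) as usize;
        if num_bits == 0 {
            return min_value; // constant column: every doc stores min_value
        }
        let packed = &data[16..];
        let bit_addr = idx * num_bits;
        let byte_addr = bit_addr / 8;
        // Assumes the writer pads the column so this 8-byte read stays in bounds.
        let word = u64::from_le_bytes(packed[byte_addr..byte_addr + 8].try_into().unwrap());
        let mask = (1u64 << num_bits) - 1;
        min_value + ((word >> (bit_addr % 8)) & mask)
    }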
@@ -136,7 +135,7 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
         let field = schema_builder.add_u64_field("field", FAST);
         let schema = schema_builder.build();
         let path = Path::new("__dummy__");
-        let directory: RAMDirectory = RAMDirectory::create();
+        let mut directory: RAMDirectory = RAMDirectory::create();
         {
             let write: WritePtr = directory
                 .open_write(path)
@@ -158,11 +157,12 @@ impl<Item: FastValue> From<Vec<Item>> for FastFieldReader<Item> {
             serializer.close().unwrap();
         }
-        let file = directory.open_read(path).expect("Failed to open the file");
-        let composite_file = CompositeFile::open(&file).expect("Failed to read the composite file");
-        let field_file = composite_file
+        let source = directory.open_read(path).expect("Failed to open the file");
+        let composite_file =
+            CompositeFile::open(&source).expect("Failed to read the composite file");
+        let field_source = composite_file
             .open_read(field)
             .expect("File component not found");
-        FastFieldReader::open(field_file).unwrap()
+        FastFieldReader::open(field_source)
     }
 }
@@ -1,6 +1,6 @@
 use crate::common::CompositeFile;
+use crate::fastfield::BytesFastFieldReader;
 use crate::fastfield::MultiValueIntFastFieldReader;
-use crate::fastfield::{BytesFastFieldReader, FastValue};
 use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
 use crate::schema::{Cardinality, Field, FieldType, Schema};
 use crate::space_usage::PerFieldSpaceUsage;
@@ -68,52 +68,45 @@ impl FastFieldReaders {
         };
         for (field, field_entry) in schema.fields() {
             let field_type = field_entry.field_type();
-            if let FieldType::Bytes(bytes_option) = field_type {
-                if !bytes_option.is_fast() {
-                    continue;
-                }
-                let fast_field_idx_file = fast_fields_composite
+            if field_type == &FieldType::Bytes {
+                let idx_reader = fast_fields_composite
                     .open_read_with_idx(field, 0)
-                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
-                let idx_reader = FastFieldReader::open(fast_field_idx_file)?;
+                    .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
+                    .map(FastFieldReader::open)?;
                 let data = fast_fields_composite
                     .open_read_with_idx(field, 1)
                     .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
-                let bytes_fast_field_reader = BytesFastFieldReader::open(idx_reader, data)?;
                 fast_field_readers
                     .fast_bytes
-                    .insert(field, bytes_fast_field_reader);
+                    .insert(field, BytesFastFieldReader::open(idx_reader, data));
             } else if let Some((fast_type, cardinality)) = type_and_cardinality(field_type) {
                 match cardinality {
                     Cardinality::SingleValue => {
                         if let Some(fast_field_data) = fast_fields_composite.open_read(field) {
                             match fast_type {
                                 FastType::U64 => {
-                                    let fast_field_reader = FastFieldReader::open(fast_field_data)?;
+                                    let fast_field_reader = FastFieldReader::open(fast_field_data);
                                     fast_field_readers
                                         .fast_field_u64
                                         .insert(field, fast_field_reader);
                                 }
                                 FastType::I64 => {
-                                    let fast_field_reader =
-                                        FastFieldReader::open(fast_field_data.clone())?;
-                                    fast_field_readers
-                                        .fast_field_i64
-                                        .insert(field, fast_field_reader);
+                                    fast_field_readers.fast_field_i64.insert(
+                                        field,
+                                        FastFieldReader::open(fast_field_data.clone()),
+                                    );
                                 }
                                 FastType::F64 => {
-                                    let fast_field_reader =
-                                        FastFieldReader::open(fast_field_data.clone())?;
-                                    fast_field_readers
-                                        .fast_field_f64
-                                        .insert(field, fast_field_reader);
+                                    fast_field_readers.fast_field_f64.insert(
+                                        field,
+                                        FastFieldReader::open(fast_field_data.clone()),
+                                    );
                                 }
                                 FastType::Date => {
-                                    let fast_field_reader =
-                                        FastFieldReader::open(fast_field_data.clone())?;
-                                    fast_field_readers
-                                        .fast_field_date
-                                        .insert(field, fast_field_reader);
+                                    fast_field_readers.fast_field_date.insert(
+                                        field,
+                                        FastFieldReader::open(fast_field_data.clone()),
+                                    );
                                 }
                             }
                         } else {
@@ -124,10 +117,10 @@ impl FastFieldReaders {
                 let idx_opt = fast_fields_composite.open_read_with_idx(field, 0);
                 let data_opt = fast_fields_composite.open_read_with_idx(field, 1);
                 if let (Some(fast_field_idx), Some(fast_field_data)) = (idx_opt, data_opt) {
-                    let idx_reader = FastFieldReader::open(fast_field_idx)?;
+                    let idx_reader = FastFieldReader::open(fast_field_idx);
                     match fast_type {
                         FastType::I64 => {
-                            let vals_reader = FastFieldReader::open(fast_field_data)?;
+                            let vals_reader = FastFieldReader::open(fast_field_data);
                             let multivalued_int_fast_field =
                                 MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                             fast_field_readers
@@ -135,7 +128,7 @@ impl FastFieldReaders {
                                 .insert(field, multivalued_int_fast_field);
                         }
                         FastType::U64 => {
-                            let vals_reader = FastFieldReader::open(fast_field_data)?;
+                            let vals_reader = FastFieldReader::open(fast_field_data);
                             let multivalued_int_fast_field =
                                 MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                             fast_field_readers
@@ -143,7 +136,7 @@ impl FastFieldReaders {
                                 .insert(field, multivalued_int_fast_field);
                         }
                         FastType::F64 => {
-                            let vals_reader = FastFieldReader::open(fast_field_data)?;
+                            let vals_reader = FastFieldReader::open(fast_field_data);
                             let multivalued_int_fast_field =
                                 MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                             fast_field_readers
@@ -151,7 +144,7 @@ impl FastFieldReaders {
                                 .insert(field, multivalued_int_fast_field);
                         }
                         FastType::Date => {
-                            let vals_reader = FastFieldReader::open(fast_field_data)?;
+                            let vals_reader = FastFieldReader::open(fast_field_data);
                             let multivalued_int_fast_field =
                                 MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                             fast_field_readers
@@ -201,14 +194,6 @@ impl FastFieldReaders {
         None
     }

-    pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
-        &self,
-        field: Field,
-    ) -> Option<FastFieldReader<TFastValue>> {
-        self.u64_lenient(field)
-            .map(|fast_field_reader| fast_field_reader.cast())
-    }
-
     /// Returns the `i64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a i64 fast field, this method returns `None`.
@@ -33,7 +33,7 @@ impl FastFieldsWriter {
         let mut bytes_value_writers = Vec::new();
         for (field, field_entry) in schema.fields() {
-            match field_entry.field_type() {
+            match *field_entry.field_type() {
                 FieldType::I64(ref int_options)
                 | FieldType::U64(ref int_options)
                 | FieldType::F64(ref int_options)
@@ -56,11 +56,9 @@ impl FastFieldsWriter {
                     let fast_field_writer = MultiValueIntFastFieldWriter::new(field, true);
                     multi_values_writers.push(fast_field_writer);
                 }
-                FieldType::Bytes(bytes_option) => {
-                    if bytes_option.is_fast() {
-                        let fast_field_writer = BytesFastFieldWriter::new(field);
-                        bytes_value_writers.push(fast_field_writer);
-                    }
+                FieldType::Bytes => {
+                    let fast_field_writer = BytesFastFieldWriter::new(field);
+                    bytes_value_writers.push(fast_field_writer);
                 }
                 _ => {}
             }
@@ -128,7 +126,6 @@ impl FastFieldsWriter {
         for field_writer in &self.single_value_writers {
             field_writer.serialize(serializer)?;
         }
-
         for field_writer in &self.multi_values_writers {
             let field = field_writer.field();
             field_writer.serialize(serializer, mapping.get(&field))?;
@@ -1,7 +1,6 @@
 use super::{fieldnorm_to_id, id_to_fieldnorm};
 use crate::common::CompositeFile;
-use crate::directory::FileSlice;
-use crate::directory::OwnedBytes;
+use crate::directory::ReadOnlySource;
 use crate::schema::Field;
 use crate::space_usage::PerFieldSpaceUsage;
 use crate::DocId;
@@ -20,21 +19,16 @@ pub struct FieldNormReaders {
 impl FieldNormReaders {
     /// Creates a field norm reader.
-    pub fn open(file: FileSlice) -> crate::Result<FieldNormReaders> {
-        let data = CompositeFile::open(&file)?;
+    pub fn new(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
+        let data = CompositeFile::open(&source)?;
         Ok(FieldNormReaders {
             data: Arc::new(data),
         })
     }

     /// Returns the FieldNormReader for a specific field.
-    pub fn get_field(&self, field: Field) -> crate::Result<Option<FieldNormReader>> {
-        if let Some(file) = self.data.open_read(field) {
-            let fieldnorm_reader = FieldNormReader::open(file)?;
-            Ok(Some(fieldnorm_reader))
-        } else {
-            Ok(None)
-        }
+    pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
+        self.data.open_read(field).map(FieldNormReader::open)
     }

     /// Return a break down of the space usage per field.
@@ -61,56 +55,19 @@ impl FieldNormReaders {
 /// precompute computationally expensive functions of the fieldnorm
 /// in a very short array.
 #[derive(Clone)]
-pub struct FieldNormReader(ReaderImplEnum);
-
-impl From<ReaderImplEnum> for FieldNormReader {
-    fn from(reader_enum: ReaderImplEnum) -> FieldNormReader {
-        FieldNormReader(reader_enum)
-    }
-}
-
-#[derive(Clone)]
-enum ReaderImplEnum {
-    FromData(OwnedBytes),
-    Const {
-        num_docs: u32,
-        fieldnorm_id: u8,
-        fieldnorm: u32,
-    },
+pub struct FieldNormReader {
+    data: ReadOnlySource,
 }

 impl FieldNormReader {
-    /// Creates a `FieldNormReader` with a constant fieldnorm.
-    ///
-    /// The fieldnorm will be subjected to compression as if it was coming
-    /// from an array-backed fieldnorm reader.
-    pub fn constant(num_docs: u32, fieldnorm: u32) -> FieldNormReader {
-        let fieldnorm_id = fieldnorm_to_id(fieldnorm);
-        let fieldnorm = id_to_fieldnorm(fieldnorm_id);
-        ReaderImplEnum::Const {
-            num_docs,
-            fieldnorm_id,
-            fieldnorm,
-        }
-        .into()
-    }
-
-    /// Opens a field norm reader given its file.
-    pub fn open(fieldnorm_file: FileSlice) -> crate::Result<Self> {
-        let data = fieldnorm_file.read_bytes()?;
-        Ok(FieldNormReader::new(data))
-    }
-
-    fn new(data: OwnedBytes) -> Self {
-        ReaderImplEnum::FromData(data).into()
+    /// Opens a field norm reader given its data source.
+    pub fn open(data: ReadOnlySource) -> Self {
+        FieldNormReader { data }
     }

     /// Returns the number of documents in this segment.
     pub fn num_docs(&self) -> u32 {
-        match &self.0 {
-            ReaderImplEnum::FromData(data) => data.len() as u32,
-            ReaderImplEnum::Const { num_docs, .. } => *num_docs,
-        }
+        self.data.len() as u32
     }

     /// Returns the `fieldnorm` associated to a doc id.
@@ -123,25 +80,15 @@ impl FieldNormReader {
     /// The fieldnorm is effectively decoded from the
     /// `fieldnorm_id` by doing a simple table lookup.
     pub fn fieldnorm(&self, doc_id: DocId) -> u32 {
-        match &self.0 {
-            ReaderImplEnum::FromData(data) => {
-                let fieldnorm_id = data.as_slice()[doc_id as usize];
-                id_to_fieldnorm(fieldnorm_id)
-            }
-            ReaderImplEnum::Const { fieldnorm, .. } => *fieldnorm,
-        }
+        let fieldnorm_id = self.fieldnorm_id(doc_id);
+        id_to_fieldnorm(fieldnorm_id)
     }

     /// Returns the `fieldnorm_id` associated to a document.
     #[inline(always)]
     pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
-        match &self.0 {
-            ReaderImplEnum::FromData(data) => {
-                let fieldnorm_id = data.as_slice()[doc_id as usize];
-                fieldnorm_id
-            }
-            ReaderImplEnum::Const { fieldnorm_id, .. } => *fieldnorm_id,
-        }
+        let fielnorms_data = self.data.as_slice();
+        fielnorms_data[doc_id as usize]
     }

     /// Converts a `fieldnorm_id` into a fieldnorm.
@@ -156,48 +103,19 @@ impl FieldNormReader {
     pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
         fieldnorm_to_id(fieldnorm)
     }
+}

-    #[cfg(test)]
-    pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
+#[cfg(test)]
+impl From<&[u32]> for FieldNormReader {
+    fn from(field_norms: &[u32]) -> FieldNormReader {
         let field_norms_id = field_norms
             .iter()
             .cloned()
             .map(FieldNormReader::fieldnorm_to_id)
             .collect::<Vec<u8>>();
-        let field_norms_data = OwnedBytes::new(field_norms_id);
-        FieldNormReader::new(field_norms_data)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::fieldnorm::FieldNormReader;
-
-    #[test]
-    fn test_from_fieldnorms_array() {
-        let fieldnorms = &[1, 2, 3, 4, 1_000_000];
-        let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
-        assert_eq!(fieldnorm_reader.num_docs(), 5);
-        assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
-        assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
-        assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
-        assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
-        assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
-    }
-
-    #[test]
-    fn test_const_fieldnorm_reader_small_fieldnorm_id() {
-        let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 10u32);
-        assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
-        assert_eq!(fieldnorm_reader.fieldnorm(0u32), 10u32);
-        assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 10u8);
-    }
-
-    #[test]
-    fn test_const_fieldnorm_reader_large_fieldnorm_id() {
-        let fieldnorm_reader = FieldNormReader::constant(1_000_000u32, 300u32);
-        assert_eq!(fieldnorm_reader.num_docs(), 1_000_000u32);
-        assert_eq!(fieldnorm_reader.fieldnorm(0u32), 280u32);
-        assert_eq!(fieldnorm_reader.fieldnorm_id(0u32), 72u8);
+        let field_norms_data = ReadOnlySource::from(field_norms_id);
+        FieldNormReader {
+            data: field_norms_data,
+        }
     }
 }
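Note: the tests deleted on the left-hand side are the best documentation of the fieldnorm compression scheme: one byte per document, exact for small fieldnorms, lossy (snapping down to a bucket boundary) for large ones. The expected values below are taken verbatim from those deleted tests; the sketch uses the left-hand side's `for_test` constructor (the right-hand side's equivalent is `FieldNormReader::from(&[...])`):

    // Round-trip through the one-byte fieldnorm_id table.
    let fieldnorm_reader = FieldNormReader::for_test(&[1, 2, 3, 4, 300, 1_000_000]);
    assert_eq!(fieldnorm_reader.fieldnorm(3), 4);       // small values survive exactly
    assert_eq!(fieldnorm_reader.fieldnorm(4), 280);     // 300 snaps down to id 72 -> 280
    assert_eq!(fieldnorm_reader.fieldnorm(5), 983_064); // ~1.7% relative error at one million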
@@ -4,7 +4,7 @@ use super::fieldnorm_to_id;
 use super::FieldNormsSerializer;
 use crate::schema::Field;
 use crate::schema::Schema;
-use std::{io, iter};
+use std::io;

 /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
 /// of each document for each field with field norms.
@@ -44,9 +44,7 @@ impl FieldNormsWriter {
             .unwrap_or(0);
         FieldNormsWriter {
             fields,
-            fieldnorms_buffer: iter::repeat_with(Vec::new)
-                .take(max_field)
-                .collect::<Vec<_>>(),
+            fieldnorms_buffer: (0..max_field).map(|_| Vec::new()).collect::<Vec<_>>(),
         }
     }
@@ -1,93 +1,45 @@
-use crate::Index;
-use crate::Searcher;
-use crate::{doc, schema::*};
 use rand::thread_rng;
-use rand::Rng;
 use std::collections::HashSet;
+use crate::schema::*;
+use crate::Index;
+use crate::Searcher;
+use rand::Rng;

-fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
+fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
     assert!(searcher.segment_readers().len() < 20);
     assert_eq!(searcher.num_docs() as usize, vals.len());
-    for segment_reader in searcher.segment_readers() {
-        let store_reader = segment_reader.get_store_reader()?;
-        for doc_id in 0..segment_reader.max_doc() {
-            let _doc = store_reader.get(doc_id)?;
-        }
-    }
-    Ok(())
 }

 #[test]
 #[ignore]
-fn test_functional_store() -> crate::Result<()> {
-    let mut schema_builder = Schema::builder();
-    let id_field = schema_builder.add_u64_field("id", INDEXED | STORED);
-    let schema = schema_builder.build();
-    let index = Index::create_in_ram(schema);
-    let reader = index.reader()?;
-    let mut rng = thread_rng();
-    let mut index_writer = index.writer_with_num_threads(3, 12_000_000)?;
-    let mut doc_set: Vec<u64> = Vec::new();
-    let mut doc_id = 0u64;
-    for iteration in 0..500 {
-        dbg!(iteration);
-        let num_docs: usize = rng.gen_range(0..4);
-        if doc_set.len() >= 1 {
-            let doc_to_remove_id = rng.gen_range(0..doc_set.len());
-            let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
-            index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
-        }
-        for _ in 0..num_docs {
-            doc_set.push(doc_id);
-            index_writer.add_document(doc!(id_field=>doc_id));
-            doc_id += 1;
-        }
-        index_writer.commit()?;
-        reader.reload()?;
-        let searcher = reader.searcher();
-        check_index_content(&searcher, &doc_set)?;
-    }
-    Ok(())
-}
-
-#[test]
-#[ignore]
-fn test_functional_indexing() -> crate::Result<()> {
+fn test_indexing() {
     let mut schema_builder = Schema::builder();
     let id_field = schema_builder.add_u64_field("id", INDEXED);
     let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
     let schema = schema_builder.build();
-    let index = Index::create_from_tempdir(schema)?;
-    let reader = index.reader()?;
+    let index = Index::create_from_tempdir(schema).unwrap();
+    let reader = index.reader().unwrap();
     let mut rng = thread_rng();
-    let mut index_writer = index.writer_with_num_threads(3, 120_000_000)?;
+    let mut index_writer = index.writer_with_num_threads(3, 120_000_000).unwrap();
     let mut committed_docs: HashSet<u64> = HashSet::new();
     let mut uncommitted_docs: HashSet<u64> = HashSet::new();
     for _ in 0..200 {
-        let random_val = rng.gen_range(0..20);
+        let random_val = rng.gen_range(0, 20);
         if random_val == 0 {
-            index_writer.commit()?;
+            index_writer.commit().expect("Commit failed");
             committed_docs.extend(&uncommitted_docs);
             uncommitted_docs.clear();
-            reader.reload()?;
+            reader.reload().unwrap();
             let searcher = reader.searcher();
             // check that everything is correct.
-            check_index_content(
-                &searcher,
-                &committed_docs.iter().cloned().collect::<Vec<u64>>(),
-            )?;
+            check_index_content(&searcher, &committed_docs);
         } else {
             if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
                 let doc_id_term = Term::from_field_u64(id_field, random_val);
@@ -103,5 +55,4 @@ fn test_functional_indexing() -> crate::Result<()> {
             }
         }
     }
-    Ok(())
 }
@@ -53,7 +53,7 @@ impl DeleteQueue {
             return block;
         }
         let block = Arc::new(Block {
-            operations: Arc::new([]),
+            operations: Arc::default(),
             next: NextBlock::from(self.clone()),
         });
         wlock.last_block = Arc::downgrade(&block);
@@ -108,7 +108,7 @@ impl DeleteQueue {
         let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
         let new_block = Arc::new(Block {
-            operations: Arc::from(delete_operations.into_boxed_slice()),
+            operations: Arc::new(delete_operations.into_boxed_slice()),
             next: NextBlock::from(self.clone()),
         });
@@ -167,7 +167,7 @@ impl NextBlock {
 }

 struct Block {
-    operations: Arc<[DeleteOperation]>,
+    operations: Arc<Box<[DeleteOperation]>>,
     next: NextBlock,
 }
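Note: the left-hand side stores the block's operations as `Arc<[DeleteOperation]>` rather than `Arc<Box<[DeleteOperation]>>`, removing one pointer indirection on every read of the delete queue. `Arc::from(boxed_slice)` moves the elements into the Arc's own allocation. A tiny illustration with `u64` standing in for `DeleteOperation`:

    use std::sync::Arc;

    let ops: Vec<u64> = vec![1, 2, 3];
    // One pointer hop (Arc<[T]>) instead of two (Arc<Box<[T]>>).
    let shared: Arc<[u64]> = Arc::from(ops.into_boxed_slice());
    assert_eq!(shared.len(), 3);
    // The empty-block case from the first hunk above.
    let empty: Arc<[u64]> = Arc::new([]);
    assert!(empty.is_empty());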
@@ -108,9 +108,9 @@ fn compute_deleted_bitset(
         // Limit doc helps identify the first document
         // that may be affected by the delete operation.
         let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
-        let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
+        let inverted_index = segment_reader.inverted_index(delete_op.term.field());
         if let Some(mut docset) =
-            inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
+            inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)
         {
             let mut deleted_doc = docset.doc();
             while deleted_doc != TERMINATED {
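Note: the hunk cuts off mid-loop; on both sides the loop continues in the same spirit, roughly as sketched below (`delete_bitset.insert` is a stand-in name for whatever bitset-marking call the real function uses, not a quoted API). The point of `limit_doc` is that a delete operation must only affect documents indexed before its opstamp:

    // Sketch: mark matching docs as deleted, but only those below limit_doc.
    while deleted_doc != TERMINATED {
        if deleted_doc < limit_doc {
            delete_bitset.insert(deleted_doc as usize); // hypothetical marking call
        }
        deleted_doc = docset.advance();
    }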
@@ -449,7 +449,7 @@ impl IndexWriter {
     }

     /// Accessor to the merge policy.
-    pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> {
+    pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
         self.segment_updater.get_merge_policy()
     }
@@ -536,7 +536,6 @@ impl IndexWriter {
     /// when no documents are remaining.
     ///
     /// Returns the former segment_ready channel.
-    #[allow(unused_must_use)]
     fn recreate_document_channel(&mut self) -> OperationReceiver {
         let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
             channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
@@ -576,7 +575,7 @@ impl IndexWriter {
         //
         // This will drop the document queue, and the thread
         // should terminate.
-        *self = new_index_writer;
+        mem::replace(self, new_index_writer);

         // Drains the document receiver pipeline :
         // Workers don't need to index the pending documents.
@@ -800,7 +799,7 @@ mod tests {
         let mut schema_builder = schema::Schema::builder();
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let index_writer = index.writer_for_tests().unwrap();
+        let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let operations = vec![
             UserOperation::Add(doc!(text_field=>"a")),
             UserOperation::Add(doc!(text_field=>"b")),
@@ -815,7 +814,7 @@ mod tests {
         let text_field = schema_builder.add_text_field("text", schema::TEXT);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(text_field => "hello1"));
         index_writer.add_document(doc!(text_field => "hello2"));
         assert!(index_writer.commit().is_ok());
@@ -864,7 +863,7 @@ mod tests {
             .reload_policy(ReloadPolicy::Manual)
             .try_into()
             .unwrap();
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         let a_term = Term::from_field_text(text_field, "a");
         let b_term = Term::from_field_text(text_field, "b");
         let operations = vec![
@@ -926,8 +925,8 @@ mod tests {
     fn test_lockfile_already_exists_error_msg() {
         let schema_builder = schema::Schema::builder();
         let index = Index::create_in_ram(schema_builder.build());
-        let _index_writer = index.writer_for_tests().unwrap();
-        match index.writer_for_tests() {
+        let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        match index.writer_with_num_threads(1, 3_000_000) {
             Err(err) => {
                 let err_msg = err.to_string();
                 assert!(err_msg.contains("already an `IndexWriter`"));
@@ -979,7 +978,7 @@ mod tests {
         let num_docs_containing = |s: &str| {
             let searcher = reader.searcher();
             let term = Term::from_field_text(text_field, s);
-            searcher.doc_freq(&term).unwrap()
+            searcher.doc_freq(&term)
         };
         {
@@ -1015,7 +1014,7 @@ mod tests {
             .unwrap();
         let num_docs_containing = |s: &str| {
             let term_a = Term::from_field_text(text_field, s);
-            reader.searcher().doc_freq(&term_a).unwrap()
+            reader.searcher().doc_freq(&term_a)
         };
         {
             // writing the segment
@@ -1110,7 +1109,6 @@ mod tests {
                 .unwrap()
                 .searcher()
                 .doc_freq(&term_a)
-                .unwrap()
         };
         assert_eq!(num_docs_containing("a"), 0);
         assert_eq!(num_docs_containing("b"), 100);
@@ -1130,7 +1128,7 @@ mod tests {
             reader.reload().unwrap();
             let searcher = reader.searcher();
             let term = Term::from_field_text(text_field, s);
-            searcher.doc_freq(&term).unwrap()
+            searcher.doc_freq(&term)
         };
         let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
@@ -1181,15 +1179,7 @@ mod tests {
         // working with an empty index == no documents
         let term_b = Term::from_field_text(text_field, "b");
-        assert_eq!(
-            index
-                .reader()
-                .unwrap()
-                .searcher()
-                .doc_freq(&term_b)
-                .unwrap(),
-            0
-        );
+        assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_b), 0);
     }

     #[test]
@@ -1209,15 +1199,7 @@ mod tests {
         let term_a = Term::from_field_text(text_field, "a");
         // expect the document with that term to be in the index
-        assert_eq!(
-            index
-                .reader()
-                .unwrap()
-                .searcher()
-                .doc_freq(&term_a)
-                .unwrap(),
-            1
-        );
+        assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
     }

     #[test]
@@ -1243,15 +1225,7 @@ mod tests {
         // Find original docs in the index
         let term_a = Term::from_field_text(text_field, "a");
         // expect the document with that term to be in the index
-        assert_eq!(
-            index
-                .reader()
-                .unwrap()
-                .searcher()
-                .doc_freq(&term_a)
-                .unwrap(),
-            1
-        );
+        assert_eq!(index.reader().unwrap().searcher().doc_freq(&term_a), 1);
     }

     #[test]
@@ -1286,7 +1260,7 @@ mod tests {
         let idfield = schema_builder.add_text_field("id", STRING);
         schema_builder.add_text_field("optfield", STRING);
         let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_for_tests().unwrap();
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         index_writer.add_document(doc!(idfield=>"myid"));
         let commit = index_writer.commit();
         assert!(commit.is_ok());
@@ -8,7 +8,7 @@ const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
 const DEFAULT_MIN_MERGE_SIZE: usize = 8;
 const DEFAULT_MAX_MERGE_SIZE: usize = 10_000_000;

-/// `LogMergePolicy` tries to merge segments that have a similar number of
+/// `LogMergePolicy` tries tries to merge segments that have a similar number of
 /// documents.
 #[derive(Debug, Clone)]
 pub struct LogMergePolicy {
@@ -8,40 +8,39 @@ use crate::fastfield::DeleteBitSet;
 use crate::fastfield::FastFieldReader;
 use crate::fastfield::FastFieldSerializer;
 use crate::fastfield::MultiValueIntFastFieldReader;
+use crate::fieldnorm::FieldNormReader;
 use crate::fieldnorm::FieldNormsSerializer;
 use crate::fieldnorm::FieldNormsWriter;
-use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
 use crate::indexer::SegmentSerializer;
+use crate::postings::InvertedIndexSerializer;
 use crate::postings::Postings;
-use crate::postings::{InvertedIndexSerializer, SegmentPostings};
 use crate::schema::Cardinality;
 use crate::schema::FieldType;
 use crate::schema::{Field, Schema};
 use crate::store::StoreWriter;
 use crate::termdict::TermMerger;
 use crate::termdict::TermOrdinal;
-use crate::{DocId, InvertedIndexReader, SegmentComponent};
+use crate::DocId;
 use std::cmp;
 use std::collections::HashMap;
-use std::sync::Arc;

-fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
+fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
     let mut total_tokens = 0u64;
     let mut count: [usize; 256] = [0; 256];
     for reader in readers {
         if reader.has_deletes() {
             // if there are deletes, then we use an approximation
             // using the fieldnorm
-            let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
+            let fieldnorms_reader = reader.get_fieldnorms_reader(field);
             for doc in reader.doc_ids_alive() {
                 let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc);
                 count[fieldnorm_id as usize] += 1;
             }
         } else {
-            total_tokens += reader.inverted_index(field)?.total_num_tokens();
+            total_tokens += reader.inverted_index(field).total_num_tokens();
         }
     }
-    Ok(total_tokens
+    total_tokens
         + count
             .iter()
             .cloned()
@@ -49,7 +48,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::R
             .map(|(fieldnorm_ord, count)| {
                 count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
             })
-            .sum::<u64>())
+            .sum::<u64>()
 }
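Note: when a segment has deletes, the exact token count of the surviving documents is unknown, so `compute_total_num_tokens` histograms alive documents by their one-byte fieldnorm id and weighs each bucket by the decoded fieldnorm. Because the id table is lossy (see the fieldnorm round-trip note earlier), the result is an approximation. A self-contained restatement of that core, with `id_to_fieldnorm` passed in as the lookup:

    // Sketch of the deletes-aware approximation used above.
    fn approx_total_num_tokens(
        alive_fieldnorm_ids: impl Iterator<Item = u8>,
        id_to_fieldnorm: impl Fn(u8) -> u32, // tantivy's table lookup
    ) -> u64 {
        let mut count = [0u64; 256];
        for id in alive_fieldnorm_ids {
            count[id as usize] += 1; // bucket alive docs by fieldnorm id
        }
        count
            .iter()
            .enumerate()
            .map(|(id, &c)| c * u64::from(id_to_fieldnorm(id as u8)))
            .sum()
    }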
 pub struct IndexMerger {
@@ -175,7 +174,7 @@ impl IndexMerger {
         for field in fields {
             fieldnorms_data.clear();
             for reader in &self.readers {
-                let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
+                let fieldnorms_reader = reader.get_fieldnorms_reader(field);
                 for doc_id in reader.doc_ids_alive() {
                     let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc_id);
                     fieldnorms_data.push(fieldnorm_id);
@@ -194,7 +193,7 @@ impl IndexMerger {
     ) -> crate::Result<()> {
         for (field, field_entry) in self.schema.fields() {
             let field_type = field_entry.field_type();
-            match field_type {
+            match *field_type {
                 FieldType::HierarchicalFacet => {
                     let term_ordinal_mapping = term_ord_mappings
                         .remove(&field)
@@ -223,10 +222,8 @@ impl IndexMerger {
                     // They can be implemented using what is done
                     // for facets in the future.
                 }
-                FieldType::Bytes(byte_options) => {
-                    if byte_options.is_fast() {
-                        self.write_bytes_fast_field(field, fast_field_serializer)?;
-                    }
+                FieldType::Bytes => {
+                    self.write_bytes_fast_field(field, fast_field_serializer)?;
                 }
             }
         }
@@ -445,11 +442,9 @@ impl IndexMerger {
         let mut bytes_readers: Vec<BytesFastFieldReader> = Vec::new();
         for reader in &self.readers {
-            let bytes_reader = reader.fast_fields().bytes(field).ok_or_else(|| {
-                crate::TantivyError::InvalidArgument(
-                    "Bytes fast field {:?} not found in segment.".to_string(),
-                )
-            })?;
+            let bytes_reader = reader.fast_fields().bytes(field).expect(
+                "Failed to find bytes fast field reader. This is a bug in tantivy, please report.",
+            );
             if let Some(delete_bitset) = reader.delete_bitset() {
                 for doc in 0u32..reader.max_doc() {
                     if delete_bitset.is_alive(doc) {
@@ -498,23 +493,21 @@ impl IndexMerger {
         indexed_field: Field,
         field_type: &FieldType,
         serializer: &mut InvertedIndexSerializer,
-        fieldnorm_reader: Option<FieldNormReader>,
     ) -> crate::Result<Option<TermOrdinalMapping>> {
         let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
         let mut delta_computer = DeltaComputer::new();
-        let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
-        let field_readers: Vec<Arc<InvertedIndexReader>> = self
+        let field_readers = self
             .readers
             .iter()
             .map(|reader| reader.inverted_index(indexed_field))
-            .collect::<crate::Result<Vec<_>>>()?;
+            .collect::<Vec<_>>();
         let mut field_term_streams = Vec::new();
+        let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
         for field_reader in &field_readers {
             let terms = field_reader.terms();
-            field_term_streams.push(terms.stream()?);
+            field_term_streams.push(terms.stream());
             max_term_ords.push(terms.num_terms() as u64);
         }
@@ -546,7 +539,7 @@ impl IndexMerger {
         // The total number of tokens will only be exact when there has been no deletes.
         //
         // Otherwise, we approximate by removing deleted documents proportionally.
-        let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field)?;
+        let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field);

         // Create the total list of doc ids
         // by stacking the doc ids from the different segment.
@@ -558,8 +551,7 @@ impl IndexMerger {
         // - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc,
         //                               seg0.max_doc + seg1.max_doc + seg2.max_doc]
         // ...
-        let mut field_serializer =
-            serializer.new_field(indexed_field, total_num_tokens, fieldnorm_reader)?;
+        let mut field_serializer = serializer.new_field(indexed_field, total_num_tokens)?;

         let field_entry = self.schema.get_field_entry(indexed_field);
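Note: the stacking scheme described in the comment above amounts to shifting each segment's doc ids by the sum of the `max_doc` values of all earlier segments. A minimal sketch of the offset computation (this ignores deletes; the real `merged_doc_id_map` also compacts away deleted documents):

    // With max_docs = [10, 5, 7]: segment 1's doc 3 becomes 10 + 3 = 13,
    // and segment 2's doc 0 becomes 10 + 5 = 15.
    fn stacking_offsets(max_docs: &[u32]) -> Vec<u32> {
        let mut offsets = Vec::with_capacity(max_docs.len());
        let mut acc = 0u32;
        for &max_doc in max_docs {
            offsets.push(acc);
            acc += max_doc;
        }
        offsets
    }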
@@ -569,45 +561,43 @@ impl IndexMerger {
indexed. Have you modified the schema?", indexed. Have you modified the schema?",
); );
let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
while merged_terms.advance() { while merged_terms.advance() {
segment_postings_containing_the_term.clear();
let term_bytes: &[u8] = merged_terms.key(); let term_bytes: &[u8] = merged_terms.key();
let mut total_doc_freq = 0;
// Let's compute the list of non-empty posting lists // Let's compute the list of non-empty posting lists
for heap_item in merged_terms.current_kvs() { let segment_postings: Vec<_> = merged_terms
let segment_ord = heap_item.segment_ord; .current_kvs()
let term_info = heap_item.streamer.value(); .iter()
let segment_reader = &self.readers[heap_item.segment_ord]; .flat_map(|heap_item| {
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord]; let segment_ord = heap_item.segment_ord;
let segment_postings = inverted_index let term_info = heap_item.streamer.value();
.read_postings_from_terminfo(term_info, segment_postings_option)?; let segment_reader = &self.readers[heap_item.segment_ord];
let delete_bitset_opt = segment_reader.delete_bitset(); let inverted_index = segment_reader.inverted_index(indexed_field);
let doc_freq = if let Some(delete_bitset) = delete_bitset_opt { let mut segment_postings = inverted_index
segment_postings.doc_freq_given_deletes(delete_bitset) .read_postings_from_terminfo(term_info, segment_postings_option);
} else { let mut doc = segment_postings.doc();
segment_postings.doc_freq() while doc != TERMINATED {
}; if !segment_reader.is_deleted(doc) {
if doc_freq > 0u32 { return Some((segment_ord, segment_postings));
total_doc_freq += doc_freq; }
segment_postings_containing_the_term.push((segment_ord, segment_postings)); doc = segment_postings.advance();
} }
} None
})
.collect();
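Both variants above hinge on the same question: does the term still occur in at least one alive document? A rough sketch of counting a posting list's document frequency under deletes, in the `while doc != TERMINATED` style of the surrounding code (the posting and bitset types are assumed from context):

fn doc_freq_given_deletes(postings: &mut SegmentPostings, delete_bitset: &DeleteBitSet) -> u32 {
    let mut doc_freq = 0u32;
    let mut doc = postings.doc();
    while doc != TERMINATED {
        // Only documents that survived deletion count toward the frequency.
        if delete_bitset.is_alive(doc) {
            doc_freq += 1;
        }
        doc = postings.advance();
    }
    doc_freq
}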
// At this point, `segment_postings` contains the posting list // At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term (and that are non-empty) // of all of the segments containing the given term.
// //
// These segments are non-empty and advance has already been called. // These segments are non-empty and advance has already been called.
if total_doc_freq == 0u32 { if segment_postings.is_empty() {
// All docs that used to contain the term have been deleted. The `term` will be
// entirely removed.
continue; continue;
} }
// If not, the `term` will be entirely removed.
let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?; // We know that there is at least one document containing
// the term, so we add it.
let to_term_ord = field_serializer.new_term(term_bytes)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt { if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() { for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
@@ -617,9 +607,7 @@ impl IndexMerger {
// We can now serialize this postings, by pushing each document to the // We can now serialize this postings, by pushing each document to the
// postings serializer. // postings serializer.
for (segment_ord, mut segment_postings) in for (segment_ord, mut segment_postings) in segment_postings {
segment_postings_containing_the_term.drain(..)
{
let old_to_new_doc_id = &merged_doc_id_map[segment_ord]; let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
let mut doc = segment_postings.doc(); let mut doc = segment_postings.doc();
@@ -649,18 +637,13 @@ impl IndexMerger {
fn write_postings( fn write_postings(
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> { ) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field, field_entry) in self.schema.fields() {
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
if field_entry.is_indexed() { if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = self.write_postings_for_field( if let Some(term_ordinal_mapping) =
field, self.write_postings_for_field(field, field_entry.field_type(), serializer)?
field_entry.field_type(), {
serializer,
fieldnorm_reader,
)? {
term_ordinal_mappings.insert(field, term_ordinal_mapping); term_ordinal_mappings.insert(field, term_ordinal_mapping);
} }
} }
@@ -670,7 +653,7 @@ impl IndexMerger {
fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> { fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> {
for reader in &self.readers { for reader in &self.readers {
let store_reader = reader.get_store_reader()?; let store_reader = reader.get_store_reader();
if reader.num_deleted_docs() > 0 { if reader.num_deleted_docs() > 0 {
for doc_id in reader.doc_ids_alive() { for doc_id in reader.doc_ids_alive() {
let doc = store_reader.get(doc_id)?; let doc = store_reader.get(doc_id)?;
@@ -689,12 +672,7 @@ impl SerializableSegment for IndexMerger {
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() { if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(fieldnorms_serializer)?; self.write_fieldnorms(fieldnorms_serializer)?;
} }
let fieldnorm_data = serializer let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_mappings =
self.write_postings(serializer.get_postings_serializer(), fieldnorm_readers)?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?; self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
self.write_storable_fields(serializer.get_store_writer())?; self.write_storable_fields(serializer.get_store_writer())?;
serializer.close()?; serializer.close()?;
@@ -704,15 +682,15 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::assert_nearly_equals;
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE; use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector}; use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector}; use crate::collector::{Count, FacetCollector};
use crate::core::Index; use crate::core::Index;
use crate::query::AllQuery; use crate::query::AllQuery;
use crate::query::BooleanQuery; use crate::query::BooleanQuery;
use crate::query::Scorer;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document; use crate::schema::Document;
use crate::schema::Facet; use crate::schema::Facet;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
@@ -720,17 +698,15 @@ mod tests {
use crate::schema::Term; use crate::schema::Term;
use crate::schema::TextFieldIndexing; use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED; use crate::schema::INDEXED;
use crate::schema::{Cardinality, TEXT};
use crate::DocAddress; use crate::DocAddress;
use crate::IndexWriter; use crate::IndexWriter;
use crate::Searcher; use crate::Searcher;
use crate::{schema, DocSet, SegmentId}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use byteorder::{BigEndian, ReadBytesExt};
use futures::executor::block_on; use futures::executor::block_on;
use schema::FAST; use std::io::Cursor;
#[test] #[test]
fn test_index_merger_no_deletes() -> crate::Result<()> { fn test_index_merger_no_deletes() {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default() let text_fieldtype = schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
@@ -743,77 +719,98 @@ mod tests {
let date_field = schema_builder.add_date_field("date", INDEXED); let date_field = schema_builder.add_date_field("date", INDEXED);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype); let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST); let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader()?; let reader = index.reader().unwrap();
let curr_time = chrono::Utc::now(); let curr_time = chrono::Utc::now();
{ let add_score_bytes = |doc: &mut Document, score: u32| {
let mut index_writer = index.writer_for_tests()?; let mut bytes = Vec::new();
// writing the segment bytes
index_writer.add_document(doc!( .write_u32::<BigEndian>(score)
text_field => "af b", .expect("failed to write u32 bytes to Vec...");
score_field => 3u64, doc.add_bytes(bytes_score_field, bytes);
date_field => curr_time, };
bytes_score_field => 3u32.to_be_bytes().as_ref()
));
index_writer.add_document(doc!( {
text_field => "a b c", let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
score_field => 5u64, {
bytes_score_field => 5u32.to_be_bytes().as_ref() // writing the segment
)); {
index_writer.add_document(doc!( let mut doc = Document::default();
text_field => "a b c d", doc.add_text(text_field, "af b");
score_field => 7u64, doc.add_u64(score_field, 3);
bytes_score_field => 7u32.to_be_bytes().as_ref() doc.add_date(date_field, &curr_time);
)); add_score_bytes(&mut doc, 3);
index_writer.commit()?; index_writer.add_document(doc);
// writing the segment }
index_writer.add_document(doc!( {
text_field => "af b", let mut doc = Document::default();
date_field => curr_time, doc.add_text(text_field, "a b c");
score_field => 11u64, doc.add_u64(score_field, 5);
bytes_score_field => 11u32.to_be_bytes().as_ref() add_score_bytes(&mut doc, 5);
)); index_writer.add_document(doc);
index_writer.add_document(doc!( }
text_field => "a b c g", {
score_field => 13u64, let mut doc = Document::default();
bytes_score_field => 13u32.to_be_bytes().as_ref() doc.add_text(text_field, "a b c d");
)); doc.add_u64(score_field, 7);
index_writer.commit()?; add_score_bytes(&mut doc, 7);
index_writer.add_document(doc);
}
index_writer.commit().expect("committed");
}
{
// writing the segment
{
let mut doc = Document::default();
doc.add_text(text_field, "af b");
doc.add_date(date_field, &curr_time);
doc.add_u64(score_field, 11);
add_score_bytes(&mut doc, 11);
index_writer.add_document(doc);
}
{
let mut doc = Document::default();
doc.add_text(text_field, "a b c g");
doc.add_u64(score_field, 13);
add_score_bytes(&mut doc, 13);
index_writer.add_document(doc);
}
index_writer.commit().expect("Commit failed");
}
} }
{ {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids))?; block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads()?; index_writer.wait_merging_threads().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
searcher let top_docs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
.search(&query, &TEST_COLLECTOR_WITH_SCORE) top_docs.docs().to_vec()
.map(|top_docs| top_docs.docs().to_vec())
}; };
{ {
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?, get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)] vec![DocAddress(0, 1), DocAddress(0, 2), DocAddress(0, 4)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?, get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![DocAddress(0, 0), DocAddress(0, 3)] vec![DocAddress(0, 0), DocAddress(0, 3)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "g")])?, get_doc_ids(vec![Term::from_field_text(text_field, "g")]),
vec![DocAddress(0, 4)] vec![DocAddress(0, 4)]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?, get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![ vec![
DocAddress(0, 0), DocAddress(0, 0),
DocAddress(0, 1), DocAddress(0, 1),
@@ -823,57 +820,60 @@ mod tests {
] ]
); );
assert_eq!( assert_eq!(
get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)])?, get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)]),
vec![DocAddress(0, 0), DocAddress(0, 3)] vec![DocAddress(0, 0), DocAddress(0, 3)]
); );
} }
{ {
let doc = searcher.doc(DocAddress(0, 0))?; let doc = searcher.doc(DocAddress(0, 0)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 1))?; let doc = searcher.doc(DocAddress(0, 1)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 2))?; let doc = searcher.doc(DocAddress(0, 2)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c d")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c d"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 3))?; let doc = searcher.doc(DocAddress(0, 3)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
} }
{ {
let doc = searcher.doc(DocAddress(0, 4))?; let doc = searcher.doc(DocAddress(0, 4)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c g")); assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c g"));
} }
{ {
let get_fast_vals = |terms: Vec<Term>| { let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
searcher.search(&query, &FastFieldTestCollector::for_field(score_field)) searcher
.search(&query, &FastFieldTestCollector::for_field(score_field))
.unwrap()
}; };
let get_fast_vals_bytes = |terms: Vec<Term>| { let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
searcher.search( searcher
&query, .search(
&BytesFastFieldTestCollector::for_field(bytes_score_field), &query,
) &BytesFastFieldTestCollector::for_field(bytes_score_field),
)
.expect("failed to search")
}; };
assert_eq!( assert_eq!(
get_fast_vals(vec![Term::from_field_text(text_field, "a")])?, get_fast_vals(vec![Term::from_field_text(text_field, "a")]),
vec![5, 7, 13] vec![5, 7, 13]
); );
assert_eq!( assert_eq!(
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?, get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")]),
vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13] vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
); );
} }
} }
Ok(())
} }
#[test] #[test]
fn test_index_merger_with_deletes() -> crate::Result<()> { fn test_index_merger_with_deletes() {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default() let text_fieldtype = schema::TextOptions::default()
.set_indexing_options( .set_indexing_options(
@@ -883,26 +883,27 @@ mod tests {
let text_field = schema_builder.add_text_field("text", text_fieldtype); let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue); let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype); let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST); let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let search_term = |searcher: &Searcher, term: Term| { let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field); let collector = FastFieldTestCollector::for_field(score_field);
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field); let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let term_query = TermQuery::new(term, IndexRecordOption::Basic); let term_query = TermQuery::new(term, IndexRecordOption::Basic);
searcher let (scores, bytes) = searcher
.search(&term_query, &(collector, bytes_collector)) .search(&term_query, &(collector, bytes_collector))
.map(|(scores, bytes)| { .unwrap();
let mut score_bytes = &bytes[..]; let mut score_bytes = Cursor::new(bytes);
for &score in &scores { for &score in &scores {
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap()); assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
} }
scores
}) scores
}; };
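A quick aside on the byte check above: the bytes collector returns the concatenated big-endian u32 scores, so decoding with byteorder must reproduce the values that were written. A self-contained round-trip sketch:

use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use std::io::Cursor;

fn roundtrip(scores: &[u32]) {
    let mut bytes = Vec::new();
    for &score in scores {
        // Same 4-byte encoding as `u32::to_be_bytes`.
        bytes.write_u32::<BigEndian>(score).unwrap();
    }
    let mut cursor = Cursor::new(bytes);
    for &score in scores {
        assert_eq!(score, cursor.read_u32::<BigEndian>().unwrap());
    }
}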
let empty_vec = Vec::<u64>::new(); let empty_vec = Vec::<u64>::new();
{ {
// a first commit // a first commit
index_writer.add_document(doc!( index_writer.add_document(doc!(
@@ -921,26 +922,26 @@ mod tests {
score_field => 3u64, score_field => 3u64,
bytes_score_field => vec![0u8, 0, 0, 3], bytes_score_field => vec![0u8, 0, 0, 3],
)); ));
index_writer.commit()?; index_writer.commit().expect("committed");
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
vec![1] vec![1]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
vec![1] vec![1]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![1, 3] vec![1, 3]
); );
} }
@@ -968,8 +969,8 @@ mod tests {
score_field => 7_000u64, score_field => 7_000u64,
bytes_score_field => vec![0u8, 0, 27, 88], bytes_score_field => vec![0u8, 0, 27, 88],
)); ));
index_writer.commit()?; index_writer.commit().expect("committed");
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2); assert_eq!(searcher.segment_readers().len(), 2);
@@ -979,31 +980,31 @@ mod tests {
assert_eq!(searcher.segment_readers()[1].num_docs(), 1); assert_eq!(searcher.segment_readers()[1].num_docs(), 1);
assert_eq!(searcher.segment_readers()[1].max_doc(), 3); assert_eq!(searcher.segment_readers()[1].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
@@ -1025,40 +1026,42 @@ mod tests {
} }
{ {
// merging the segments // merging the segments
let segment_ids = index.searchable_segment_ids()?; let segment_ids = index
block_on(index_writer.merge(&segment_ids))?; .searchable_segment_ids()
reader.reload()?; .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3); assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3); assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
vec![3] vec![3]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
let score_field_reader = searcher let score_field_reader = searcher
@@ -1072,40 +1075,40 @@ mod tests {
{ {
// test a commit with only deletes // test a commit with only deletes
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.commit()?; index_writer.commit().unwrap();
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3); assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
let score_field_reader = searcher let score_field_reader = searcher
@@ -1118,9 +1121,11 @@ mod tests {
} }
{ {
// Test merging a single segment in order to remove deletes. // Test merging a single segment in order to remove deletes.
let segment_ids = index.searchable_segment_ids()?; let segment_ids = index
block_on(index_writer.merge(&segment_ids))?; .searchable_segment_ids()
reader.reload()?; .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
@@ -1128,31 +1133,31 @@ mod tests {
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 2); assert_eq!(searcher.segment_readers()[0].max_doc(), 2);
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?, search_term(&searcher, Term::from_field_text(text_field, "a")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?, search_term(&searcher, Term::from_field_text(text_field, "b")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?, search_term(&searcher, Term::from_field_text(text_field, "c")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?, search_term(&searcher, Term::from_field_text(text_field, "d")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?, search_term(&searcher, Term::from_field_text(text_field, "e")),
empty_vec empty_vec
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?, search_term(&searcher, Term::from_field_text(text_field, "f")),
vec![6_000] vec![6_000]
); );
assert_eq!( assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?, search_term(&searcher, Term::from_field_text(text_field, "g")),
vec![6_000, 7_000] vec![6_000, 7_000]
); );
let score_field_reader = searcher let score_field_reader = searcher
@@ -1167,16 +1172,17 @@ mod tests {
{ {
// Test removing all docs // Test removing all docs
index_writer.delete_term(Term::from_field_text(text_field, "g")); index_writer.delete_term(Term::from_field_text(text_field, "g"));
index_writer.commit()?; index_writer.commit().unwrap();
let segment_ids = index.searchable_segment_ids()?; let segment_ids = index
reader.reload()?; .searchable_segment_ids()
.expect("Searchable segments failed.");
reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert!(segment_ids.is_empty()); assert!(segment_ids.is_empty());
assert!(searcher.segment_readers().is_empty()); assert!(searcher.segment_readers().is_empty());
assert_eq!(searcher.num_docs(), 0); assert_eq!(searcher.num_docs(), 0);
} }
Ok(())
} }
#[test] #[test]
@@ -1186,7 +1192,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| { let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
let mut doc = Document::default(); let mut doc = Document::default();
for facet in doc_facets { for facet in doc_facets {
@@ -1251,7 +1257,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
@@ -1270,7 +1276,7 @@ mod tests {
// Deleting one term // Deleting one term
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]); let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet); let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term); index_writer.delete_term(facet_term);
@@ -1295,7 +1301,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let int_field = schema_builder.add_u64_field("intvals", INDEXED); let int_field = schema_builder.add_u64_field("intvals", INDEXED);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(int_field => 1u64)); index_writer.add_document(doc!(int_field => 1u64));
index_writer.commit().expect("commit failed"); index_writer.commit().expect("commit failed");
index_writer.add_document(doc!(int_field => 1u64)); index_writer.add_document(doc!(int_field => 1u64));
@@ -1324,7 +1330,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_u64(int_field, 1); doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone()); index_writer.add_document(doc.clone());
@@ -1363,7 +1369,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| { let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
let mut doc = Document::default(); let mut doc = Document::default();
for &val in int_vals { for &val in int_vals {
@@ -1437,7 +1443,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert!(block_on(index_writer.merge(&segment_ids)).is_ok()); assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
assert!(index_writer.wait_merging_threads().is_ok()); assert!(index_writer.wait_merging_threads().is_ok());
} }
@@ -1491,7 +1497,7 @@ mod tests {
let index = Index::create_in_ram(builder.build()); let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?; let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment // Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default(); let mut policy = crate::indexer::LogMergePolicy::default();
@@ -1517,71 +1523,4 @@ mod tests {
assert_eq!(1, index.searchable_segments()?.len()); assert_eq!(1, index.searchable_segments()?.len());
Ok(()) Ok(())
} }
#[test]
fn test_merged_index_has_blockwand() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let text = builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?;
let happy_term = Term::from_field_text(text, "happy");
let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
for _ in 0..62 {
writer.add_document(doc!(text=>"hello happy tax payer"));
}
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
assert_nearly_equals!(term_scorer.score(), 0.0079681855);
for _ in 0..81 {
writer.add_document(doc!(text=>"hello happy tax payer"));
}
writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
}
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect();
block_on(writer.merge(&segment_ids[..]))?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(segment_reader, 1.0)?;
// the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
Ok(())
}
} }
View File
@@ -29,9 +29,8 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`. /// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy; pub type DefaultMergePolicy = LogMergePolicy;
#[cfg(feature = "mmap")]
#[cfg(test)] #[cfg(test)]
mod tests_mmap { mod tests {
use crate::schema::{self, Schema}; use crate::schema::{self, Schema};
use crate::{Index, Term}; use crate::{Index, Term};
@@ -40,7 +39,7 @@ mod tests_mmap {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT); let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_from_tempdir(schema_builder.build()).unwrap(); let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// there must be one deleted document in the segment // there must be one deleted document in the segment
index_writer.add_document(doc!(text_field=>"b")); index_writer.add_document(doc!(text_field=>"b"));
index_writer.delete_term(Term::from_field_text(text_field, "b")); index_writer.delete_term(Term::from_field_text(text_field, "b"));
View File
@@ -9,15 +9,6 @@ pub struct DeleteOperation {
pub term: Term, pub term: Term,
} }
impl Default for DeleteOperation {
fn default() -> Self {
DeleteOperation {
opstamp: 0u64,
term: Term::new(),
}
}
}
/// Timestamped Add operation. /// Timestamped Add operation.
#[derive(Eq, PartialEq, Debug)] #[derive(Eq, PartialEq, Debug)]
pub struct AddOperation { pub struct AddOperation {
View File
@@ -36,6 +36,7 @@ impl SegmentSerializer {
}) })
} }
#[allow(dead_code)]
pub fn segment(&self) -> &Segment { pub fn segment(&self) -> &Segment {
&self.segment &self.segment
} }
View File
@@ -25,10 +25,9 @@ use futures::future::Future;
use futures::future::TryFutureExt; use futures::future::TryFutureExt;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashSet; use std::collections::HashSet;
use std::io::{self, Write}; use std::io::Write;
use std::ops::Deref; use std::ops::Deref;
use std::path::PathBuf; use std::path::PathBuf;
use std::process;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
@@ -44,7 +43,7 @@ const NUM_MERGE_THREADS: usize = 4;
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -65,7 +64,7 @@ pub fn save_new_metas(schema: Schema, directory: &dyn Directory) -> crate::Resul
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
info!("save metas"); info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
@@ -155,7 +154,7 @@ pub(crate) struct InnerSegmentUpdater {
index: Index, index: Index,
segment_manager: SegmentManager, segment_manager: SegmentManager,
merge_policy: RwLock<Arc<dyn MergePolicy>>, merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
killed: AtomicBool, killed: AtomicBool,
stamper: Stamper, stamper: Stamper,
merge_operations: MergeOperationInventory, merge_operations: MergeOperationInventory,
@@ -194,19 +193,19 @@ impl SegmentUpdater {
merge_thread_pool, merge_thread_pool,
index, index,
segment_manager, segment_manager,
merge_policy: RwLock::new(Arc::new(DefaultMergePolicy::default())), merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
killed: AtomicBool::new(false), killed: AtomicBool::new(false),
stamper, stamper,
merge_operations: Default::default(), merge_operations: Default::default(),
}))) })))
} }
pub fn get_merge_policy(&self) -> Arc<dyn MergePolicy> { pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.merge_policy.read().unwrap().clone() self.merge_policy.read().unwrap().clone()
} }
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::from(merge_policy); let arc_merge_policy = Arc::new(merge_policy);
*self.merge_policy.write().unwrap() = arc_merge_policy; *self.merge_policy.write().unwrap() = arc_merge_policy;
} }
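The two sides also differ in pointer shape: `Arc::from(merge_policy)` unsizes the `Box<dyn MergePolicy>` into an `Arc<dyn MergePolicy>` (the value moves into the Arc's allocation), while `Arc::new(merge_policy)` keeps the box and yields `Arc<Box<dyn MergePolicy>>`, one extra indirection on every access. A minimal illustration with a stand-in trait:

use std::sync::Arc;

trait MergePolicy: Send + Sync {}
struct StandInPolicy;
impl MergePolicy for StandInPolicy {}

fn main() {
    let boxed: Box<dyn MergePolicy> = Box::new(StandInPolicy);
    // Flat: the Arc points straight at the trait object.
    let flat: Arc<dyn MergePolicy> = Arc::from(boxed);
    // Nested: Arc -> Box -> value, an extra pointer hop.
    let nested: Arc<Box<dyn MergePolicy>> = Arc::new(Box::new(StandInPolicy));
    let _ = (&flat, &nested);
}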
@@ -410,13 +409,6 @@ impl SegmentUpdater {
let _send_result = merging_future_send.send(segment_meta); let _send_result = merging_future_send.send(segment_meta);
} }
Err(e) => { Err(e) => {
if let crate::TantivyError::IOError(ref io_err) = &e {
if io_err.kind() == io::ErrorKind::InvalidData {
println!(" SEGMENTS THAT CAUSE THE BUG {:?}", merge_operation.segment_ids());
error!(" SEGMENTS THAT CAUSE THE BUG {:?}", merge_operation.segment_ids());
process::exit(1);
}
}
warn!( warn!(
"Merge of {:?} was cancelled: {:?}", "Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids().to_vec(), merge_operation.segment_ids().to_vec(),
@@ -431,9 +423,7 @@ impl SegmentUpdater {
}); });
Ok(merging_future_recv Ok(merging_future_recv
.unwrap_or_else(|e| { .unwrap_or_else(|_| Err(crate::TantivyError::SystemError("Merge failed".to_string()))))
Err(crate::TantivyError::SystemError("Merge failed".to_string()))
}))
} }
async fn consider_merge_options(&self) { async fn consider_merge_options(&self) {
@@ -460,8 +450,9 @@ impl SegmentUpdater {
.into_iter() .into_iter()
.map(|merge_candidate: MergeCandidate| { .map(|merge_candidate: MergeCandidate| {
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0) MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
}); })
merge_candidates.extend(committed_merge_candidates); .collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates { for merge_operation in merge_candidates {
if let Err(err) = self.start_merge(merge_operation) { if let Err(err) = self.start_merge(merge_operation) {
@@ -564,7 +555,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(MergeWheneverPossible)); index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
{ {
@@ -617,7 +608,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ {
for _ in 0..100 { for _ in 0..100 {
@@ -688,7 +679,7 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ {
for _ in 0..100 { for _ in 0..100 {
View File
@@ -2,7 +2,7 @@ use super::operation::AddOperation;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter}; use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
@@ -14,8 +14,10 @@ use crate::schema::{Field, FieldEntry};
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream}; use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer}; use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer}; use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp; use crate::Opstamp;
use crate::{DocId, SegmentComponent}; use std::io;
use std::str;
/// Computes the initial size of the hash table. /// Computes the initial size of the hash table.
/// ///
@@ -46,7 +48,6 @@ pub struct SegmentWriter {
fieldnorms_writer: FieldNormsWriter, fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>, doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<TextAnalyzer>>, tokenizers: Vec<Option<TextAnalyzer>>,
term_buffer: Term,
} }
impl SegmentWriter { impl SegmentWriter {
@@ -90,7 +91,6 @@ impl SegmentWriter {
fast_field_writers: FastFieldsWriter::from_schema(schema), fast_field_writers: FastFieldsWriter::from_schema(schema),
doc_opstamps: Vec::with_capacity(1_000), doc_opstamps: Vec::with_capacity(1_000),
tokenizers, tokenizers,
term_buffer: Term::new(),
}) })
} }
@@ -116,11 +116,7 @@ impl SegmentWriter {
/// Indexes a new document /// Indexes a new document
/// ///
/// As a user, you should rather use `IndexWriter`'s add_document. /// As a user, you should rather use `IndexWriter`'s add_document.
pub fn add_document( pub fn add_document(&mut self, add_operation: AddOperation, schema: &Schema) -> io::Result<()> {
&mut self,
add_operation: AddOperation,
schema: &Schema,
) -> crate::Result<()> {
let doc_id = self.max_doc; let doc_id = self.max_doc;
let mut doc = add_operation.document; let mut doc = add_operation.document;
self.doc_opstamps.push(add_operation.opstamp); self.doc_opstamps.push(add_operation.opstamp);
@@ -128,45 +124,34 @@ impl SegmentWriter {
self.fast_field_writers.add_document(&doc); self.fast_field_writers.add_document(&doc);
for (field, field_values) in doc.get_sorted_field_values() { for (field, field_values) in doc.get_sorted_field_values() {
let field_entry = schema.get_field_entry(field); let field_options = schema.get_field_entry(field);
let make_schema_error = || { if !field_options.is_indexed() {
crate::TantivyError::SchemaError(format!(
"Expected a {:?} for field {:?}",
field_entry.field_type().value_type(),
field_entry.name()
))
};
if !field_entry.is_indexed() {
continue; continue;
} }
let (term_buffer, multifield_postings) = match *field_options.field_type() {
(&mut self.term_buffer, &mut self.multifield_postings);
match *field_entry.field_type() {
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
term_buffer.set_field(field); let facets: Vec<&str> = field_values
let facets = .iter()
field_values .flat_map(|field_value| match *field_value.value() {
.iter() Value::Facet(ref facet) => Some(facet.encoded_str()),
.flat_map(|field_value| match *field_value.value() { _ => {
Value::Facet(ref facet) => Some(facet.encoded_str()), panic!("Expected hierarchical facet");
_ => { }
panic!("Expected hierarchical facet"); })
} .collect();
}); let mut term = Term::for_field(field); // we set the Term
for facet_str in facets { for fake_str in facets {
let mut unordered_term_id_opt = None; let mut unordered_term_id_opt = None;
FacetTokenizer FacetTokenizer.token_stream(fake_str).process(&mut |token| {
.token_stream(facet_str) term.set_text(&token.text);
.process(&mut |token| { let unordered_term_id =
term_buffer.set_text(&token.text); self.multifield_postings.subscribe(doc_id, &term);
let unordered_term_id = unordered_term_id_opt = Some(unordered_term_id);
multifield_postings.subscribe(doc_id, &term_buffer); });
unordered_term_id_opt = Some(unordered_term_id);
});
if let Some(unordered_term_id) = unordered_term_id_opt { if let Some(unordered_term_id) = unordered_term_id_opt {
self.fast_field_writers self.fast_field_writers
.get_multivalue_writer(field) .get_multivalue_writer(field)
.expect("writer for facet missing") .expect("multified writer for facet missing")
.add_val(unordered_term_id); .add_val(unordered_term_id);
} }
} }
@@ -183,6 +168,7 @@ impl SegmentWriter {
if let Some(last_token) = tok_str.tokens.last() { if let Some(last_token) = tok_str.tokens.last() {
total_offset += last_token.offset_to; total_offset += last_token.offset_to;
} }
token_streams token_streams
.push(PreTokenizedStream::from(tok_str.clone()).into()); .push(PreTokenizedStream::from(tok_str.clone()).into());
} }
@@ -192,6 +178,7 @@ impl SegmentWriter {
{ {
offsets.push(total_offset); offsets.push(total_offset);
total_offset += text.len(); total_offset += text.len();
token_streams.push(tokenizer.token_stream(text)); token_streams.push(tokenizer.token_stream(text));
} }
} }
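The running `total_offset` above is what lets `TokenStreamChain` present several field values as one stream: each value's tokens are shifted by the total length of the text that precedes it. A tiny illustration of the bookkeeping:

let values = ["hello", "world"];
let mut offsets = Vec::new();
let mut total_offset = 0usize;
for text in &values {
    // Record where this value starts in the concatenated text.
    offsets.push(total_offset);
    total_offset += text.len();
}
assert_eq!(offsets, vec![0, 5]);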
@@ -203,12 +190,8 @@ impl SegmentWriter {
0 0
} else { } else {
let mut token_stream = TokenStreamChain::new(offsets, token_streams); let mut token_stream = TokenStreamChain::new(offsets, token_streams);
multifield_postings.index_text( self.multifield_postings
doc_id, .index_text(doc_id, field, &mut token_stream)
field,
&mut token_stream,
term_buffer,
)
}; };
self.fieldnorms_writer.record(doc_id, field, num_tokens); self.fieldnorms_writer.record(doc_id, field, num_tokens);
@@ -216,67 +199,49 @@ impl SegmentWriter {
FieldType::U64(ref int_option) => { FieldType::U64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_u64(
let u64_val = field_value field_value.field(),
.value() field_value.value().u64_value(),
.u64_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_u64(u64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::Date(ref int_option) => { FieldType::Date(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_i64(
let date_val = field_value field_value.field(),
.value() field_value.value().date_value().timestamp(),
.date_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_i64(date_val.timestamp());
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::I64(ref int_option) => { FieldType::I64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_i64(
let i64_val = field_value field_value.field(),
.value() field_value.value().i64_value(),
.i64_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_i64(i64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::F64(ref int_option) => { FieldType::F64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {
term_buffer.set_field(field_value.field()); let term = Term::from_field_f64(
let f64_val = field_value field_value.field(),
.value() field_value.value().f64_value(),
.f64_value() );
.ok_or_else(make_schema_error)?; self.multifield_postings.subscribe(doc_id, &term);
term_buffer.set_f64(f64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
} }
} }
} }
FieldType::Bytes(ref option) => { FieldType::Bytes => {
if option.is_indexed() { // Do nothing. Bytes only supports fast fields.
for field_value in field_values {
term_buffer.set_field(field_value.field());
let bytes = field_value
.value()
.bytes_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_bytes(bytes);
self.multifield_postings.subscribe(doc_id, &term_buffer);
}
}
} }
} }
} }
@@ -319,12 +284,7 @@ fn write(
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() { if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
fieldnorms_writer.serialize(fieldnorms_serializer)?; fieldnorms_writer.serialize(fieldnorms_serializer)?;
} }
let fieldnorm_data = serializer let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_map =
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?; fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
serializer.close()?; serializer.close()?;
Ok(()) Ok(())
View File
@@ -105,7 +105,7 @@ extern crate serde_json;
extern crate log; extern crate log;
#[macro_use] #[macro_use]
extern crate thiserror; extern crate failure;
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
extern crate test; extern crate test;
@@ -134,7 +134,7 @@ mod core;
mod indexer; mod indexer;
#[allow(unused_doc_comments)] #[allow(unused_doc_comments)]
pub mod error; mod error;
pub mod tokenizer; pub mod tokenizer;
pub mod collector; pub mod collector;
@@ -157,7 +157,6 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, TERMINATED}; pub use self::docset::{DocSet, TERMINATED};
pub use crate::common::HasLen;
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::{Executor, SegmentComponent}; pub use crate::core::{Executor, SegmentComponent};
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -174,7 +173,7 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Index format version. /// Index format version.
const INDEX_FORMAT_VERSION: u32 = 3; const INDEX_FORMAT_VERSION: u32 = 1;
/// Structure version for the index. /// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -246,10 +245,11 @@ pub type DocId = u32;
/// with opstamp `n+1`. /// with opstamp `n+1`.
pub type Opstamp = u64; pub type Opstamp = u64;
/// A Score that represents the relevance of the document to the query /// A f32 that represents the relevance of the document to the query
/// ///
/// This is modelled internally as a `f32`. The larger the number, the more relevant /// This is modelled internally as a `f32`. The
/// the document to the search query. /// larger the number, the more relevant the document
/// to the search
pub type Score = f32; pub type Score = f32;
/// A `SegmentLocalId` identifies a segment. /// A `SegmentLocalId` identifies a segment.
@@ -277,11 +277,12 @@ impl DocAddress {
/// ///
/// The id used for the segment is actually an ordinal /// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`. /// in the list of `Segment`s held by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId); pub struct DocAddress(pub SegmentLocalId, pub DocId);
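The `Hash` derive added on one side is presumably what lets `DocAddress` values serve as hash-map or hash-set keys, e.g. for deduplicating hits:

use std::collections::HashSet;

let mut seen = HashSet::new();
assert!(seen.insert(DocAddress(0, 1)));
// The same address is rejected on the second insert.
assert!(!seen.insert(DocAddress(0, 1)));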
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE; use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::docset::{DocSet, TERMINATED}; use crate::docset::{DocSet, TERMINATED};
@@ -289,6 +290,7 @@ mod tests {
use crate::schema::*; use crate::schema::*;
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use crate::IndexWriter;
use crate::Postings; use crate::Postings;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use rand::distributions::Bernoulli; use rand::distributions::Bernoulli;
@@ -353,14 +355,14 @@ mod tests {
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() -> crate::Result<()> { fn test_indexing() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap(); let index = Index::create_from_tempdir(schema).unwrap();
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ {
let doc = doc!(text_field=>"af b"); let doc = doc!(text_field=>"af b");
index_writer.add_document(doc); index_writer.add_document(doc);
@@ -375,76 +377,100 @@ mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
Ok(())
} }
#[test] #[test]
fn test_docfreq1() -> crate::Result<()> { fn test_docfreq1() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); {
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a")); index_writer.commit().unwrap();
index_writer.add_document(doc!(text_field=>"a a")); }
index_writer.commit()?; {
index_writer.add_document(doc!(text_field=>"c")); index_writer.add_document(doc!(text_field=>"a"));
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a a"));
let reader = index.reader()?; index_writer.commit().unwrap();
let searcher = reader.searcher(); }
let term_a = Term::from_field_text(text_field, "a"); {
assert_eq!(searcher.doc_freq(&term_a)?, 3); index_writer.add_document(doc!(text_field=>"c"));
let term_b = Term::from_field_text(text_field, "b"); index_writer.commit().unwrap();
assert_eq!(searcher.doc_freq(&term_b)?, 1); }
let term_c = Term::from_field_text(text_field, "c"); {
assert_eq!(searcher.doc_freq(&term_c)?, 2); let reader = index.reader().unwrap();
let term_d = Term::from_field_text(text_field, "d"); let searcher = reader.searcher();
assert_eq!(searcher.doc_freq(&term_d)?, 0); let term_a = Term::from_field_text(text_field, "a");
Ok(()) assert_eq!(searcher.doc_freq(&term_a), 3);
let term_b = Term::from_field_text(text_field, "b");
assert_eq!(searcher.doc_freq(&term_b), 1);
let term_c = Term::from_field_text(text_field, "c");
assert_eq!(searcher.doc_freq(&term_c), 2);
let term_d = Term::from_field_text(text_field, "d");
assert_eq!(searcher.doc_freq(&term_d), 0);
}
} }
#[test] #[test]
fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> { fn test_fieldnorm_no_docs_with_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title_field = schema_builder.add_text_field("title", TEXT); let title_field = schema_builder.add_text_field("title", TEXT);
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?;
let index_reader = index.reader()?;
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{ {
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(fieldnorm_reader.fieldnorm(0), 3); {
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
} }
{ {
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?; let index_reader = index.reader().unwrap();
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0); let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
}
{
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
}
} }
Ok(())
} }
#[test] #[test]
fn test_fieldnorm() -> crate::Result<()> { fn test_fieldnorm() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build()); let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?; {
index_writer.add_document(doc!(text_field=>"a b c")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!()); {
index_writer.add_document(doc!(text_field=>"a b")); let doc = doc!(text_field=>"a b c");
index_writer.commit()?; index_writer.add_document(doc);
let reader = index.reader()?; }
let searcher = reader.searcher(); {
let segment_reader: &SegmentReader = searcher.segment_reader(0); let doc = doc!();
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?; index_writer.add_document(doc);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3); }
assert_eq!(fieldnorms_reader.fieldnorm(1), 0); {
assert_eq!(fieldnorms_reader.fieldnorm(2), 2); let doc = doc!(text_field=>"a b");
Ok(()) index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
{
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
}
} }
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool { fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
@@ -459,7 +485,7 @@ mod tests {
} }
#[test] #[test]
fn test_delete_postings1() -> crate::Result<()> { fn test_delete_postings1() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
@@ -475,7 +501,7 @@ mod tests {
.unwrap(); .unwrap();
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// 0 // 0
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
// 1 // 1
@@ -491,19 +517,19 @@ mod tests {
index_writer.add_document(doc!(text_field=>" b c")); index_writer.add_document(doc!(text_field=>" b c"));
// 5 // 5
index_writer.add_document(doc!(text_field=>" a")); index_writer.add_document(doc!(text_field=>" a"));
index_writer.commit()?; index_writer.commit().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(text_field)?; let inverted_index = segment_reader.inverted_index(text_field);
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 5); assert_eq!(postings.doc(), 5);
@@ -511,7 +537,7 @@ mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
@@ -522,25 +548,25 @@ mod tests {
} }
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// 0 // 0
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
// 1 // 1
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback()?; index_writer.rollback().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let seg_reader = searcher.segment_reader(0); let seg_reader = searcher.segment_reader(0);
let inverted_index = seg_reader.inverted_index(term_abcd.field())?; let inverted_index = seg_reader.inverted_index(term_abcd.field());
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, seg_reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 5); assert_eq!(postings.doc(), 5);
@@ -548,7 +574,7 @@ mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, seg_reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
@@ -559,30 +585,30 @@ mod tests {
} }
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b")); index_writer.add_document(doc!(text_field=>"a b"));
index_writer.delete_term(Term::from_field_text(text_field, "c")); index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback()?; index_writer.rollback().unwrap();
index_writer.delete_term(Term::from_field_text(text_field, "a")); index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.commit()?; index_writer.commit().unwrap();
} }
{ {
reader.reload()?; reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(term_abcd.field())?; let inverted_index = segment_reader.inverted_index(term_abcd.field());
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(!advance_undeleted(&mut postings, segment_reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
@@ -592,107 +618,101 @@ mod tests {
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, segment_reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4); assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, segment_reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
} }
Ok(())
} }
#[test] #[test]
fn test_indexed_u64() -> crate::Result<()> { fn test_indexed_u64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("value", INDEXED); let field = schema_builder.add_u64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64)); index_writer.add_document(doc!(field=>1u64));
index_writer.commit()?; index_writer.commit().unwrap();
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_u64(field, 1u64); let term = Term::from_field_u64(field, 1u64);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)? .read_postings(&term, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
Ok(())
} }
#[test] #[test]
fn test_indexed_i64() -> crate::Result<()> { fn test_indexed_i64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_i64_field("value", INDEXED); let value_field = schema_builder.add_i64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let negative_val = -1i64; let negative_val = -1i64;
index_writer.add_document(doc!(value_field => negative_val)); index_writer.add_document(doc!(value_field => negative_val));
index_writer.commit()?; index_writer.commit().unwrap();
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_i64(value_field, negative_val); let term = Term::from_field_i64(value_field, negative_val);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)? .read_postings(&term, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
Ok(())
} }
#[test] #[test]
fn test_indexed_f64() -> crate::Result<()> { fn test_indexed_f64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_f64_field("value", INDEXED); let value_field = schema_builder.add_f64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let val = std::f64::consts::PI; let val = std::f64::consts::PI;
index_writer.add_document(doc!(value_field => val)); index_writer.add_document(doc!(value_field => val));
index_writer.commit()?; index_writer.commit().unwrap();
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let term = Term::from_field_f64(value_field, val); let term = Term::from_field_f64(value_field, val);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
.inverted_index(term.field())? .inverted_index(term.field())
.read_postings(&term, IndexRecordOption::Basic)? .read_postings(&term, IndexRecordOption::Basic)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
Ok(())
} }
#[test] #[test]
fn test_indexedfield_not_in_documents() -> crate::Result<()> { fn test_indexedfield_not_in_documents() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let absent_field = schema_builder.add_text_field("text", TEXT); let absent_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a")); index_writer.add_document(doc!(text_field=>"a"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(absent_field)?; segment_reader.inverted_index(absent_field); //< should not panic
assert_eq!(inverted_index.terms().num_terms(), 0);
Ok(())
} }
#[test] #[test]
fn test_delete_postings2() -> crate::Result<()> { fn test_delete_postings2() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -700,112 +720,125 @@ mod tests {
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into()?; .try_into()
.unwrap();
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"63"));
index_writer.add_document(doc!(text_field=>"70")); let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
index_writer.add_document(doc!(text_field=>"34")); let doc = doc!(text_field=>val);
index_writer.add_document(doc!(text_field=>"1")); index_writer.add_document(doc);
index_writer.add_document(doc!(text_field=>"38")); };
index_writer.add_document(doc!(text_field=>"33"));
index_writer.add_document(doc!(text_field=>"40")); let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
index_writer.add_document(doc!(text_field=>"17")); let delterm = Term::from_field_text(text_field, val);
index_writer.delete_term(Term::from_field_text(text_field, "38")); index_writer.delete_term(delterm);
index_writer.delete_term(Term::from_field_text(text_field, "34")); };
index_writer.commit()?;
reader.reload()?; add_document(&mut index_writer, "63");
assert_eq!(reader.searcher().num_docs(), 6); add_document(&mut index_writer, "70");
Ok(()) add_document(&mut index_writer, "34");
add_document(&mut index_writer, "1");
add_document(&mut index_writer, "38");
add_document(&mut index_writer, "33");
add_document(&mut index_writer, "40");
add_document(&mut index_writer, "17");
remove_document(&mut index_writer, "38");
remove_document(&mut index_writer, "34");
index_writer.commit().unwrap();
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 6);
} }
#[test] #[test]
fn test_termfreq() -> crate::Result<()> { fn test_termfreq() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"af af af bc bc")); let doc = doc!(text_field=>"af af af bc bc");
index_writer.commit()?; index_writer.add_document(doc);
index_writer.commit().unwrap();
} }
{ {
let index_reader = index.reader()?; let index_reader = index.reader().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field)?; let inverted_index = reader.inverted_index(text_field);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
let term_af = Term::from_field_text(text_field, "af"); let term_af = Term::from_field_text(text_field, "af");
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)? .read_postings(&term_af, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert_eq!(postings.doc(), 0); assert_eq!(postings.doc(), 0);
assert_eq!(postings.term_freq(), 3); assert_eq!(postings.term_freq(), 3);
assert_eq!(postings.advance(), TERMINATED); assert_eq!(postings.advance(), TERMINATED);
} }
Ok(())
} }
#[test] #[test]
fn test_searcher_1() -> crate::Result<()> { fn test_searcher_1() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index.reader()?; let reader = index.reader().unwrap();
// writing the segment {
let mut index_writer = index.writer_for_tests()?; // writing the segment
index_writer.add_document(doc!(text_field=>"af af af b")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); index_writer.add_document(doc!(text_field=>"af af af b"));
index_writer.add_document(doc!(text_field=>"a b c d")); index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit().unwrap();
reader.reload()?; }
let searcher = reader.searcher(); {
let get_doc_ids = |terms: Vec<Term>| { reader.reload().unwrap();
let query = BooleanQuery::new_multiterms_query(terms); let searcher = reader.searcher();
searcher let get_doc_ids = |terms: Vec<Term>| {
.search(&query, &TEST_COLLECTOR_WITH_SCORE) let query = BooleanQuery::new_multiterms_query(terms);
.map(|topdocs| topdocs.docs().to_vec()) let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
}; topdocs.docs().to_vec()
assert_eq!( };
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?, assert_eq!(
vec![DocAddress(0, 1), DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
); vec![DocAddress(0, 1), DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?, assert_eq!(
vec![DocAddress(0, 0)] get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
); vec![DocAddress(0, 0)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?, assert_eq!(
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
); vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "c")])?, assert_eq!(
vec![DocAddress(0, 1), DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
); vec![DocAddress(0, 1), DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![Term::from_field_text(text_field, "d")])?, assert_eq!(
vec![DocAddress(0, 2)] get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
); vec![DocAddress(0, 2)]
assert_eq!( );
get_doc_ids(vec![ assert_eq!(
Term::from_field_text(text_field, "b"), get_doc_ids(vec![
Term::from_field_text(text_field, "a"), Term::from_field_text(text_field, "b"),
])?, Term::from_field_text(text_field, "a"),
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)] ]),
); vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
Ok(()) );
}
} }
#[test] #[test]
fn test_searcher_2() -> crate::Result<()> { fn test_searcher_2() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
@@ -813,17 +846,19 @@ mod tests {
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into()?; .try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0u64); assert_eq!(reader.searcher().num_docs(), 0u64);
// writing the segment {
let mut index_writer = index.writer_for_tests()?; // writing the segment
index_writer.add_document(doc!(text_field=>"af b")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c")); index_writer.add_document(doc!(text_field=>"af b"));
index_writer.add_document(doc!(text_field=>"a b c d")); index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?; index_writer.add_document(doc!(text_field=>"a b c d"));
reader.reload()?; index_writer.commit().unwrap();
}
reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 3u64); assert_eq!(reader.searcher().num_docs(), 3u64);
Ok(())
} }
#[test] #[test]
@@ -835,17 +870,17 @@ mod tests {
text_field => "some other value", text_field => "some other value",
other_text_field => "short"); other_text_field => "short");
assert_eq!(document.len(), 3); assert_eq!(document.len(), 3);
let values: Vec<&Value> = document.get_all(text_field).collect(); let values = document.get_all(text_field);
assert_eq!(values.len(), 2); assert_eq!(values.len(), 2);
assert_eq!(values[0].text(), Some("tantivy")); assert_eq!(values[0].text(), Some("tantivy"));
assert_eq!(values[1].text(), Some("some other value")); assert_eq!(values[1].text(), Some("some other value"));
let values: Vec<&Value> = document.get_all(other_text_field).collect(); let values = document.get_all(other_text_field);
assert_eq!(values.len(), 1); assert_eq!(values.len(), 1);
assert_eq!(values[0].text(), Some("short")); assert_eq!(values[0].text(), Some("short"));
} }
#[test] #[test]
fn test_wrong_fast_field_type() -> crate::Result<()> { fn test_wrong_fast_field_type() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST); let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST); let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
@@ -855,14 +890,14 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
{ {
let document = let document =
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64); doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
index_writer.add_document(document); index_writer.add_document(document);
index_writer.commit()?; index_writer.commit().unwrap();
} }
let reader = index.reader()?; let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0); let segment_reader: &SegmentReader = searcher.segment_reader(0);
{ {
@@ -901,12 +936,11 @@ mod tests {
let fast_field_reader = fast_field_reader_opt.unwrap(); let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4f64) assert_eq!(fast_field_reader.get(0), 4f64)
} }
Ok(())
} }
// motivated by #729 // motivated by #729
#[test] #[test]
fn test_update_via_delete_insert() -> crate::Result<()> { fn test_update_via_delete_insert() {
use crate::collector::Count; use crate::collector::Count;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::AllQuery; use crate::query::AllQuery;
@@ -920,17 +954,17 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone()); let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader()?; let index_reader = index.reader().unwrap();
let mut index_writer = index.writer_for_tests()?; let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy)); index_writer.set_merge_policy(Box::new(NoMergePolicy));
for doc_id in 0u64..DOC_COUNT { for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id)); index_writer.add_document(doc!(id => doc_id));
} }
index_writer.commit()?; index_writer.commit().unwrap();
index_reader.reload()?; index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
assert_eq!( assert_eq!(
@@ -941,11 +975,12 @@ mod tests {
// update the 10 elements by deleting and re-adding // update the 10 elements by deleting and re-adding
for doc_id in 0u64..DOC_COUNT { for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id)); index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit()?; index_writer.commit().unwrap();
index_reader.reload()?; index_reader.reload().unwrap();
index_writer.add_document(doc!(id => doc_id)); let doc = doc!(id => doc_id);
index_writer.commit()?; index_writer.add_document(doc);
index_reader.reload()?; index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
// The number of documents should be stable. // The number of documents should be stable.
assert_eq!( assert_eq!(
@@ -954,7 +989,7 @@ mod tests {
); );
} }
index_reader.reload()?; index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher let segment_ids: Vec<SegmentId> = searcher
.segment_readers() .segment_readers()
@@ -963,18 +998,12 @@ mod tests {
.collect(); .collect();
block_on(index_writer.merge(&segment_ids)).unwrap(); block_on(index_writer.merge(&segment_ids)).unwrap();
index_reader.reload()?; index_reader.reload().unwrap();
let searcher = index_reader.searcher(); let searcher = index_reader.searcher();
assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
Ok(())
}
#[test] assert_eq!(
fn test_validate_checksum() -> crate::Result<()> { searcher.search(&AllQuery, &Count).unwrap(),
let index_path = tempfile::tempdir().expect("dir"); DOC_COUNT as usize
let schema = Schema::builder().build(); );
let index = Index::create_in_dir(&index_path, schema)?;
assert!(index.validate_checksum()?.is_empty());
Ok(())
} }
} }

@@ -38,11 +38,11 @@ const LONG_SKIP_INTERVAL: u64 = (LONG_SKIP_IN_BLOCKS * COMPRESSION_BLOCK_SIZE) a
pub mod tests { pub mod tests {
use super::PositionSerializer; use super::PositionSerializer;
use crate::directory::ReadOnlySource;
use crate::positions::reader::PositionReader; use crate::positions::reader::PositionReader;
use crate::{common::HasLen, directory::FileSlice};
use std::iter; use std::iter;
fn create_stream_buffer(vals: &[u32]) -> (FileSlice, FileSlice) { fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
let mut skip_buffer = vec![]; let mut skip_buffer = vec![];
let mut stream_buffer = vec![]; let mut stream_buffer = vec![];
{ {
@@ -53,7 +53,10 @@ pub mod tests {
} }
serializer.close().unwrap(); serializer.close().unwrap();
} }
(FileSlice::from(stream_buffer), FileSlice::from(skip_buffer)) (
ReadOnlySource::from(stream_buffer),
ReadOnlySource::from(skip_buffer),
)
} }
#[test] #[test]
@@ -62,7 +65,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 12); assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168); assert_eq!(stream.len(), 1168);
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 0u64);
for &n in &[1, 10, 127, 128, 130, 312] { for &n in &[1, 10, 127, 128, 130, 312] {
let mut v = vec![0u32; n]; let mut v = vec![0u32; n];
position_reader.read(0, &mut v[..]); position_reader.read(0, &mut v[..]);
@@ -78,7 +81,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 12); assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168); assert_eq!(stream.len(), 1168);
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 0u64);
for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] { for &offset in &[1u64, 10u64, 127u64, 128u64, 130u64, 312u64] {
for &len in &[1, 10, 130, 500] { for &len in &[1, 10, 130, 500] {
let mut v = vec![0u32; len]; let mut v = vec![0u32; len];
@@ -97,7 +100,7 @@ pub mod tests {
assert_eq!(skip.len(), 12); assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168); assert_eq!(stream.len(), 1168);
let mut position_reader = PositionReader::new(stream, skip, 0u64).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 0u64);
let mut buf = [0u32; 7]; let mut buf = [0u32; 7];
let mut c = 0; let mut c = 0;
@@ -119,7 +122,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 15_749); assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 4_987_872); assert_eq!(stream.len(), 4_987_872);
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap(); let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
let mut buf = [0u32; 256]; let mut buf = [0u32; 256];
position_reader.read(128, &mut buf); position_reader.read(128, &mut buf);
for i in 0..256 { for i in 0..256 {
@@ -139,8 +142,7 @@ pub mod tests {
assert_eq!(skip.len(), 15_749); assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 4_987_872); assert_eq!(stream.len(), 4_987_872);
let mut buf = [0u32; 1]; let mut buf = [0u32; 1];
let mut position_reader = let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 200_000);
PositionReader::new(stream.clone(), skip.clone(), 200_000).unwrap();
position_reader.read(230, &mut buf); position_reader.read(230, &mut buf);
position_reader.read(9, &mut buf); position_reader.read(9, &mut buf);
} }
@@ -155,7 +157,7 @@ pub mod tests {
} }
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
let mut buf = Vec::new(); let mut buf = Vec::new();
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0).unwrap(); let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), 0);
let mut offset = 0; let mut offset = 0;
for i in 1..24 { for i in 1..24 {
buf.resize(i, 0); buf.resize(i, 0);
@@ -173,7 +175,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]); let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 15_749); assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 1_000_000); assert_eq!(stream.len(), 1_000_000);
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024).unwrap(); let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
let mut buf = [0u32; 1]; let mut buf = [0u32; 1];
position_reader.read(0, &mut buf); position_reader.read(0, &mut buf);
assert_eq!(buf[0], CONST_VAL); assert_eq!(buf[0], CONST_VAL);
@@ -192,8 +194,7 @@ pub mod tests {
128 * 1024 + 7, 128 * 1024 + 7,
128 * 10 * 1024 + 10, 128 * 10 * 1024 + 10,
] { ] {
let mut position_reader = let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
PositionReader::new(stream.clone(), skip.clone(), offset).unwrap();
let mut buf = [0u32; 1]; let mut buf = [0u32; 1];
position_reader.read(0, &mut buf); position_reader.read(0, &mut buf);
assert_eq!(buf[0], offset as u32); assert_eq!(buf[0], offset as u32);

@@ -1,13 +1,8 @@
use std::io;
use crate::common::{BinarySerializable, FixedSize}; use crate::common::{BinarySerializable, FixedSize};
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::positions::COMPRESSION_BLOCK_SIZE; use crate::positions::COMPRESSION_BLOCK_SIZE;
use crate::positions::LONG_SKIP_INTERVAL; use crate::positions::LONG_SKIP_INTERVAL;
use crate::positions::LONG_SKIP_IN_BLOCKS; use crate::positions::LONG_SKIP_IN_BLOCKS;
use bitpacking::{BitPacker, BitPacker4x};
/// Positions works as a long sequence of compressed blocks. /// Positions works as a long sequence of compressed blocks.
/// All terms are chained one after the other. /// All terms are chained one after the other.
/// ///
@@ -28,28 +23,28 @@ use bitpacking::{BitPacker, BitPacker4x};
/// A given block thus takes `(128 x num_bits_for_the_block / num_bits_in_a_byte)` bytes, /// A given block thus takes `(128 x num_bits_for_the_block / num_bits_in_a_byte)` bytes,
/// so skipping a block without decompressing it is just a matter of advancing that many /// so skipping a block without decompressing it is just a matter of advancing that many
/// bytes. /// bytes.
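A quick worked example of that size arithmetic, as a standalone sketch (the constant 128 stands for `COMPRESSION_BLOCK_SIZE`):

// Byte size of one bit-packed block of 128 positions.
const COMPRESSION_BLOCK_SIZE: usize = 128;

fn block_num_bytes(num_bits: u8) -> usize {
    COMPRESSION_BLOCK_SIZE * num_bits as usize / 8
}

fn main() {
    // With 7 bits per value, a full block occupies 128 * 7 / 8 = 112 bytes.
    assert_eq!(block_num_bytes(7), 112);
    // Skipping n blocks just sums their byte sizes from the skip index;
    // no decompression is needed.
    let num_bits_per_block = [7u8, 5, 9];
    let skip_bytes: usize = num_bits_per_block.iter().map(|&b| block_num_bytes(b)).sum();
    assert_eq!(skip_bytes, 112 + 80 + 144);
}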
use bitpacking::{BitPacker, BitPacker4x};
use owned_read::OwnedRead;
struct Positions { struct Positions {
bit_packer: BitPacker4x, bit_packer: BitPacker4x,
skip_file: FileSlice, skip_source: ReadOnlySource,
position_file: FileSlice, position_source: ReadOnlySource,
long_skip_data: OwnedBytes, long_skip_source: ReadOnlySource,
} }
impl Positions { impl Positions {
pub fn new(position_file: FileSlice, skip_file: FileSlice) -> io::Result<Positions> { pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let (body, footer) = skip_file.split_from_end(u32::SIZE_IN_BYTES); let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
let footer_data = footer.read_bytes()?; let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let num_long_skips = u32::deserialize(&mut footer_data.as_slice())?; let (skip_source, long_skip_source) =
let (skip_file, long_skip_file) =
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize)); body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
let long_skip_data = long_skip_file.read_bytes()?; Positions {
Ok(Positions {
bit_packer: BitPacker4x::new(), bit_packer: BitPacker4x::new(),
skip_file, skip_source,
long_skip_data, long_skip_source,
position_file, position_source,
}) }
} }
/// Returns the offset of the block associated with the given `long_skip_id`. /// Returns the offset of the block associated with the given `long_skip_id`.
@@ -59,23 +54,19 @@ impl Positions {
if long_skip_id == 0 { if long_skip_id == 0 {
return 0; return 0;
} }
let long_skip_slice = self.long_skip_data.as_slice(); let long_skip_slice = self.long_skip_source.as_slice();
let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8]; let mut long_skip_blocks: &[u8] = &long_skip_slice[(long_skip_id - 1) * 8..][..8];
u64::deserialize(&mut long_skip_blocks).expect("Index corrupted") u64::deserialize(&mut long_skip_blocks).expect("Index corrupted")
} }
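A standalone sketch of that long-skip lookup, assuming little-endian `u64` entries purely for illustration (the real code goes through `BinarySerializable`):

use std::convert::TryInto;

// One u64 entry per LONG_SKIP_IN_BLOCKS blocks: entry i - 1 holds the byte
// offset of block i * LONG_SKIP_IN_BLOCKS, so id 0 needs no entry at all.
fn long_skip(table: &[u8], long_skip_id: usize) -> u64 {
    if long_skip_id == 0 {
        return 0;
    }
    let bytes: [u8; 8] = table[(long_skip_id - 1) * 8..][..8].try_into().unwrap();
    u64::from_le_bytes(bytes)
}

fn main() {
    // A toy table with two entries: byte offsets 1_000 and 2_500.
    let mut table = Vec::new();
    table.extend_from_slice(&1_000u64.to_le_bytes());
    table.extend_from_slice(&2_500u64.to_le_bytes());
    assert_eq!(long_skip(&table, 0), 0);
    assert_eq!(long_skip(&table, 1), 1_000);
    assert_eq!(long_skip(&table, 2), 2_500);
}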
fn reader(&self, offset: u64) -> io::Result<PositionReader> { fn reader(&self, offset: u64) -> PositionReader {
let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize; let long_skip_id = (offset / LONG_SKIP_INTERVAL) as usize;
let offset_num_bytes: u64 = self.long_skip(long_skip_id); let offset_num_bytes: u64 = self.long_skip(long_skip_id);
let position_read = self let mut position_read = OwnedRead::new(self.position_source.clone());
.position_file position_read.advance(offset_num_bytes as usize);
.slice_from(offset_num_bytes as usize) let mut skip_read = OwnedRead::new(self.skip_source.clone());
.read_bytes()?; skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
let skip_read = self PositionReader {
.skip_file
.slice_from(long_skip_id * LONG_SKIP_IN_BLOCKS)
.read_bytes()?;
Ok(PositionReader {
bit_packer: self.bit_packer, bit_packer: self.bit_packer,
skip_read, skip_read,
position_read, position_read,
@@ -83,14 +74,13 @@ impl Positions {
block_offset: std::i64::MAX as u64, block_offset: std::i64::MAX as u64,
anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL, anchor_offset: (long_skip_id as u64) * LONG_SKIP_INTERVAL,
abs_offset: offset, abs_offset: offset,
}) }
} }
} }
#[derive(Clone)]
pub struct PositionReader { pub struct PositionReader {
skip_read: OwnedBytes, skip_read: OwnedRead,
position_read: OwnedBytes, position_read: OwnedRead,
bit_packer: BitPacker4x, bit_packer: BitPacker4x,
buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>, buffer: Box<[u32; COMPRESSION_BLOCK_SIZE]>,
@@ -102,12 +92,11 @@ pub struct PositionReader {
impl PositionReader { impl PositionReader {
pub fn new( pub fn new(
position_file: FileSlice, position_source: ReadOnlySource,
skip_file: FileSlice, skip_source: ReadOnlySource,
offset: u64, offset: u64,
) -> io::Result<PositionReader> { ) -> PositionReader {
let positions = Positions::new(position_file, skip_file)?; Positions::new(position_source, skip_source).reader(offset)
positions.reader(offset)
} }
fn advance_num_blocks(&mut self, num_blocks: usize) { fn advance_num_blocks(&mut self, num_blocks: usize) {
@@ -141,7 +130,7 @@ impl PositionReader {
self.advance_num_blocks(num_blocks_to_skip); self.advance_num_blocks(num_blocks_to_skip);
self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64); self.anchor_offset = offset - (offset % COMPRESSION_BLOCK_SIZE as u64);
self.block_offset = self.anchor_offset; self.block_offset = self.anchor_offset;
let num_bits = self.skip_read.as_slice()[0]; let num_bits = self.skip_read.get(0);
self.bit_packer self.bit_packer
.decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits); .decompress(self.position_read.as_ref(), self.buffer.as_mut(), num_bits);
} else { } else {
@@ -151,7 +140,7 @@ impl PositionReader {
self.anchor_offset = self.block_offset; self.anchor_offset = self.block_offset;
} }
let mut num_bits = self.skip_read.as_slice()[0]; let mut num_bits = self.skip_read.get(0);
let mut position_data = self.position_read.as_ref(); let mut position_data = self.position_read.as_ref();
for i in 1.. { for i in 1.. {
@@ -165,7 +154,7 @@ impl PositionReader {
output = &mut output[remaining_in_block..]; output = &mut output[remaining_in_block..];
offset += remaining_in_block as u64; offset += remaining_in_block as u64;
position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..]; position_data = &position_data[(num_bits as usize * COMPRESSION_BLOCK_SIZE / 8)..];
num_bits = self.skip_read.as_slice()[i]; num_bits = self.skip_read.get(i);
self.bit_packer self.bit_packer
.decompress(position_data, self.buffer.as_mut(), num_bits); .decompress(position_data, self.buffer.as_mut(), num_bits);
self.block_offset += COMPRESSION_BLOCK_SIZE as u64; self.block_offset += COMPRESSION_BLOCK_SIZE as u64;

@@ -8,7 +8,7 @@ use std::io::{self, Write};
pub struct PositionSerializer<W: io::Write> { pub struct PositionSerializer<W: io::Write> {
bit_packer: BitPacker4x, bit_packer: BitPacker4x,
write_stream: CountingWriter<W>, write_stream: CountingWriter<W>,
write_skip_index: W, write_skiplist: W,
block: Vec<u32>, block: Vec<u32>,
buffer: Vec<u8>, buffer: Vec<u8>,
num_ints: u64, num_ints: u64,
@@ -16,11 +16,11 @@ pub struct PositionSerializer<W: io::Write> {
} }
impl<W: io::Write> PositionSerializer<W> { impl<W: io::Write> PositionSerializer<W> {
pub fn new(write_stream: W, write_skip_index: W) -> PositionSerializer<W> { pub fn new(write_stream: W, write_skiplist: W) -> PositionSerializer<W> {
PositionSerializer { PositionSerializer {
bit_packer: BitPacker4x::new(), bit_packer: BitPacker4x::new(),
write_stream: CountingWriter::wrap(write_stream), write_stream: CountingWriter::wrap(write_stream),
write_skip_index, write_skiplist,
block: Vec::with_capacity(128), block: Vec::with_capacity(128),
buffer: vec![0u8; 128 * 4], buffer: vec![0u8; 128 * 4],
num_ints: 0u64, num_ints: 0u64,
@@ -52,7 +52,7 @@ impl<W: io::Write> PositionSerializer<W> {
fn flush_block(&mut self) -> io::Result<()> { fn flush_block(&mut self) -> io::Result<()> {
let num_bits = self.bit_packer.num_bits(&self.block[..]); let num_bits = self.bit_packer.num_bits(&self.block[..]);
self.write_skip_index.write_all(&[num_bits])?; self.write_skiplist.write_all(&[num_bits])?;
let written_len = self let written_len = self
.bit_packer .bit_packer
.compress(&self.block[..], &mut self.buffer, num_bits); .compress(&self.block[..], &mut self.buffer, num_bits);
@@ -70,10 +70,10 @@ impl<W: io::Write> PositionSerializer<W> {
self.flush_block()?; self.flush_block()?;
} }
for &long_skip in &self.long_skips { for &long_skip in &self.long_skips {
long_skip.serialize(&mut self.write_skip_index)?; long_skip.serialize(&mut self.write_skiplist)?;
} }
(self.long_skips.len() as u32).serialize(&mut self.write_skip_index)?; (self.long_skips.len() as u32).serialize(&mut self.write_skiplist)?;
self.write_skip_index.flush()?; self.write_skiplist.flush()?;
self.write_stream.flush()?; self.write_stream.flush()?;
Ok(()) Ok(())
} }

@@ -1,24 +1,11 @@
use std::io;
use crate::common::{BinarySerializable, VInt}; use crate::common::{BinarySerializable, VInt};
use crate::directory::FileSlice; use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{ use crate::postings::compression::{
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE, AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
}; };
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader}; use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED}; use crate::{DocId, TERMINATED};
fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
if let Some(first) = it.next() {
Some(it.fold(first, Score::max))
} else {
None
}
}
/// `BlockSegmentPostings` is a cursor iterating over blocks /// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents. /// of documents.
@@ -27,18 +14,16 @@ fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
/// ///
/// While it is useful for some very specific high-performance /// While it is useful for some very specific high-performance
/// use cases, you should prefer using `SegmentPostings` for most usage. /// use cases, you should prefer using `SegmentPostings` for most usage.
#[derive(Clone)]
pub struct BlockSegmentPostings { pub struct BlockSegmentPostings {
pub(crate) doc_decoder: BlockDecoder, pub(crate) doc_decoder: BlockDecoder,
loaded_offset: usize, loaded_offset: usize,
freq_decoder: BlockDecoder, freq_decoder: BlockDecoder,
freq_reading_option: FreqReadingOption, freq_reading_option: FreqReadingOption,
block_max_score_cache: Option<Score>,
doc_freq: u32, doc_freq: usize,
data: OwnedBytes, data: ReadOnlySource,
pub(crate) skip_reader: SkipReader, skip_reader: SkipReader,
} }
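The intended access pattern is block-at-a-time. A toy mock of the cursor shape (not tantivy's type; `advance` here is only illustrative):

// Toy stand-in for the block cursor: exposes one block of doc ids at a time.
struct MockBlockPostings {
    blocks: Vec<Vec<u32>>,
    cursor: usize,
}

impl MockBlockPostings {
    fn docs(&self) -> &[u32] {
        &self.blocks[self.cursor]
    }
    /// Moves to the next block; returns false once exhausted.
    fn advance(&mut self) -> bool {
        self.cursor += 1;
        self.cursor < self.blocks.len()
    }
}

fn main() {
    let mut postings = MockBlockPostings {
        blocks: vec![vec![0, 3, 7], vec![9, 12]],
        cursor: 0,
    };
    let mut total = 0;
    loop {
        // Process a whole decompressed block at once: the point of this API.
        total += postings.docs().len();
        if !postings.advance() {
            break;
        }
    }
    assert_eq!(total, 5);
}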
fn decode_bitpacked_block( fn decode_bitpacked_block(
@@ -62,104 +47,59 @@ fn decode_vint_block(
doc_offset: DocId, doc_offset: DocId,
num_vint_docs: usize, num_vint_docs: usize,
) { ) {
let num_consumed_bytes = let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
if let Some(freq_decoder) = freq_decoder_opt { if let Some(freq_decoder) = freq_decoder_opt {
freq_decoder.uncompress_vint_unsorted( freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
&data[num_consumed_bytes..],
num_vint_docs,
TERMINATED,
);
} }
} }
fn split_into_skips_and_postings( fn split_into_skips_and_postings(
doc_freq: u32, doc_freq: u32,
mut bytes: OwnedBytes, data: ReadOnlySource,
) -> (Option<OwnedBytes>, OwnedBytes) { ) -> (Option<ReadOnlySource>, ReadOnlySource) {
if doc_freq < COMPRESSION_BLOCK_SIZE as u32 { if doc_freq < COMPRESSION_BLOCK_SIZE as u32 {
return (None, bytes); return (None, data);
} }
let skip_len = VInt::deserialize(&mut bytes).expect("Data corrupted").0 as usize; let mut data_byte_arr = data.as_slice();
let (skip_data, postings_data) = bytes.split(skip_len); let skip_len = VInt::deserialize(&mut data_byte_arr)
.expect("Data corrupted")
.0 as usize;
let vint_len = data.len() - data_byte_arr.len();
let (skip_data, postings_data) = data.slice_from(vint_len).split(skip_len);
(Some(skip_data), postings_data) (Some(skip_data), postings_data)
} }
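The layout this function assumes is a VInt length prefix, then the skip data, then the postings data. A self-contained sketch of that framing, using a simple LEB128-style varint in place of tantivy's `VInt`:

// Minimal LEB128-style varint, standing in for tantivy's VInt.
fn write_vint(mut v: u64, out: &mut Vec<u8>) {
    loop {
        let byte = (v & 0x7f) as u8;
        v >>= 7;
        if v == 0 {
            out.push(byte);
            return;
        }
        out.push(byte | 0x80);
    }
}

fn read_vint(data: &[u8]) -> (u64, usize) {
    let (mut v, mut shift, mut i) = (0u64, 0u32, 0usize);
    loop {
        let byte = data[i];
        v |= u64::from(byte & 0x7f) << shift;
        i += 1;
        if byte & 0x80 == 0 {
            return (v, i);
        }
        shift += 7;
    }
}

fn main() {
    let skip = vec![1u8, 2, 3];
    let postings = vec![9u8, 9, 9, 9];
    // Serialize: vint(skip_len) | skip bytes | postings bytes.
    let mut data = Vec::new();
    write_vint(skip.len() as u64, &mut data);
    data.extend_from_slice(&skip);
    data.extend_from_slice(&postings);
    // Deserialize: read the length prefix, then split the remainder.
    let (skip_len, header_len) = read_vint(&data);
    let (skip_out, postings_out) = data[header_len..].split_at(skip_len as usize);
    assert_eq!(skip_out, &skip[..]);
    assert_eq!(postings_out, &postings[..]);
}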
impl BlockSegmentPostings { impl BlockSegmentPostings {
pub(crate) fn open( pub(crate) fn from_data(
doc_freq: u32, doc_freq: u32,
data: FileSlice, data: ReadOnlySource,
record_option: IndexRecordOption, record_option: IndexRecordOption,
requested_option: IndexRecordOption, requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> { ) -> BlockSegmentPostings {
let freq_reading_option = match (record_option, requested_option) { let freq_reading_option = match (record_option, requested_option) {
(IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq, (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
(_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq, (_, IndexRecordOption::Basic) => FreqReadingOption::SkipFreq,
(_, _) => FreqReadingOption::ReadFreq, (_, _) => FreqReadingOption::ReadFreq,
}; };
let (skip_data_opt, postings_data) = let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
split_into_skips_and_postings(doc_freq, data.read_bytes()?);
let skip_reader = match skip_data_opt { let skip_reader = match skip_data_opt {
Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option), Some(skip_data) => SkipReader::new(skip_data, doc_freq, record_option),
None => SkipReader::new(OwnedBytes::empty(), doc_freq, record_option), None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
}; };
let doc_freq = doc_freq as usize;
let mut block_segment_postings = BlockSegmentPostings { let mut block_segment_postings = BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED), doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: std::usize::MAX, loaded_offset: std::usize::MAX,
freq_decoder: BlockDecoder::with_val(1), freq_decoder: BlockDecoder::with_val(1),
freq_reading_option, freq_reading_option,
block_max_score_cache: None,
doc_freq, doc_freq,
data: postings_data, data: postings_data,
skip_reader, skip_reader,
}; };
block_segment_postings.load_block(); block_segment_postings.load_block();
Ok(block_segment_postings) block_segment_postings
}
/// Returns the block_max_score for the current block.
/// It does not require the block to be loaded. For instance, it is ok to call this method
/// after having called `.shallow_seek(..)`.
///
/// See `TermScorer::block_max_score(..)` for more information.
pub fn block_max_score(
&mut self,
fieldnorm_reader: &FieldNormReader,
bm25_weight: &BM25Weight,
) -> Score {
if let Some(score) = self.block_max_score_cache {
return score;
}
if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
// if we are on a full block, the skip reader should have the block max information
// for us
self.block_max_score_cache = Some(skip_reader_max_score);
return skip_reader_max_score;
}
// this is the last block of the segment posting list.
// If it is actually loaded, we can compute block max manually.
if self.block_is_loaded() {
let docs = self.doc_decoder.output_array().iter().cloned();
let freqs = self.freq_decoder.output_array().iter().cloned();
let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
});
let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
self.block_max_score_cache = Some(block_max_score);
return block_max_score;
}
// We do not have access to any good block max value. We return bm25_weight.max_score()
// as it is a valid upperbound.
//
// We do not cache it, however, so that it gets computed once the block is loaded.
bm25_weight.max_score()
}
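The point of these per-block upper bounds is to let a scorer skip blocks that cannot beat the current threshold. A toy sketch of that pruning loop (the `Block` struct here is hypothetical, not tantivy's skip reader):

// Toy block-max pruning: each block carries an upper bound on its scores,
// so blocks that cannot beat the threshold are skipped without decoding.
struct Block {
    max_score: f32,
    docs: Vec<(u32, f32)>, // (doc id, exact score)
}

fn collect_above(blocks: &[Block], threshold: f32) -> Vec<u32> {
    let mut hits = Vec::new();
    for block in blocks {
        if block.max_score < threshold {
            continue; // pruned: this block is never decoded
        }
        for &(doc, score) in &block.docs {
            if score >= threshold {
                hits.push(doc);
            }
        }
    }
    hits
}

fn main() {
    let blocks = vec![
        Block { max_score: 0.4, docs: vec![(1, 0.2), (2, 0.4)] },
        Block { max_score: 1.8, docs: vec![(5, 0.9), (8, 1.8)] },
    ];
    // With a threshold of 1.0, the first block is skipped entirely.
    assert_eq!(collect_above(&blocks, 1.0), vec![8]);
}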
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.freq_reading_option
} }
// Resets the block segment postings on another position // Resets the block segment postings on another position
@@ -172,26 +112,24 @@ impl BlockSegmentPostings {
// # Warning // # Warning
// //
// This does not reset the positions list. // This does not reset the positions list.
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) { pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data); let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
self.data = postings_data; self.data = ReadOnlySource::new(postings_data);
self.block_max_score_cache = None;
self.loaded_offset = std::usize::MAX; self.loaded_offset = std::usize::MAX;
if let Some(skip_data) = skip_data_opt { if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq); self.skip_reader.reset(skip_data, doc_freq);
} else { } else {
self.skip_reader.reset(OwnedBytes::empty(), doc_freq); self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
} }
self.doc_freq = doc_freq; self.doc_freq = doc_freq as usize;
self.load_block(); self.load_block();
} }
-   /// Returns the overall number of documents in the block postings.
-   /// It does not take into account whether documents are deleted or not.
+   /// Returns the document frequency associated to this block postings.
    ///
    /// This `doc_freq` is simply the sum of the lengths of all of the blocks,
    /// and it does not take deleted documents into account.
-   pub fn doc_freq(&self) -> u32 {
+   pub fn doc_freq(&self) -> usize {
        self.doc_freq
    }
@@ -201,20 +139,11 @@ impl BlockSegmentPostings {
    /// returned by `.docs()` is empty.
    #[inline]
    pub fn docs(&self) -> &[DocId] {
-       debug_assert!(self.block_is_loaded());
        self.doc_decoder.output_array()
    }

-   /// Returns a full block, regardless of whether the block is complete or incomplete
-   /// (as it happens for the last block of the posting list).
-   ///
-   /// In the latter case, the block is guaranteed to be padded with the sentinel value:
-   /// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
-   ///
-   /// This method is useful to run SSE2 linear search.
    #[inline(always)]
    pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
-       debug_assert!(self.block_is_loaded());
        self.doc_decoder.output_aligned()
    }
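For context, the alignment and sentinel padding described above are what make a branch-free SIMD scan practical. A scalar model of that linear search, under the assumption that unused slots hold `TERMINATED`:

    fn linear_search_block(block: &AlignedBuffer, target: u32) -> usize {
        // Because padding slots hold TERMINATED (u32::MAX), the scan always
        // stops inside the array, so no per-element bounds check is needed.
        // The SSE2 version performs this comparison four lanes at a time on
        // the aligned buffer and popcounts the resulting mask.
        block.0.iter().take_while(|&&doc| doc < target).count()
    }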
@@ -227,14 +156,12 @@ impl BlockSegmentPostings {
    /// Return the array of `term freq` in the block.
    #[inline]
    pub fn freqs(&self) -> &[u32] {
-       debug_assert!(self.block_is_loaded());
        self.freq_decoder.output_array()
    }

    /// Return the frequency at index `idx` of the block.
    #[inline]
    pub fn freq(&self, idx: usize) -> u32 {
-       debug_assert!(self.block_is_loaded());
        self.freq_decoder.output(idx)
    }
@@ -245,40 +172,23 @@ impl BlockSegmentPostings {
    /// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
    #[inline]
    pub fn block_len(&self) -> usize {
-       debug_assert!(self.block_is_loaded());
        self.doc_decoder.output_len
    }

+   pub(crate) fn position_offset(&self) -> u64 {
+       self.skip_reader.position_offset()
+   }

    /// Position on a block that may contain `target_doc`.
    ///
    /// If all docs are smaller than target, the block loaded may be empty,
    /// or be the last, incomplete VInt block.
    pub fn seek(&mut self, target_doc: DocId) {
-       self.shallow_seek(target_doc);
+       self.skip_reader.seek(target_doc);
        self.load_block();
    }

-   pub(crate) fn position_offset(&self) -> u64 {
-       self.skip_reader.position_offset()
-   }
-
-   /// Dangerous API! This calls seek on the skip list,
-   /// but does not call `.load_block()` afterwards;
-   /// `.load_block()` needs to be called manually.
-   /// If all docs are smaller than target, the block loaded may be empty,
-   /// or be the last, incomplete VInt block.
-   pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
-       if self.skip_reader.seek(target_doc) {
-           self.block_max_score_cache = None;
-       }
-   }
-
-   pub(crate) fn block_is_loaded(&self) -> bool {
-       self.loaded_offset == self.skip_reader.byte_offset()
-   }
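The intended call pattern for this pair of methods, sketched under the crate-internal visibility assumptions above:

    // Inspect skip-list metadata cheaply, and only pay the bitpacked decode
    // once a block is actually needed.
    fn seek_lazily(postings: &mut BlockSegmentPostings, target: DocId) {
        postings.shallow_seek(target); // skip list only, no decoding
        if !postings.block_is_loaded() {
            postings.load_block(); // decode docs (and freqs) for the block
        }
    }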
-   pub(crate) fn load_block(&mut self) {
+   fn load_block(&mut self) {
        let offset = self.skip_reader.byte_offset();
        if self.loaded_offset == offset {
            return;
@@ -303,14 +213,11 @@ impl BlockSegmentPostings {
                    tf_num_bits,
                );
            }
-           BlockInfo::VInt { num_docs } => {
-               let data = {
-                   if num_docs == 0 {
-                       &[]
-                   } else {
-                       &self.data.as_slice()[offset..]
-                   }
-               };
+           BlockInfo::VInt(num_vint_docs) => {
+               self.doc_decoder.clear();
+               if num_vint_docs == 0 {
+                   return;
+               }
                decode_vint_block(
                    &mut self.doc_decoder,
@@ -318,9 +225,9 @@ impl BlockSegmentPostings {
                    if let FreqReadingOption::ReadFreq = self.freq_reading_option {
                    } else {
                        None
                    },
-                   data,
+                   &self.data.as_slice()[offset..],
                    self.skip_reader.last_doc_in_previous_block,
-                   num_docs as usize,
+                   num_vint_docs as usize,
                );
            }
        }
@@ -331,7 +238,6 @@ impl BlockSegmentPostings {
    /// Returns false iff there are no remaining blocks.
    pub fn advance(&mut self) {
        self.skip_reader.advance();
-       self.block_max_score_cache = None;
        self.load_block();
    }
@@ -339,13 +245,12 @@ impl BlockSegmentPostings {
    pub fn empty() -> BlockSegmentPostings {
        BlockSegmentPostings {
            doc_decoder: BlockDecoder::with_val(TERMINATED),
-           loaded_offset: 0,
+           loaded_offset: std::usize::MAX,
            freq_decoder: BlockDecoder::with_val(1),
            freq_reading_option: FreqReadingOption::NoFreq,
-           block_max_score_cache: None,
            doc_freq: 0,
-           data: OwnedBytes::empty(),
-           skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
+           data: ReadOnlySource::new(vec![]),
+           skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
        }
    }
}
@@ -368,10 +273,8 @@ mod tests {
    #[test]
    fn test_empty_segment_postings() {
        let mut postings = SegmentPostings::empty();
-       assert_eq!(postings.doc(), TERMINATED);
        assert_eq!(postings.advance(), TERMINATED);
        assert_eq!(postings.advance(), TERMINATED);
-       assert_eq!(postings.doc_freq(), 0);
        assert_eq!(postings.len(), 0);
    }

@@ -391,8 +294,6 @@ mod tests {
    #[test]
    fn test_empty_block_segment_postings() {
        let mut postings = BlockSegmentPostings::empty();
-       assert!(postings.docs().is_empty());
-       assert_eq!(postings.doc_freq(), 0);
        postings.advance();
        assert!(postings.docs().is_empty());
        assert_eq!(postings.doc_freq(), 0);
@@ -455,7 +356,7 @@ mod tests {
        let int_field = schema_builder.add_u64_field("id", INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-       let mut index_writer = index.writer_for_tests().unwrap();
+       let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        let mut last_doc = 0u32;
        for &doc in docs {
            for _ in last_doc..doc {
@@ -467,16 +368,14 @@ mod tests {
        index_writer.commit().unwrap();
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
-       let inverted_index = segment_reader.inverted_index(int_field).unwrap();
+       let inverted_index = segment_reader.inverted_index(int_field);
        let term = Term::from_field_u64(int_field, 0u64);
-       let term_info = inverted_index.get_term_info(&term).unwrap().unwrap();
-       inverted_index
-           .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
-           .unwrap()
+       let term_info = inverted_index.get_term_info(&term).unwrap();
+       inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)
    }

    #[test]
-   fn test_block_segment_postings_seek() {
+   fn test_block_segment_postings_skip2() {
        let mut docs = vec![0];
        for i in 0..1300 {
            docs.push((i * i / 100) + i);
@@ -493,38 +392,37 @@ mod tests {
    }

    #[test]
-   fn test_reset_block_segment_postings() -> crate::Result<()> {
+   fn test_reset_block_segment_postings() {
        let mut schema_builder = Schema::builder();
        let int_field = schema_builder.add_u64_field("id", INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-       let mut index_writer = index.writer_for_tests()?;
+       let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        // create two postings lists, one containing even numbers,
        // the other containing odd numbers.
        for i in 0..6 {
            let doc = doc!(int_field=> (i % 2) as u64);
            index_writer.add_document(doc);
        }
-       index_writer.commit()?;
-       let searcher = index.reader()?.searcher();
+       index_writer.commit().unwrap();
+       let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let mut block_segments;
        {
            let term = Term::from_field_u64(int_field, 0u64);
-           let inverted_index = segment_reader.inverted_index(int_field)?;
-           let term_info = inverted_index.get_term_info(&term)?.unwrap();
+           let inverted_index = segment_reader.inverted_index(int_field);
+           let term_info = inverted_index.get_term_info(&term).unwrap();
            block_segments = inverted_index
-               .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
+               .read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic);
        }
        assert_eq!(block_segments.docs(), &[0, 2, 4]);
        {
            let term = Term::from_field_u64(int_field, 1u64);
-           let inverted_index = segment_reader.inverted_index(int_field)?;
-           let term_info = inverted_index.get_term_info(&term)?.unwrap();
-           inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
+           let inverted_index = segment_reader.inverted_index(int_field);
+           let term_info = inverted_index.get_term_info(&term).unwrap();
+           inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
        }
        assert_eq!(block_segments.docs(), &[1, 3, 5]);
-       Ok(())
    }
}
View File
@@ -1,4 +1,5 @@
use crate::common::FixedSize;
+use crate::docset::TERMINATED;
use bitpacking::{BitPacker, BitPacker4x};

pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
@@ -52,10 +53,8 @@ impl BlockEncoder {
/// We ensure that the OutputBuffer is aligned on 128 bits
/// in order to run SSE2 linear search on it.
#[repr(align(128))]
-#[derive(Clone)]
pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);

-#[derive(Clone)]
pub struct BlockDecoder {
    bitpacker: BitPacker4x,
    output: AlignedBuffer,
@@ -108,6 +107,11 @@ impl BlockDecoder {
    pub fn output(&self, idx: usize) -> u32 {
        self.output.0[idx]
    }

+   pub fn clear(&mut self) {
+       self.output_len = 0;
+       self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
+   }
}
pub trait VIntEncoder {
@@ -144,14 +148,11 @@ pub trait VIntDecoder {
    /// For instance, if the encoded deltas are `1, 3, 9`, and the
    /// `offset` is 5, then the output will be:
    /// `5 + 1 = 6, 6 + 3 = 9, 9 + 9 = 18`
-   ///
-   /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
    fn uncompress_vint_sorted(
        &mut self,
        compressed_data: &[u8],
        offset: u32,
        num_els: usize,
-       padding: u32,
    ) -> usize;

    /// Uncompress an array of `u32`s, compressed using variable
@@ -159,14 +160,7 @@ pub trait VIntDecoder {
    ///
    /// The method takes the number of ints to decompress, and returns
    /// the number of bytes that were read to decompress them.
-   ///
-   /// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
-   fn uncompress_vint_unsorted(
-       &mut self,
-       compressed_data: &[u8],
-       num_els: usize,
-       padding: u32,
-   ) -> usize;
+   fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize;
}
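A dependency-free model of what `uncompress_vint_sorted` computes, matching the worked example in the doc comment (the variable-byte decoding itself is elided):

    // Sorted values are stored as deltas on top of `offset`:
    // offset = 5, deltas = [1, 3, 9]  =>  output = [6, 9, 18].
    fn delta_decode_sorted(offset: u32, deltas: &[u32], output: &mut [u32]) {
        let mut prev = offset;
        for (slot, &delta) in output.iter_mut().zip(deltas) {
            prev += delta;
            *slot = prev;
        }
    }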
impl VIntEncoder for BlockEncoder {
@@ -185,21 +179,13 @@ impl VIntDecoder for BlockDecoder {
        compressed_data: &[u8],
        offset: u32,
        num_els: usize,
-       padding: u32,
    ) -> usize {
        self.output_len = num_els;
-       self.output.0.iter_mut().for_each(|el| *el = padding);
        vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
    }

-   fn uncompress_vint_unsorted(
-       &mut self,
-       compressed_data: &[u8],
-       num_els: usize,
-       padding: u32,
-   ) -> usize {
+   fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
        self.output_len = num_els;
-       self.output.0.iter_mut().for_each(|el| *el = padding);
        vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
    }
}
@@ -208,7 +194,6 @@ impl VIntDecoder for BlockDecoder {
pub mod tests {
    use super::*;
-   use crate::TERMINATED;

    #[test]
    fn test_encode_sorted_block() {
@@ -260,6 +245,19 @@ pub mod tests {
    }
}
+   #[test]
+   fn test_clearing() {
+       let mut encoder = BlockEncoder::new();
+       let vals = (0u32..128u32).map(|i| i * 3).collect::<Vec<_>>();
+       let (num_bits, compressed) = encoder.compress_block_sorted(&vals[..], 0u32);
+       let mut decoder = BlockDecoder::default();
+       decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
+       assert_eq!(decoder.output_len, 128);
+       assert_eq!(decoder.output_array(), &vals[..]);
+       decoder.clear();
+       assert!(decoder.output_array().is_empty());
+   }
    #[test]
    fn test_encode_unsorted_block_with_junk() {
        let mut compressed: Vec<u8> = Vec::new();
@@ -287,20 +285,18 @@ pub mod tests {
    }

    #[test]
    fn test_encode_vint() {
-       const PADDING_VALUE: u32 = 234_234_345u32;
+       {
        let expected_length = 154;
        let mut encoder = BlockEncoder::new();
        let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
        for offset in &[0u32, 1u32, 2u32] {
            let encoded_data = encoder.compress_vint_sorted(&input, *offset);
            assert!(encoded_data.len() <= expected_length);
            let mut decoder = BlockDecoder::default();
            let consumed_num_bytes =
-               decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
+               decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
            assert_eq!(consumed_num_bytes, encoded_data.len());
            assert_eq!(input, decoder.output_array());
-           for i in input.len()..COMPRESSION_BLOCK_SIZE {
-               assert_eq!(decoder.output(i), PADDING_VALUE);
-           }
        }
    }
    }
@@ -310,7 +306,6 @@ pub mod tests {
mod bench {
    use super::*;
-   use crate::TERMINATED;
    use rand::rngs::StdRng;
    use rand::Rng;
    use rand::SeedableRng;
@@ -341,7 +336,7 @@ mod bench {
        let mut encoder = BlockEncoder::new();
        let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
        let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
-       let mut decoder = BlockDecoder::default();
+       let mut decoder = BlockDecoder::new();
        b.iter(|| {
            decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
        });
@@ -376,9 +371,9 @@ mod bench {
        let mut encoder = BlockEncoder::new();
        let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
        let compressed = encoder.compress_vint_sorted(&data, 0u32);
-       let mut decoder = BlockDecoder::default();
+       let mut decoder = BlockDecoder::new();
        b.iter(|| {
-           decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED);
+           decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
        });
    }
}
View File
@@ -15,15 +15,21 @@ mod stacker;
mod term_info;

pub(crate) use self::block_search::BlockSearcher;
-pub use self::block_segment_postings::BlockSegmentPostings;
-pub use self::postings::Postings;
pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
-pub use self::segment_postings::SegmentPostings;
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
+pub use self::postings::Postings;
pub(crate) use self::skip::{BlockInfo, SkipReader};
-pub(crate) use self::stacker::compute_table_size;
pub use self::term_info::TermInfo;
+pub use self::block_segment_postings::BlockSegmentPostings;
+pub use self::segment_postings::SegmentPostings;
+pub(crate) use self::stacker::compute_table_size;
+pub use crate::common::HasLen;

pub(crate) type UnorderedTermId = u64;
#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
@@ -36,8 +42,8 @@ pub(crate) enum FreqReadingOption {
#[cfg(test)]
pub mod tests {
-   use super::InvertedIndexSerializer;
-   use super::Postings;
+   use super::*;
    use crate::core::Index;
    use crate::core::SegmentComponent;
    use crate::core::SegmentReader;
@@ -47,59 +53,63 @@ pub mod tests {
    use crate::indexer::SegmentWriter;
    use crate::merge_policy::NoMergePolicy;
    use crate::query::Scorer;
+   use crate::schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
    use crate::schema::{Field, TextOptions};
    use crate::schema::{IndexRecordOption, TextFieldIndexing};
-   use crate::schema::{Schema, Term, INDEXED, TEXT};
    use crate::tokenizer::{SimpleTokenizer, MAX_TOKEN_LEN};
    use crate::DocId;
-   use crate::HasLen;
    use crate::Score;
-   use std::{iter, mem};
+   use once_cell::sync::Lazy;
+   use rand::rngs::StdRng;
+   use rand::{Rng, SeedableRng};
+   use std::iter;
    #[test]
-   pub fn test_position_write() -> crate::Result<()> {
+   pub fn test_position_write() {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut segment = index.new_segment();
-       let mut posting_serializer = InvertedIndexSerializer::open(&mut segment)?;
-       let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4, None)?;
-       field_serializer.new_term("abc".as_bytes(), 12u32)?;
-       for doc_id in 0u32..120u32 {
-           let delta_positions = vec![1, 2, 3, 2];
-           field_serializer.write_doc(doc_id, 4, &delta_positions)?;
+       let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
+       {
+           let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
+           field_serializer.new_term("abc".as_bytes()).unwrap();
+           for doc_id in 0u32..120u32 {
+               let delta_positions = vec![1, 2, 3, 2];
+               field_serializer
+                   .write_doc(doc_id, 4, &delta_positions)
+                   .unwrap();
+           }
+           field_serializer.close_term().unwrap();
        }
-       field_serializer.close_term()?;
-       mem::drop(field_serializer);
-       posting_serializer.close()?;
-       let read = segment.open_read(SegmentComponent::POSITIONS)?;
+       posting_serializer.close().unwrap();
+       let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
        assert!(read.len() <= 140);
-       Ok(())
    }
    #[test]
-   pub fn test_skip_positions() -> crate::Result<()> {
+   pub fn test_skip_positions() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
-       let mut index_writer = index.writer_for_tests()?;
+       let mut index_writer = index.writer_with_num_threads(1, 30_000_000).unwrap();
        index_writer.add_document(doc!(title => r#"abc abc abc"#));
        index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
        for _ in 0..1_000 {
            index_writer.add_document(doc!(title => r#"abc abc abc"#));
        }
        index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
-       index_writer.commit()?;
-       let searcher = index.reader()?.searcher();
-       let inverted_index = searcher.segment_reader(0u32).inverted_index(title)?;
+       index_writer.commit().unwrap();
+       let searcher = index.reader().unwrap().searcher();
+       let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
        let term = Term::from_field_text(title, "abc");
        let mut positions = Vec::new();
        {
            let mut postings = inverted_index
-               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
+               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                .unwrap();
            assert_eq!(postings.doc(), 0);
            postings.positions(&mut positions);
@@ -113,7 +123,7 @@ pub mod tests {
        }
        {
            let mut postings = inverted_index
-               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
+               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                .unwrap();
            assert_eq!(postings.doc(), 0);
            assert_eq!(postings.advance(), 1);
@@ -122,7 +132,7 @@ pub mod tests {
        }
        {
            let mut postings = inverted_index
-               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
+               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                .unwrap();
            assert_eq!(postings.seek(1), 1);
            assert_eq!(postings.doc(), 1);
@@ -131,7 +141,7 @@ pub mod tests {
        }
        {
            let mut postings = inverted_index
-               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
+               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                .unwrap();
            assert_eq!(postings.seek(1002), 1002);
            assert_eq!(postings.doc(), 1002);
@@ -140,7 +150,7 @@ pub mod tests {
        }
        {
            let mut postings = inverted_index
-               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
+               .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
                .unwrap();
            assert_eq!(postings.seek(100), 100);
            assert_eq!(postings.seek(1002), 1002);
@@ -148,11 +158,10 @@ pub mod tests {
            postings.positions(&mut positions);
            assert_eq!(&[0, 5], &positions[..]);
        }
-       Ok(())
    }
    #[test]
-   pub fn test_drop_token_that_are_too_long() -> crate::Result<()> {
+   pub fn test_drop_token_that_are_too_long() {
        let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
        let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
        exceeding_token_text.push_str(" hello");
@@ -169,7 +178,7 @@ pub mod tests {
            .tokenizers()
            .register("simple_no_truncation", SimpleTokenizer);
        let reader = index.reader().unwrap();
-       let mut index_writer = index.writer_for_tests().unwrap();
+       let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        {
            index_writer.add_document(doc!(text_field=>exceeding_token_text));
@@ -177,10 +186,10 @@ pub mod tests {
            reader.reload().unwrap();
            let searcher = reader.searcher();
            let segment_reader = searcher.segment_reader(0u32);
-           let inverted_index = segment_reader.inverted_index(text_field)?;
+           let inverted_index = segment_reader.inverted_index(text_field);
            assert_eq!(inverted_index.terms().num_terms(), 1);
            let mut bytes = vec![];
-           assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
+           assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
            assert_eq!(&bytes, b"hello");
        }
        {
@@ -189,17 +198,16 @@ pub mod tests {
            reader.reload().unwrap();
            let searcher = reader.searcher();
            let segment_reader = searcher.segment_reader(1u32);
-           let inverted_index = segment_reader.inverted_index(text_field)?;
+           let inverted_index = segment_reader.inverted_index(text_field);
            assert_eq!(inverted_index.terms().num_terms(), 1);
            let mut bytes = vec![];
-           assert!(inverted_index.terms().ord_to_term(0, &mut bytes)?);
+           assert!(inverted_index.terms().ord_to_term(0, &mut bytes));
            assert_eq!(&bytes[..], ok_token_text.as_bytes());
        }
-       Ok(())
    }
    #[test]
-   pub fn test_position_and_fieldnorm1() -> crate::Result<()> {
+   pub fn test_position_and_fieldnorm1() {
        let mut positions = Vec::new();
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
@@ -211,38 +219,42 @@ pub mod tests {
            let mut segment_writer =
                SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
            {
+               let mut doc = Document::default();
                // checking that position works if the field has two values
+               doc.add_text(text_field, "a b a c a d a a.");
+               doc.add_text(text_field, "d d d d a");
                let op = AddOperation {
                    opstamp: 0u64,
-                   document: doc!(
-                       text_field => "a b a c a d a a.",
-                       text_field => "d d d d a"
-                   ),
+                   document: doc,
                };
-               segment_writer.add_document(op, &schema)?;
+               segment_writer.add_document(op, &schema).unwrap();
            }
            {
+               let mut doc = Document::default();
+               doc.add_text(text_field, "b a");
                let op = AddOperation {
                    opstamp: 1u64,
-                   document: doc!(text_field => "b a"),
+                   document: doc,
                };
                segment_writer.add_document(op, &schema).unwrap();
            }
            for i in 2..1000 {
-               let mut text: String = iter::repeat("e ").take(i).collect();
+               let mut doc = Document::default();
+               let mut text = iter::repeat("e ").take(i).collect::<String>();
                text.push_str(" a");
+               doc.add_text(text_field, &text);
                let op = AddOperation {
                    opstamp: 2u64,
-                   document: doc!(text_field => text),
+                   document: doc,
                };
                segment_writer.add_document(op, &schema).unwrap();
            }
-           segment_writer.finalize()?;
+           segment_writer.finalize().unwrap();
        }
        {
-           let segment_reader = SegmentReader::open(&segment)?;
+           let segment_reader = SegmentReader::open(&segment).unwrap();
            {
-               let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
+               let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field);
                assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
                assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
                for i in 2..1000 {
@@ -255,15 +267,15 @@ pub mod tests {
            {
                let term_a = Term::from_field_text(text_field, "abcdef");
                assert!(segment_reader
-                   .inverted_index(term_a.field())?
-                   .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
+                   .inverted_index(term_a.field())
+                   .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
                    .is_none());
            }
            {
                let term_a = Term::from_field_text(text_field, "a");
                let mut postings_a = segment_reader
-                   .inverted_index(term_a.field())?
-                   .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
+                   .inverted_index(term_a.field())
+                   .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
                    .unwrap();
                assert_eq!(postings_a.len(), 1000);
                assert_eq!(postings_a.doc(), 0);
@@ -285,8 +297,8 @@ pub mod tests {
            {
                let term_e = Term::from_field_text(text_field, "e");
                let mut postings_e = segment_reader
-                   .inverted_index(term_e.field())?
-                   .read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)?
+                   .inverted_index(term_e.field())
+                   .read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)
                    .unwrap();
                assert_eq!(postings_e.len(), 1000 - 2);
                for i in 2u32..1000u32 {
@@ -302,18 +314,17 @@ pub mod tests {
                assert_eq!(postings_e.doc(), TERMINATED);
            }
        }
-       Ok(())
    }
    #[test]
-   pub fn test_position_and_fieldnorm2() -> crate::Result<()> {
+   pub fn test_position_and_fieldnorm2() {
        let mut positions: Vec<u32> = Vec::new();
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("text", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        {
-           let mut index_writer = index.writer_for_tests().unwrap();
+           let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
            index_writer.add_document(doc!(text_field => "g b b d c g c"));
            index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
            assert!(index_writer.commit().is_ok());
@@ -322,17 +333,16 @@ pub mod tests {
        let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);
        let mut postings = segment_reader
-           .inverted_index(text_field)?
-           .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
+           .inverted_index(text_field)
+           .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
            .unwrap();
        assert_eq!(postings.doc(), 1u32);
        postings.positions(&mut positions);
        assert_eq!(&positions[..], &[1u32, 4]);
-       Ok(())
    }
    #[test]
-   fn test_skip_next() -> crate::Result<()> {
+   fn test_skip_next() {
        let term_0 = Term::from_field_u64(Field::from_field_id(0), 0);
        let term_1 = Term::from_field_u64(Field::from_field_id(0), 1);
        let term_2 = Term::from_field_u64(Field::from_field_id(0), 2);
@@ -343,9 +353,10 @@ pub mod tests {
            let mut schema_builder = Schema::builder();
            let value_field = schema_builder.add_u64_field("value", INDEXED);
            let schema = schema_builder.build();
            let index = Index::create_in_ram(schema);
            {
-               let mut index_writer = index.writer_for_tests()?;
+               let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
                for i in 0u64..num_docs as u64 {
                    let doc = doc!(value_field => 2u64, value_field => i % 2u64);
                    index_writer.add_document(doc);
@@ -354,15 +365,15 @@ pub mod tests {
            }
            index
        };
-       let searcher = index.reader()?.searcher();
+       let searcher = index.reader().unwrap().searcher();
        let segment_reader = searcher.segment_reader(0);

        // check that the basic usage works
        for i in 0..num_docs - 1 {
            for j in i + 1..num_docs {
                let mut segment_postings = segment_reader
-                   .inverted_index(term_2.field())?
-                   .read_postings(&term_2, IndexRecordOption::Basic)?
+                   .inverted_index(term_2.field())
+                   .read_postings(&term_2, IndexRecordOption::Basic)
                    .unwrap();
                assert_eq!(segment_postings.seek(i), i);
                assert_eq!(segment_postings.doc(), i);
@@ -374,8 +385,8 @@ pub mod tests {
        {
            let mut segment_postings = segment_reader
-               .inverted_index(term_2.field())?
-               .read_postings(&term_2, IndexRecordOption::Basic)?
+               .inverted_index(term_2.field())
+               .read_postings(&term_2, IndexRecordOption::Basic)
                .unwrap();

            // check that `skip_next` advances the iterator
@@ -394,8 +405,8 @@ pub mod tests {
        // check that filtering works
        {
            let mut segment_postings = segment_reader
-               .inverted_index(term_0.field())?
-               .read_postings(&term_0, IndexRecordOption::Basic)?
+               .inverted_index(term_0.field())
+               .read_postings(&term_0, IndexRecordOption::Basic)
                .unwrap();
            for i in 0..num_docs / 2 {
@@ -404,8 +415,8 @@ pub mod tests {
            }
            let mut segment_postings = segment_reader
-               .inverted_index(term_0.field())?
-               .read_postings(&term_0, IndexRecordOption::Basic)?
+               .inverted_index(term_0.field())
+               .read_postings(&term_0, IndexRecordOption::Basic)
                .unwrap();
            for i in 0..num_docs / 2 - 1 {
@@ -416,19 +427,19 @@ pub mod tests {

        // delete some of the documents
        {
-           let mut index_writer = index.writer_for_tests()?;
+           let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
            index_writer.delete_term(term_0);
            assert!(index_writer.commit().is_ok());
        }
-       let searcher = index.reader()?.searcher();
+       let searcher = index.reader().unwrap().searcher();
        assert_eq!(searcher.segment_readers().len(), 1);
        let segment_reader = searcher.segment_reader(0);

        // make sure seeking still works
        for i in 0..num_docs {
            let mut segment_postings = segment_reader
-               .inverted_index(term_2.field())?
-               .read_postings(&term_2, IndexRecordOption::Basic)?
+               .inverted_index(term_2.field())
+               .read_postings(&term_2, IndexRecordOption::Basic)
                .unwrap();
            if i % 2 == 0 {
@@ -444,8 +455,8 @@ pub mod tests {
        // now try with a longer sequence
        {
            let mut segment_postings = segment_reader
-               .inverted_index(term_2.field())?
-               .read_postings(&term_2, IndexRecordOption::Basic)?
+               .inverted_index(term_2.field())
+               .read_postings(&term_2, IndexRecordOption::Basic)
                .unwrap();
            let mut last = 2; // start from 5 to avoid seeking to 3 twice
@@ -470,21 +481,69 @@ pub mod tests {

        // delete everything else
        {
-           let mut index_writer = index.writer_for_tests()?;
+           let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
            index_writer.delete_term(term_1);
            assert!(index_writer.commit().is_ok());
        }
-       let searcher = index.reader()?.searcher();
+       let searcher = index.reader().unwrap().searcher();

        // finally, check that it's empty
        {
-           let searchable_segment_ids = index.searchable_segment_ids()?;
+           let searchable_segment_ids = index
+               .searchable_segment_ids()
+               .expect("could not get index segment ids");
            assert!(searchable_segment_ids.is_empty());
            assert_eq!(searcher.num_docs(), 0);
        }
-       Ok(())
    }
+   pub static TERM_A: Lazy<Term> = Lazy::new(|| {
+       let field = Field::from_field_id(0);
+       Term::from_field_text(field, "a")
+   });
+   pub static TERM_B: Lazy<Term> = Lazy::new(|| {
+       let field = Field::from_field_id(0);
+       Term::from_field_text(field, "b")
+   });
+   pub static TERM_C: Lazy<Term> = Lazy::new(|| {
+       let field = Field::from_field_id(0);
+       Term::from_field_text(field, "c")
+   });
+   pub static TERM_D: Lazy<Term> = Lazy::new(|| {
+       let field = Field::from_field_id(0);
+       Term::from_field_text(field, "d")
+   });
+   pub static INDEX: Lazy<Index> = Lazy::new(|| {
+       let mut schema_builder = Schema::builder();
+       let text_field = schema_builder.add_text_field("text", STRING);
+       let schema = schema_builder.build();
+       let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
+       let index = Index::create_in_ram(schema);
+       let posting_list_size = 1_000_000;
+       {
+           let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+           for _ in 0..posting_list_size {
+               let mut doc = Document::default();
+               if rng.gen_bool(1f64 / 15f64) {
+                   doc.add_text(text_field, "a");
+               }
+               if rng.gen_bool(1f64 / 10f64) {
+                   doc.add_text(text_field, "b");
+               }
+               if rng.gen_bool(1f64 / 5f64) {
+                   doc.add_text(text_field, "c");
+               }
+               doc.add_text(text_field, "d");
+               index_writer.add_document(doc);
+           }
+           assert!(index_writer.commit().is_ok());
+       }
+       index
+   });
    /// Wraps a given docset, and forwards all calls but the
    /// `.skip_next(...)` one. This is useful to test that a specialized
    /// implementation of `.skip_next(...)` is consistent
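A minimal sketch of such a wrapper, assuming `DocSet` provides a default, advance-based seek; the real helper in this module may differ:

    struct UnoptimizedDocSet<T: DocSet>(T);

    impl<T: DocSet> DocSet for UnoptimizedDocSet<T> {
        fn advance(&mut self) -> DocId {
            self.0.advance()
        }
        fn doc(&self) -> DocId {
            self.0.doc()
        }
        fn size_hint(&self) -> u32 {
            self.0.size_hint()
        }
        // `seek` is intentionally not overridden: the trait's default steps
        // forward with `advance`, giving a reference behavior to compare
        // specialized seek implementations against.
    }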
@@ -549,65 +608,15 @@ mod bench {
#[cfg(all(test, feature = "unstable"))]
mod bench {
+   use super::tests::*;
    use crate::docset::TERMINATED;
    use crate::query::Intersection;
    use crate::schema::IndexRecordOption;
-   use crate::schema::{Document, Field, Schema, Term, STRING};
    use crate::tests;
    use crate::DocSet;
-   use crate::Index;
-   use once_cell::sync::Lazy;
-   use rand::rngs::StdRng;
-   use rand::{Rng, SeedableRng};
    use test::{self, Bencher};
-   pub static TERM_A: Lazy<Term> = Lazy::new(|| {
-       let field = Field::from_field_id(0);
-       Term::from_field_text(field, "a")
-   });
-   pub static TERM_B: Lazy<Term> = Lazy::new(|| {
-       let field = Field::from_field_id(0);
-       Term::from_field_text(field, "b")
-   });
-   pub static TERM_C: Lazy<Term> = Lazy::new(|| {
-       let field = Field::from_field_id(0);
-       Term::from_field_text(field, "c")
-   });
-   pub static TERM_D: Lazy<Term> = Lazy::new(|| {
-       let field = Field::from_field_id(0);
-       Term::from_field_text(field, "d")
-   });
-   pub static INDEX: Lazy<Index> = Lazy::new(|| {
-       let mut schema_builder = Schema::builder();
-       let text_field = schema_builder.add_text_field("text", STRING);
-       let schema = schema_builder.build();
-       let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
-       let index = Index::create_in_ram(schema);
-       let posting_list_size = 1_000_000;
-       {
-           let mut index_writer = index.writer_for_tests().unwrap();
-           for _ in 0..posting_list_size {
-               let mut doc = Document::default();
-               if rng.gen_bool(1f64 / 15f64) {
-                   doc.add_text(text_field, "a");
-               }
-               if rng.gen_bool(1f64 / 10f64) {
-                   doc.add_text(text_field, "b");
-               }
-               if rng.gen_bool(1f64 / 5f64) {
-                   doc.add_text(text_field, "c");
-               }
-               doc.add_text(text_field, "d");
-               index_writer.add_document(doc);
-           }
-           assert!(index_writer.commit().is_ok());
-       }
-       index
-   });
    #[bench]
    fn bench_segment_postings(b: &mut Bencher) {
        let reader = INDEX.reader().unwrap();
@@ -617,9 +626,7 @@ mod bench {
        b.iter(|| {
            let mut segment_postings = segment_reader
                .inverted_index(TERM_A.field())
-               .unwrap()
                .read_postings(&*TERM_A, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            while segment_postings.advance() != TERMINATED {}
        });
@@ -633,27 +640,19 @@ mod bench {
        b.iter(|| {
            let segment_postings_a = segment_reader
                .inverted_index(TERM_A.field())
-               .unwrap()
                .read_postings(&*TERM_A, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            let segment_postings_b = segment_reader
                .inverted_index(TERM_B.field())
-               .unwrap()
                .read_postings(&*TERM_B, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            let segment_postings_c = segment_reader
                .inverted_index(TERM_C.field())
-               .unwrap()
                .read_postings(&*TERM_C, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            let segment_postings_d = segment_reader
                .inverted_index(TERM_D.field())
-               .unwrap()
                .read_postings(&*TERM_D, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            let mut intersection = Intersection::new(vec![
                segment_postings_a,
@@ -673,9 +672,7 @@ mod bench {
        let mut segment_postings = segment_reader
            .inverted_index(TERM_A.field())
-           .unwrap()
            .read_postings(&*TERM_A, IndexRecordOption::Basic)
-           .unwrap()
            .unwrap();

        let mut existing_docs = Vec::new();
@@ -691,9 +688,7 @@ mod bench {
        b.iter(|| {
            let mut segment_postings = segment_reader
                .inverted_index(TERM_A.field())
-               .unwrap()
                .read_postings(&*TERM_A, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            for doc in &existing_docs {
                if segment_postings.seek(*doc) == TERMINATED {
@@ -732,14 +727,12 @@ mod bench {
            let n: u32 = test::black_box(17);
            let mut segment_postings = segment_reader
                .inverted_index(TERM_A.field())
-               .unwrap()
                .read_postings(&*TERM_A, IndexRecordOption::Basic)
-               .unwrap()
                .unwrap();
            let mut s = 0u32;
            while segment_postings.doc() != TERMINATED {
                s += (segment_postings.doc() & n) % 1024;
-               segment_postings.advance();
+               segment_postings.advance()
            }
            s
        });
View File
@@ -1,6 +1,5 @@
use super::stacker::{Addr, MemoryArena, TermHashMap};
-use crate::fieldnorm::FieldNormReaders;
use crate::postings::recorder::{
    BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
};
@@ -38,8 +37,12 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter>
        | FieldType::I64(_)
        | FieldType::F64(_)
        | FieldType::Date(_)
-       | FieldType::Bytes(_)
        | FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
+       FieldType::Bytes => {
+           // FieldType::Bytes cannot actually be indexed.
+           // TODO fix during the indexer refactoring described in #276
+           SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
+       }
    }
}
@@ -101,7 +104,6 @@ impl MultiFieldPostingsWriter {
        doc: DocId,
        field: Field,
        token_stream: &mut dyn TokenStream,
-       term_buffer: &mut Term,
    ) -> u32 {
        let postings_writer =
            self.per_field_postings_writers[field.field_id() as usize].deref_mut();
@@ -111,7 +113,6 @@ impl MultiFieldPostingsWriter {
            field,
            token_stream,
            &mut self.heap,
-           term_buffer,
        )
    }
@@ -127,7 +128,6 @@ impl MultiFieldPostingsWriter {
    pub fn serialize(
        &self,
        serializer: &mut InvertedIndexSerializer,
-       fieldnorm_readers: FieldNormReaders,
    ) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
        let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
            self.term_index.iter().collect();
@@ -157,17 +157,12 @@ impl MultiFieldPostingsWriter {
                unordered_term_mappings.insert(field, mapping);
            }
            FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {}
-           FieldType::Bytes(_) => {}
+           FieldType::Bytes => {}
        }
-       let postings_writer =
-           self.per_field_postings_writers[field.field_id() as usize].as_ref();
-       let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
-       let mut field_serializer = serializer.new_field(
-           field,
-           postings_writer.total_num_tokens(),
-           fieldnorm_reader,
-       )?;
+       let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
+       let mut field_serializer =
+           serializer.new_field(field, postings_writer.total_num_tokens())?;
        postings_writer.serialize(
            &term_offsets[start..stop],
            &mut field_serializer,
@@ -219,20 +214,13 @@ pub trait PostingsWriter {
        field: Field,
        token_stream: &mut dyn TokenStream,
        heap: &mut MemoryArena,
-       term_buffer: &mut Term,
    ) -> u32 {
-       term_buffer.set_field(field);
+       let mut term = Term::for_field(field);
        let mut sink = |token: &Token| {
            // We skip all tokens with a len greater than u16.
            if token.text.len() <= MAX_TOKEN_LEN {
-               term_buffer.set_text(token.text.as_str());
-               self.subscribe(
-                   term_index,
-                   doc_id,
-                   token.position as u32,
-                   &term_buffer,
-                   heap,
-               );
+               term.set_text(token.text.as_str());
+               self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
            } else {
                info!(
                    "A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
@@ -309,8 +297,7 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
        let mut buffer_lender = BufferLender::default();
        for &(term_bytes, addr, _) in term_addrs {
            let recorder: Rec = termdict_heap.read(addr);
-           let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
-           serializer.new_term(&term_bytes[4..], term_doc_freq)?;
+           serializer.new_term(&term_bytes[4..])?;
            recorder.serialize(&mut buffer_lender, serializer, heap)?;
            serializer.close_term()?;
        }
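A condensed model of the indexing loop above, assuming a `writer`, `term_index`, `doc_id`, and `heap` already in scope (the tokenizer plumbing is simplified and illustrative):

    // One term buffer is reused across tokens: set the field once, then swap
    // in each token's text and hand (doc, position, term) to the recorder.
    let mut term = Term::for_field(field);
    for &(position, text) in &[(0u32, "hello"), (1u32, "world")] {
        if text.len() <= MAX_TOKEN_LEN {
            term.set_text(text);
            writer.subscribe(term_index, doc_id, position, &term, heap);
        }
    }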
View File
@@ -75,10 +75,6 @@ pub(crate) trait Recorder: Copy + 'static {
        serializer: &mut FieldSerializer<'_>,
        heap: &MemoryArena,
    ) -> io::Result<()>;
-   /// Returns the number of documents containing this term.
-   ///
-   /// Returns `None` if not available.
-   fn term_doc_freq(&self) -> Option<u32>;
}

/// Only records the doc ids
@@ -117,16 +113,11 @@ impl Recorder for NothingRecorder {
    ) -> io::Result<()> {
        let buffer = buffer_lender.lend_u8();
        self.stack.read_to_end(heap, buffer);
-       // TODO avoid reading twice.
        for doc in VInt32Reader::new(&buffer[..]) {
            serializer.write_doc(doc as u32, 0u32, &[][..])?;
        }
        Ok(())
    }
-
-   fn term_doc_freq(&self) -> Option<u32> {
-       None
-   }
}
/// Recorder encoding document ids, and term frequencies
@@ -135,7 +126,6 @@ pub struct TermFrequencyRecorder {
    stack: ExpUnrolledLinkedList,
    current_doc: DocId,
    current_tf: u32,
-   term_doc_freq: u32,
}

impl Recorder for TermFrequencyRecorder {
@@ -144,7 +134,6 @@ impl Recorder for TermFrequencyRecorder {
            stack: ExpUnrolledLinkedList::new(),
            current_doc: u32::max_value(),
            current_tf: 0u32,
-           term_doc_freq: 0u32,
        }
    }
@@ -153,7 +142,6 @@ impl Recorder for TermFrequencyRecorder {
    }

    fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
-       self.term_doc_freq += 1;
        self.current_doc = doc;
        let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
    }
@@ -184,10 +172,6 @@ impl Recorder for TermFrequencyRecorder {
        Ok(())
    }
-
-   fn term_doc_freq(&self) -> Option<u32> {
-       Some(self.term_doc_freq)
-   }
}

/// Recorder encoding term frequencies as well as positions.
@@ -195,14 +179,12 @@ impl Recorder for TermFrequencyRecorder {
pub struct TFAndPositionRecorder {
    stack: ExpUnrolledLinkedList,
    current_doc: DocId,
-   term_doc_freq: u32,
}

impl Recorder for TFAndPositionRecorder {
    fn new() -> Self {
        TFAndPositionRecorder {
            stack: ExpUnrolledLinkedList::new(),
            current_doc: u32::max_value(),
-           term_doc_freq: 0u32,
        }
    }
@@ -212,7 +194,6 @@ impl Recorder for TFAndPositionRecorder {
    fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
        self.current_doc = doc;
-       self.term_doc_freq += 1u32;
        let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
    }
@@ -252,10 +233,6 @@ impl Recorder for TFAndPositionRecorder {
        }
        Ok(())
    }
-
-   fn term_doc_freq(&self) -> Option<u32> {
-       Some(self.term_doc_freq)
-   }
}
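All three recorders follow the same pattern: postings are appended to a vint-encoded byte stack while indexing, then replayed into the serializer. A self-contained toy model of that idea (plain fixed-width encoding stands in for the vint stack):

    use std::convert::TryInto;

    struct ToyRecorder {
        bytes: Vec<u8>, // stand-in for the ExpUnrolledLinkedList in the arena
    }

    impl ToyRecorder {
        fn record(&mut self, doc: u32, term_freq: u32) {
            self.bytes.extend_from_slice(&doc.to_le_bytes());
            self.bytes.extend_from_slice(&term_freq.to_le_bytes());
        }
        fn replay(&self, mut write_doc: impl FnMut(u32, u32)) {
            for chunk in self.bytes.chunks_exact(8) {
                let doc = u32::from_le_bytes(chunk[..4].try_into().unwrap());
                let tf = u32::from_le_bytes(chunk[4..].try_into().unwrap());
                write_doc(doc, tf);
            }
        }
    }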
#[cfg(test)]
View File
@@ -1,24 +1,27 @@
use crate::common::HasLen; use crate::common::HasLen;
use crate::directory::FileSlice;
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::fastfield::DeleteBitSet;
use crate::positions::PositionReader; use crate::positions::PositionReader;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE; use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::postings::serializer::PostingsSerializer; use crate::postings::serializer::PostingsSerializer;
use crate::postings::BlockSearcher; use crate::postings::BlockSearcher;
use crate::postings::BlockSegmentPostings;
use crate::postings::Postings; use crate::postings::Postings;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, TERMINATED}; use crate::DocId;
use crate::directory::ReadOnlySource;
use crate::postings::BlockSegmentPostings;
/// `SegmentPostings` represents the inverted list or postings associated to /// `SegmentPostings` represents the inverted list or postings associated to
/// a term in a `Segment`. /// a term in a `Segment`.
/// ///
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded. /// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
/// Positions, on the other hand, are optionally decoded upfront in their entirety. /// Positions, on the other hand, are optionally decoded upfront in their entirety.
#[derive(Clone)]
pub struct SegmentPostings { pub struct SegmentPostings {
pub(crate) block_cursor: BlockSegmentPostings, block_cursor: BlockSegmentPostings,
cur: usize, cur: usize,
position_reader: Option<PositionReader>, position_reader: Option<PositionReader>,
block_searcher: BlockSearcher, block_searcher: BlockSearcher,
@@ -35,31 +38,6 @@ impl SegmentPostings {
} }
} }
/// Compute the number of non-deleted documents.
///
/// This method will clone and scan through the posting list.
/// (This is a rather expensive operation.)
pub fn doc_freq_given_deletes(&self, delete_bitset: &DeleteBitSet) -> u32 {
let mut docset = self.clone();
let mut doc_freq = 0;
loop {
let doc = docset.doc();
if doc == TERMINATED {
return doc_freq;
}
if delete_bitset.is_alive(doc) {
doc_freq += 1u32;
}
docset.advance();
}
}
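
The loop above is the standard pattern for draining a `DocSet`. As a minimal sketch of the same pattern written against an arbitrary docset (the helper name `count_matching` and its predicate parameter are illustrative, not part of the API):

use crate::docset::{DocSet, TERMINATED};
use crate::DocId;

/// Counts the documents of `docset` accepted by `predicate`.
/// Assumes `docset` is freshly created, i.e. already positioned on its first doc.
fn count_matching<D: DocSet>(docset: &mut D, predicate: impl Fn(DocId) -> bool) -> u32 {
    let mut count = 0u32;
    let mut doc = docset.doc();
    while doc != TERMINATED {
        if predicate(doc) {
            count += 1;
        }
        doc = docset.advance();
    }
    count
}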
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
pub fn doc_freq(&self) -> u32 {
self.block_cursor.doc_freq()
}
/// Creates a segment postings object with the given documents /// Creates a segment postings object with the given documents
/// and no frequency encoded. /// and no frequency encoded.
/// ///
@@ -71,9 +49,7 @@ impl SegmentPostings {
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings { pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
{ {
let mut postings_serializer = let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false);
PostingsSerializer::new(&mut buffer, 0.0, IndexRecordOption::Basic, None);
postings_serializer.new_term(docs.len() as u32);
for &doc in docs { for &doc in docs {
postings_serializer.write_doc(doc, 1u32); postings_serializer.write_doc(doc, 1u32);
} }
@@ -81,58 +57,12 @@ impl SegmentPostings {
.close_term(docs.len() as u32) .close_term(docs.len() as u32)
.expect("In memory Serialization should never fail."); .expect("In memory Serialization should never fail.");
} }
let block_segment_postings = BlockSegmentPostings::open( let block_segment_postings = BlockSegmentPostings::from_data(
docs.len() as u32, docs.len() as u32,
FileSlice::from(buffer), ReadOnlySource::from(buffer),
IndexRecordOption::Basic, IndexRecordOption::Basic,
IndexRecordOption::Basic, IndexRecordOption::Basic,
)
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None)
}
/// Helper function to create `SegmentPostings` for tests.
#[cfg(test)]
pub fn create_from_docs_and_tfs(
doc_and_tfs: &[(u32, u32)],
fieldnorms: Option<&[u32]>,
) -> SegmentPostings {
use crate::fieldnorm::FieldNormReader;
use crate::Score;
let mut buffer: Vec<u8> = Vec::new();
let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
let average_field_norm = fieldnorms
.map(|fieldnorms| {
if fieldnorms.len() == 0 {
return 0.0;
}
let total_num_tokens: u64 = fieldnorms
.iter()
.map(|&fieldnorm| fieldnorm as u64)
.sum::<u64>();
total_num_tokens as Score / fieldnorms.len() as Score
})
.unwrap_or(0.0);
let mut postings_serializer = PostingsSerializer::new(
&mut buffer,
average_field_norm,
IndexRecordOption::WithFreqs,
fieldnorm_reader,
); );
postings_serializer.new_term(doc_and_tfs.len() as u32);
for &(doc, tf) in doc_and_tfs {
postings_serializer.write_doc(doc, tf);
}
postings_serializer
.close_term(doc_and_tfs.len() as u32)
.unwrap();
let block_segment_postings = BlockSegmentPostings::open(
doc_and_tfs.len() as u32,
FileSlice::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
)
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None) SegmentPostings::from_block_postings(block_segment_postings, None)
} }
@@ -160,7 +90,6 @@ impl DocSet for SegmentPostings {
// advance needs to be called a first time to point to the correct element. // advance needs to be called a first time to point to the correct element.
#[inline] #[inline]
fn advance(&mut self) -> DocId { fn advance(&mut self) -> DocId {
debug_assert!(self.block_cursor.block_is_loaded());
if self.cur == COMPRESSION_BLOCK_SIZE - 1 { if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
self.cur = 0; self.cur = 0;
self.block_cursor.advance(); self.block_cursor.advance();
@@ -200,7 +129,7 @@ impl DocSet for SegmentPostings {
} }
/// Return the current document's `DocId`. /// Return the current document's `DocId`.
#[inline(always)] #[inline]
fn doc(&self) -> DocId { fn doc(&self) -> DocId {
self.block_cursor.doc(self.cur) self.block_cursor.doc(self.cur)
} }
@@ -212,7 +141,7 @@ impl DocSet for SegmentPostings {
impl HasLen for SegmentPostings { impl HasLen for SegmentPostings {
fn len(&self) -> usize { fn len(&self) -> usize {
self.block_cursor.doc_freq() as usize self.block_cursor.doc_freq()
} }
} }
@@ -267,7 +196,6 @@ mod tests {
use crate::common::HasLen; use crate::common::HasLen;
use crate::docset::{DocSet, TERMINATED}; use crate::docset::{DocSet, TERMINATED};
use crate::fastfield::DeleteBitSet;
use crate::postings::postings::Postings; use crate::postings::postings::Postings;
#[test] #[test]
@@ -290,14 +218,4 @@ mod tests {
let postings = SegmentPostings::empty(); let postings = SegmentPostings::empty();
assert_eq!(postings.term_freq(), 1); assert_eq!(postings.term_freq(), 1);
} }
#[test]
fn test_doc_freq() {
let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
assert_eq!(docs.doc_freq(), 3);
let delete_bitset = DeleteBitSet::for_test(&[2], 12);
assert_eq!(docs.doc_freq_given_deletes(&delete_bitset), 2);
let all_deleted = DeleteBitSet::for_test(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
}
} }


@@ -3,16 +3,13 @@ use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter}; use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment; use crate::core::Segment;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::fieldnorm::FieldNormReader;
use crate::positions::PositionSerializer; use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE}; use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer; use crate::postings::skip::SkipSerializer;
use crate::query::BM25Weight; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::schema::{IndexRecordOption, Schema};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::{DocId, Score}; use crate::DocId;
use std::cmp::Ordering;
use std::io::{self, Write}; use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -92,22 +89,20 @@ impl InvertedIndexSerializer {
&mut self, &mut self,
field: Field, field: Field,
total_num_tokens: u64, total_num_tokens: u64,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'_>> { ) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field); let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field); let term_dictionary_write = self.terms_write.for_field(field);
let postings_write = self.postings_write.for_field(field); let postings_write = self.postings_write.for_field(field);
total_num_tokens.serialize(postings_write)?;
let positions_write = self.positions_write.for_field(field); let positions_write = self.positions_write.for_field(field);
let positionsidx_write = self.positionsidx_write.for_field(field); let positionsidx_write = self.positionsidx_write.for_field(field);
let field_type: FieldType = (*field_entry.field_type()).clone(); let field_type: FieldType = (*field_entry.field_type()).clone();
FieldSerializer::create( FieldSerializer::create(
&field_type, &field_type,
total_num_tokens,
term_dictionary_write, term_dictionary_write,
postings_write, postings_write,
positions_write, positions_write,
positionsidx_write, positionsidx_write,
fieldnorm_reader,
) )
} }
@@ -135,32 +130,26 @@ pub struct FieldSerializer<'a> {
impl<'a> FieldSerializer<'a> { impl<'a> FieldSerializer<'a> {
fn create( fn create(
field_type: &FieldType, field_type: &FieldType,
total_num_tokens: u64,
term_dictionary_write: &'a mut CountingWriter<WritePtr>, term_dictionary_write: &'a mut CountingWriter<WritePtr>,
postings_write: &'a mut CountingWriter<WritePtr>, postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>, positions_write: &'a mut CountingWriter<WritePtr>,
positionsidx_write: &'a mut CountingWriter<WritePtr>, positionsidx_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> { ) -> io::Result<FieldSerializer<'a>> {
total_num_tokens.serialize(postings_write)?; let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
let mode = match field_type {
FieldType::Str(ref text_options) => { FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() { if let Some(text_indexing_options) = text_options.get_indexing_options() {
text_indexing_options.index_option() let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions())
} else { } else {
IndexRecordOption::Basic (false, false)
} }
} }
_ => IndexRecordOption::Basic, _ => (false, false),
}; };
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?; let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let average_fieldnorm = fieldnorm_reader
.as_ref()
.map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
.unwrap_or(0.0);
let postings_serializer = let postings_serializer =
PostingsSerializer::new(postings_write, average_fieldnorm, mode, fieldnorm_reader); PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if mode.has_positions() { let positions_serializer_opt = if position_enabled {
Some(PositionSerializer::new(positions_write, positionsidx_write)) Some(PositionSerializer::new(positions_write, positionsidx_write))
} else { } else {
None None
@@ -177,16 +166,14 @@ impl<'a> FieldSerializer<'a> {
} }
fn current_term_info(&self) -> TermInfo { fn current_term_info(&self) -> TermInfo {
let positions_idx = let positions_idx = self
if let Some(positions_serializer) = self.positions_serializer_opt.as_ref() { .positions_serializer_opt
positions_serializer.positions_idx() .as_ref()
} else { .map(PositionSerializer::positions_idx)
0u64 .unwrap_or(0u64);
};
TermInfo { TermInfo {
doc_freq: 0, doc_freq: 0,
postings_start_offset: self.postings_serializer.addr(), postings_offset: self.postings_serializer.addr(),
postings_stop_offset: 0u64,
positions_idx, positions_idx,
} }
} }
@@ -194,20 +181,18 @@ impl<'a> FieldSerializer<'a> {
/// Starts the postings for a new term. /// Starts the postings for a new term.
/// * term - the term. It needs to come after the previous term according /// * term - the term. It needs to come after the previous term according
/// to the lexicographical order. /// to the lexicographical order.
/// * term_doc_freq - the number of documents containing the term. /// * doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> { pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> {
assert!( assert!(
!self.term_open, !self.term_open,
"Called new_term, while the previous term was not closed." "Called new_term, while the previous term was not closed."
); );
self.term_open = true; self.term_open = true;
self.postings_serializer.clear(); self.postings_serializer.clear();
self.current_term_info = self.current_term_info(); self.current_term_info = self.current_term_info();
self.term_dictionary_builder.insert_key(term)?; self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms; let term_ordinal = self.num_terms;
self.num_terms += 1; self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq);
Ok(term_ordinal) Ok(term_ordinal)
} }
@@ -240,11 +225,10 @@ impl<'a> FieldSerializer<'a> {
/// using `VInt` encoding. /// using `VInt` encoding.
pub fn close_term(&mut self) -> io::Result<()> { pub fn close_term(&mut self) -> io::Result<()> {
if self.term_open { if self.term_open {
self.postings_serializer
.close_term(self.current_term_info.doc_freq)?;
self.current_term_info.postings_stop_offset = self.postings_serializer.addr();
self.term_dictionary_builder self.term_dictionary_builder
.insert_value(&self.current_term_info)?; .insert_value(&self.current_term_info)?;
self.postings_serializer
.close_term(self.current_term_info.doc_freq)?;
self.term_open = false; self.term_open = false;
} }
Ok(()) Ok(())
@@ -320,27 +304,16 @@ pub struct PostingsSerializer<W: Write> {
postings_write: Vec<u8>, postings_write: Vec<u8>,
skip_write: SkipSerializer, skip_write: SkipSerializer,
mode: IndexRecordOption, termfreq_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>, termfreq_sum_enabled: bool,
bm25_weight: Option<BM25Weight>,
num_docs: u32, // Number of docs in the segment
avg_fieldnorm: Score, // Average number of term in the field for that segment.
// this value is used to compute the block wand information.
} }
impl<W: Write> PostingsSerializer<W> { impl<W: Write> PostingsSerializer<W> {
pub fn new( pub fn new(
write: W, write: W,
avg_fieldnorm: Score, termfreq_enabled: bool,
mode: IndexRecordOption, termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer<W> { ) -> PostingsSerializer<W> {
let num_docs = fieldnorm_reader
.as_ref()
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
.unwrap_or(0u32);
PostingsSerializer { PostingsSerializer {
output_write: CountingWriter::wrap(write), output_write: CountingWriter::wrap(write),
@@ -351,24 +324,8 @@ impl<W: Write> PostingsSerializer<W> {
skip_write: SkipSerializer::new(), skip_write: SkipSerializer::new(),
last_doc_id_encoded: 0u32, last_doc_id_encoded: 0u32,
mode, termfreq_enabled,
termfreq_sum_enabled,
fieldnorm_reader,
bm25_weight: None,
num_docs,
avg_fieldnorm,
}
}
pub fn new_term(&mut self, term_doc_freq: u32) {
if self.mode.has_freq() && self.num_docs > 0 {
let bm25_weight = BM25Weight::for_one_term(
term_doc_freq as u64,
self.num_docs as u64,
self.avg_fieldnorm,
);
self.bm25_weight = Some(bm25_weight);
} }
} }
@@ -384,43 +341,17 @@ impl<W: Write> PostingsSerializer<W> {
// last el block 0, offset block 1, // last el block 0, offset block 1,
self.postings_write.extend(block_encoded); self.postings_write.extend(block_encoded);
} }
if self.mode.has_freq() { if self.termfreq_enabled {
// encode the term_freqs
let (num_bits, block_encoded): (u8, &[u8]) = self let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder .block_encoder
.compress_block_unsorted(&self.block.term_freqs()); .compress_block_unsorted(&self.block.term_freqs());
self.postings_write.extend(block_encoded); self.postings_write.extend(block_encoded);
self.skip_write.write_term_freq(num_bits); self.skip_write.write_term_freq(num_bits);
if self.mode.has_positions() { if self.termfreq_sum_enabled {
// We serialize the sum of term freqs within the skip information
// in order to navigate through positions.
let sum_freq = self.block.term_freqs().iter().cloned().sum(); let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq); self.skip_write.write_total_term_freq(sum_freq);
} }
let mut blockwand_params = (0u8, 0u32);
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids().iter().cloned();
let term_freqs = self.block.term_freqs().iter().cloned();
let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
blockwand_params = fieldnorms
.zip(term_freqs)
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
)
.unwrap();
}
}
let (fieldnorm_id, term_freq) = blockwand_params;
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
} }
self.block.clear(); self.block.clear();
} }
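
Note that the (fieldnorm_id, term_freq) pair recorded by the blockwand_params computation above is the argmax of the tf factor term_freq / (term_freq + norm) over the whole block, not simply the entry with the largest raw term frequency: since norm grows with the fieldnorm, a smaller tf in a much shorter field can produce a higher tf factor, so every (fieldnorm, tf) pair of the block has to be considered.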
@@ -451,7 +382,7 @@ impl<W: Write> PostingsSerializer<W> {
self.postings_write.write_all(block_encoded)?; self.postings_write.write_all(block_encoded)?;
} }
// ... Idem for term frequencies // ... Idem for term frequencies
if self.mode.has_freq() { if self.termfreq_enabled {
let block_encoded = self let block_encoded = self
.block_encoder .block_encoder
.compress_vint_unsorted(self.block.term_freqs()); .compress_vint_unsorted(self.block.term_freqs());
@@ -469,7 +400,6 @@ impl<W: Write> PostingsSerializer<W> {
} }
self.skip_write.clear(); self.skip_write.clear();
self.postings_write.clear(); self.postings_write.clear();
self.bm25_weight = None;
Ok(()) Ok(())
} }


@@ -1,46 +1,32 @@
use std::convert::TryInto; use crate::common::BinarySerializable;
use crate::directory::ReadOnlySource;
use crate::directory::OwnedBytes;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE}; use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED}; use crate::{DocId, TERMINATED};
use owned_read::OwnedRead;
#[inline(always)]
fn encode_block_wand_max_tf(max_tf: u32) -> u8 {
max_tf.min(u8::MAX as u32) as u8
}
#[inline(always)]
fn decode_block_wand_max_tf(max_tf_code: u8) -> u32 {
if max_tf_code == u8::MAX {
u32::MAX
} else {
max_tf_code as u32
}
}
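
The encoder saturates at 255 and the decoder maps the saturated value back to `u32::MAX`, so the decoded value is always an upper bound on the true block-max term frequency, which is all the BlockWAND pruning needs. A quick illustrative check, consistent with the tests further down in this file:

#[test]
fn block_wand_max_tf_is_an_upper_bound() {
    assert_eq!(encode_block_wand_max_tf(3), 3u8);
    assert_eq!(encode_block_wand_max_tf(1_000_000), 255u8);
    assert_eq!(decode_block_wand_max_tf(encode_block_wand_max_tf(1_000_000)), u32::MAX);
}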
#[inline(always)]
fn read_u32(data: &[u8]) -> u32 {
u32::from_le_bytes(data[..4].try_into().unwrap())
}
#[inline(always)]
fn write_u32(val: u32, buf: &mut Vec<u8>) {
buf.extend_from_slice(&val.to_le_bytes());
}
pub struct SkipSerializer { pub struct SkipSerializer {
buffer: Vec<u8>, buffer: Vec<u8>,
prev_doc: DocId,
} }
impl SkipSerializer { impl SkipSerializer {
pub fn new() -> SkipSerializer { pub fn new() -> SkipSerializer {
SkipSerializer { buffer: Vec::new() } SkipSerializer {
buffer: Vec::new(),
prev_doc: 0u32,
}
} }
pub fn write_doc(&mut self, last_doc: DocId, doc_num_bits: u8) { pub fn write_doc(&mut self, last_doc: DocId, doc_num_bits: u8) {
write_u32(last_doc, &mut self.buffer); assert!(
last_doc > self.prev_doc,
"write_doc(...) called with non-increasing doc ids. \
Did you forget to call clear()?"
);
let delta_doc = last_doc - self.prev_doc;
self.prev_doc = last_doc;
delta_doc.serialize(&mut self.buffer).unwrap();
self.buffer.push(doc_num_bits); self.buffer.push(doc_num_bits);
} }
@@ -49,13 +35,9 @@ impl SkipSerializer {
} }
pub fn write_total_term_freq(&mut self, tf_sum: u32) { pub fn write_total_term_freq(&mut self, tf_sum: u32) {
write_u32(tf_sum, &mut self.buffer); tf_sum
} .serialize(&mut self.buffer)
.expect("Should never fail");
pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
let block_wand_tf = encode_block_wand_max_tf(term_freq);
self.buffer
.extend_from_slice(&[fieldnorm_id, block_wand_tf]);
} }
pub fn data(&self) -> &[u8] { pub fn data(&self) -> &[u8] {
@@ -63,15 +45,15 @@ impl SkipSerializer {
} }
pub fn clear(&mut self) { pub fn clear(&mut self) {
self.prev_doc = 0u32;
self.buffer.clear(); self.buffer.clear();
} }
} }
#[derive(Clone)]
pub(crate) struct SkipReader { pub(crate) struct SkipReader {
last_doc_in_block: DocId, last_doc_in_block: DocId,
pub(crate) last_doc_in_previous_block: DocId, pub(crate) last_doc_in_previous_block: DocId,
owned_read: OwnedBytes, owned_read: OwnedRead,
skip_info: IndexRecordOption, skip_info: IndexRecordOption,
byte_offset: usize, byte_offset: usize,
remaining_docs: u32, // number of docs remaining, including the remaining_docs: u32, // number of docs remaining, including the
@@ -87,22 +69,18 @@ pub(crate) enum BlockInfo {
doc_num_bits: u8, doc_num_bits: u8,
tf_num_bits: u8, tf_num_bits: u8,
tf_sum: u32, tf_sum: u32,
block_wand_fieldnorm_id: u8,
block_wand_term_freq: u32,
},
VInt {
num_docs: u32,
}, },
VInt(u32),
} }
impl Default for BlockInfo { impl Default for BlockInfo {
fn default() -> Self { fn default() -> Self {
BlockInfo::VInt { num_docs: 0u32 } BlockInfo::VInt(0)
} }
} }
impl SkipReader { impl SkipReader {
pub fn new(data: OwnedBytes, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader { pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
let mut skip_reader = SkipReader { let mut skip_reader = SkipReader {
last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 { last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0 0
@@ -110,9 +88,9 @@ impl SkipReader {
TERMINATED TERMINATED
}, },
last_doc_in_previous_block: 0u32, last_doc_in_previous_block: 0u32,
owned_read: data, owned_read: OwnedRead::new(data),
skip_info, skip_info,
block_info: BlockInfo::VInt { num_docs: doc_freq }, block_info: BlockInfo::VInt(doc_freq),
byte_offset: 0, byte_offset: 0,
remaining_docs: doc_freq, remaining_docs: doc_freq,
position_offset: 0u64, position_offset: 0u64,
@@ -123,15 +101,15 @@ impl SkipReader {
skip_reader skip_reader
} }
pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) { pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 { self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0 0
} else { } else {
TERMINATED TERMINATED
}; };
self.last_doc_in_previous_block = 0u32; self.last_doc_in_previous_block = 0u32;
self.owned_read = data; self.owned_read = OwnedRead::new(data);
self.block_info = BlockInfo::VInt { num_docs: doc_freq }; self.block_info = BlockInfo::VInt(doc_freq);
self.byte_offset = 0; self.byte_offset = 0;
self.remaining_docs = doc_freq; self.remaining_docs = doc_freq;
self.position_offset = 0u64; self.position_offset = 0u64;
@@ -140,21 +118,8 @@ impl SkipReader {
} }
} }
// Returns the block max score for this block if available. #[cfg(test)]
// #[inline(always)]
// The block max score is available for all full bitpacked blocks,
// but not available for the last VInt-encoded incomplete block.
pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
match self.block_info {
BlockInfo::BitPacked {
block_wand_fieldnorm_id,
block_wand_term_freq,
..
} => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
BlockInfo::VInt { .. } => None,
}
}
pub(crate) fn last_doc_in_block(&self) -> DocId { pub(crate) fn last_doc_in_block(&self) -> DocId {
self.last_doc_in_block self.last_doc_in_block
} }
@@ -163,56 +128,43 @@ impl SkipReader {
self.position_offset self.position_offset
} }
#[inline(always)]
pub fn byte_offset(&self) -> usize { pub fn byte_offset(&self) -> usize {
self.byte_offset self.byte_offset
} }
fn read_block_info(&mut self) { fn read_block_info(&mut self) {
let bytes = self.owned_read.as_slice(); let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
let advance_len: usize; self.last_doc_in_block += doc_delta as DocId;
self.last_doc_in_block = read_u32(bytes); let doc_num_bits = self.owned_read.get(0);
let doc_num_bits = bytes[4];
match self.skip_info { match self.skip_info {
IndexRecordOption::Basic => { IndexRecordOption::Basic => {
advance_len = 5; self.owned_read.advance(1);
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0, tf_sum: 0,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0,
}; };
} }
IndexRecordOption::WithFreqs => { IndexRecordOption::WithFreqs => {
let tf_num_bits = bytes[5]; let tf_num_bits = self.owned_read.get(1);
let block_wand_fieldnorm_id = bytes[6];
let block_wand_term_freq = decode_block_wand_max_tf(bytes[7]);
advance_len = 8;
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
tf_sum: 0, tf_sum: 0,
block_wand_fieldnorm_id,
block_wand_term_freq,
}; };
self.owned_read.advance(2);
} }
IndexRecordOption::WithFreqsAndPositions => { IndexRecordOption::WithFreqsAndPositions => {
let tf_num_bits = bytes[5]; let tf_num_bits = self.owned_read.get(1);
let tf_sum = read_u32(&bytes[6..10]); self.owned_read.advance(2);
let block_wand_fieldnorm_id = bytes[10]; let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
let block_wand_term_freq = decode_block_wand_max_tf(bytes[11]);
advance_len = 12;
self.block_info = BlockInfo::BitPacked { self.block_info = BlockInfo::BitPacked {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
tf_sum, tf_sum,
block_wand_fieldnorm_id,
block_wand_term_freq,
}; };
} }
} }
self.owned_read.advance(advance_len);
} }
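
Putting the three branches together: each bit-packed block contributes one skip entry of fixed size, depending on the record option. A summary sketch derived from the reads above (the helper name is illustrative, not part of the codebase):

use crate::schema::IndexRecordOption;

fn skip_entry_num_bytes(skip_info: IndexRecordOption) -> usize {
    match skip_info {
        // last_doc (u32) + doc_num_bits (u8)
        IndexRecordOption::Basic => 5,
        // ... plus tf_num_bits (u8), blockwand fieldnorm_id (u8), blockwand max tf (u8)
        IndexRecordOption::WithFreqs => 8,
        // ... plus a tf_sum (u32) between tf_num_bits and the two blockwand bytes
        IndexRecordOption::WithFreqsAndPositions => 12,
    }
}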
pub fn block_info(&self) -> BlockInfo { pub fn block_info(&self) -> BlockInfo {
@@ -223,15 +175,9 @@ impl SkipReader {
/// ///
/// If the target is larger than all documents, the skip_reader /// If the target is larger than all documents, the skip_reader
/// then advances to the last VInt block. /// then advances to the last VInt block.
pub fn seek(&mut self, target: DocId) -> bool { pub fn seek(&mut self, target: DocId) {
if self.last_doc_in_block() >= target { while self.last_doc_in_block < target {
return false;
}
loop {
self.advance(); self.advance();
if self.last_doc_in_block() >= target {
return true;
}
} }
} }
@@ -241,14 +187,13 @@ impl SkipReader {
doc_num_bits, doc_num_bits,
tf_num_bits, tf_num_bits,
tf_sum, tf_sum,
..
} => { } => {
self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32; self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits); self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
self.position_offset += tf_sum as u64; self.position_offset += tf_sum as u64;
} }
BlockInfo::VInt { num_docs } => { BlockInfo::VInt(num_vint_docs) => {
debug_assert_eq!(num_docs, self.remaining_docs); debug_assert_eq!(num_vint_docs, self.remaining_docs);
self.remaining_docs = 0; self.remaining_docs = 0;
self.byte_offset = std::usize::MAX; self.byte_offset = std::usize::MAX;
} }
@@ -258,9 +203,7 @@ impl SkipReader {
self.read_block_info(); self.read_block_info();
} else { } else {
self.last_doc_in_block = TERMINATED; self.last_doc_in_block = TERMINATED;
self.block_info = BlockInfo::VInt { self.block_info = BlockInfo::VInt(self.remaining_docs);
num_docs: self.remaining_docs,
};
} }
} }
} }
@@ -271,51 +214,32 @@ mod tests {
use super::BlockInfo; use super::BlockInfo;
use super::IndexRecordOption; use super::IndexRecordOption;
use super::{SkipReader, SkipSerializer}; use super::{SkipReader, SkipSerializer};
use crate::directory::OwnedBytes; use crate::directory::ReadOnlySource;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE; use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
#[test]
fn test_encode_block_wand_max_tf() {
for tf in 0..255 {
assert_eq!(super::encode_block_wand_max_tf(tf), tf as u8);
}
for &tf in &[255, 256, 1_000_000, u32::MAX] {
assert_eq!(super::encode_block_wand_max_tf(tf), 255);
}
}
#[test]
fn test_decode_block_wand_max_tf() {
for tf in 0..255 {
assert_eq!(super::decode_block_wand_max_tf(tf), tf as u32);
}
assert_eq!(super::decode_block_wand_max_tf(255), u32::MAX);
}
#[test] #[test]
fn test_skip_with_freq() { fn test_skip_with_freq() {
let buf = { let buf = {
let mut skip_serializer = SkipSerializer::new(); let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8); skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_term_freq(3u8); skip_serializer.write_term_freq(3u8);
skip_serializer.write_blockwand_max(13u8, 3u32);
skip_serializer.write_doc(5u32, 5u8); skip_serializer.write_doc(5u32, 5u8);
skip_serializer.write_term_freq(2u8); skip_serializer.write_term_freq(2u8);
skip_serializer.write_blockwand_max(8u8, 2u32);
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32; let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader = let mut skip_reader = SkipReader::new(
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::WithFreqs); ReadOnlySource::new(buf),
doc_freq,
IndexRecordOption::WithFreqs,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert_eq!(
skip_reader.block_info, skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 2u8, doc_num_bits: 2u8,
tf_num_bits: 3u8, tf_num_bits: 3u8,
tf_sum: 0, tf_sum: 0
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
} }
); );
skip_reader.advance(); skip_reader.advance();
@@ -325,17 +249,15 @@ mod tests {
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 5u8, doc_num_bits: 5u8,
tf_num_bits: 2u8, tf_num_bits: 2u8,
tf_sum: 0, tf_sum: 0
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
} }
); );
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
} }
#[test] #[test]
@@ -347,17 +269,18 @@ mod tests {
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32; let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader = let mut skip_reader = SkipReader::new(
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic); ReadOnlySource::from(buf),
doc_freq,
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert_eq!(
skip_reader.block_info(), skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 2u8, doc_num_bits: 2u8,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0u32, tf_sum: 0u32
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
} }
); );
skip_reader.advance(); skip_reader.advance();
@@ -367,17 +290,15 @@ mod tests {
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 5u8, doc_num_bits: 5u8,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0u32, tf_sum: 0u32
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
} }
); );
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
} }
#[test] #[test]
@@ -388,20 +309,21 @@ mod tests {
skip_serializer.data().to_owned() skip_serializer.data().to_owned()
}; };
let doc_freq = COMPRESSION_BLOCK_SIZE as u32; let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
let mut skip_reader = let mut skip_reader = SkipReader::new(
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic); ReadOnlySource::from(buf),
doc_freq,
IndexRecordOption::Basic,
);
assert_eq!(skip_reader.last_doc_in_block(), 1u32); assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!( assert_eq!(
skip_reader.block_info(), skip_reader.block_info(),
BlockInfo::BitPacked { BlockInfo::BitPacked {
doc_num_bits: 2u8, doc_num_bits: 2u8,
tf_num_bits: 0, tf_num_bits: 0,
tf_sum: 0u32, tf_sum: 0u32
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
} }
); );
skip_reader.advance(); skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 }); assert_eq!(skip_reader.block_info(), BlockInfo::VInt(0u32));
} }
} }


@@ -206,8 +206,8 @@ mod tests {
fn test_stack_long() { fn test_stack_long() {
let mut heap = MemoryArena::new(); let mut heap = MemoryArena::new();
let mut stack = ExpUnrolledLinkedList::new(); let mut stack = ExpUnrolledLinkedList::new();
let data: Vec<u32> = (0..100).collect(); let source: Vec<u32> = (0..100).collect();
for &el in &data { for &el in &source {
assert!(stack assert!(stack
.writer(&mut heap) .writer(&mut heap)
.write_u32::<LittleEndian>(el) .write_u32::<LittleEndian>(el)
@@ -221,7 +221,7 @@ mod tests {
result.push(LittleEndian::read_u32(&remaining[..4])); result.push(LittleEndian::read_u32(&remaining[..4]));
remaining = &remaining[4..]; remaining = &remaining[4..];
} }
assert_eq!(&result[..], &data[..]); assert_eq!(&result[..], &source[..]);
} }
#[test] #[test]


@@ -7,50 +7,35 @@ use std::io;
pub struct TermInfo { pub struct TermInfo {
/// Number of documents in the segment containing the term /// Number of documents in the segment containing the term
pub doc_freq: u32, pub doc_freq: u32,
/// Start offset of the posting list within the postings (`.idx`) file. /// Start offset within the postings (`.idx`) file.
pub postings_start_offset: u64, pub postings_offset: u64,
/// Stop offset of the posting list within the postings (`.idx`) file.
/// The byte range is `[start_offset..stop_offset)`.
pub postings_stop_offset: u64,
/// Start offset of the first block within the position (`.pos`) file. /// Start offset of the first block within the position (`.pos`) file.
pub positions_idx: u64, pub positions_idx: u64,
} }
impl TermInfo {
pub(crate) fn posting_num_bytes(&self) -> u32 {
let num_bytes = self.postings_stop_offset - self.postings_start_offset;
assert!(num_bytes <= std::u32::MAX as u64);
num_bytes as u32
}
}
impl FixedSize for TermInfo { impl FixedSize for TermInfo {
/// Size required for the binary serialization of a `TermInfo` object. /// Size required for the binary serialization of a `TermInfo` object.
/// This is large, but in practice, `TermInfo`s are encoded in blocks and /// This is large, but in practice, `TermInfo`s are encoded in blocks and
/// only the first `TermInfo` of a block is serialized uncompressed. /// only the first `TermInfo` of a block is serialized uncompressed.
/// The subsequent `TermInfo` are delta encoded and bitpacked. /// The subsequent `TermInfo` are delta encoded and bitpacked.
const SIZE_IN_BYTES: usize = 2 * u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES; const SIZE_IN_BYTES: usize = u32::SIZE_IN_BYTES + 2 * u64::SIZE_IN_BYTES;
} }
impl BinarySerializable for TermInfo { impl BinarySerializable for TermInfo {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.doc_freq.serialize(writer)?; self.doc_freq.serialize(writer)?;
self.postings_start_offset.serialize(writer)?; self.postings_offset.serialize(writer)?;
self.posting_num_bytes().serialize(writer)?;
self.positions_idx.serialize(writer)?; self.positions_idx.serialize(writer)?;
Ok(()) Ok(())
} }
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> { fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let doc_freq = u32::deserialize(reader)?; let doc_freq = u32::deserialize(reader)?;
let postings_start_offset = u64::deserialize(reader)?; let postings_offset = u64::deserialize(reader)?;
let postings_num_bytes = u32::deserialize(reader)?;
let postings_stop_offset = postings_start_offset + u64::from(postings_num_bytes);
let positions_idx = u64::deserialize(reader)?; let positions_idx = u64::deserialize(reader)?;
Ok(TermInfo { Ok(TermInfo {
doc_freq, doc_freq,
postings_start_offset, postings_offset,
postings_stop_offset,
positions_idx, positions_idx,
}) })
} }
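
With this change, a `TermInfo` no longer stores its postings stop offset verbatim: the serializer writes the byte length of the posting list, and the stop offset is reconstructed at deserialization time. The resulting fixed layout, consistent with `SIZE_IN_BYTES` above:

// doc_freq               u32   4 bytes
// postings_start_offset  u64   8 bytes
// posting_num_bytes      u32   4 bytes
// positions_idx          u64   8 bytes
//                        total 24 bytes = 2 * u32 + 2 * u64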


@@ -9,7 +9,7 @@ use crate::Score;
/// Query that matches all of the documents. /// Query that matches all of the documents.
/// ///
/// All of the documents get the score 1.0. /// All of the documents get the score 1f32.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct AllQuery; pub struct AllQuery;
@@ -23,7 +23,7 @@ impl Query for AllQuery {
pub struct AllWeight; pub struct AllWeight;
impl Weight for AllWeight { impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer { let all_scorer = AllScorer {
doc: 0u32, doc: 0u32,
max_doc: reader.max_doc(), max_doc: reader.max_doc(),
@@ -35,7 +35,7 @@ impl Weight for AllWeight {
if doc >= reader.max_doc() { if doc >= reader.max_doc() {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
} }
Ok(Explanation::new("AllQuery", 1.0)) Ok(Explanation::new("AllQuery", 1f32))
} }
} }
@@ -66,7 +66,7 @@ impl DocSet for AllScorer {
impl Scorer for AllScorer { impl Scorer for AllScorer {
fn score(&mut self) -> Score { fn score(&mut self) -> Score {
1.0 1f32
} }
} }
@@ -83,7 +83,7 @@ mod tests {
let field = schema_builder.add_text_field("text", TEXT); let field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(field=>"aaa")); index_writer.add_document(doc!(field=>"aaa"));
index_writer.add_document(doc!(field=>"bbb")); index_writer.add_document(doc!(field=>"bbb"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
@@ -100,7 +100,7 @@ mod tests {
let weight = AllQuery.weight(&searcher, false).unwrap(); let weight = AllQuery.weight(&searcher, false).unwrap();
{ {
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let mut scorer = weight.scorer(reader, 1.0).unwrap(); let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
assert_eq!(scorer.doc(), 0u32); assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), 1u32); assert_eq!(scorer.advance(), 1u32);
assert_eq!(scorer.doc(), 1u32); assert_eq!(scorer.doc(), 1u32);
@@ -108,7 +108,7 @@ mod tests {
} }
{ {
let reader = searcher.segment_reader(1); let reader = searcher.segment_reader(1);
let mut scorer = weight.scorer(reader, 1.0).unwrap(); let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
assert_eq!(scorer.doc(), 0u32); assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), TERMINATED); assert_eq!(scorer.advance(), TERMINATED);
} }
@@ -122,14 +122,14 @@ mod tests {
let weight = AllQuery.weight(&searcher, false).unwrap(); let weight = AllQuery.weight(&searcher, false).unwrap();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
{ {
let mut scorer = weight.scorer(reader, 2.0).unwrap(); let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
assert_eq!(scorer.doc(), 0u32); assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 2.0); assert_eq!(scorer.score(), 2.0f32);
} }
{ {
let mut scorer = weight.scorer(reader, 1.5).unwrap(); let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
assert_eq!(scorer.doc(), 0u32); assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.5); assert_eq!(scorer.score(), 1.5f32);
} }
} }
} }


@@ -5,9 +5,9 @@ use crate::query::{BitSetDocSet, Explanation};
use crate::query::{Scorer, Weight}; use crate::query::{Scorer, Weight};
use crate::schema::{Field, IndexRecordOption}; use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer}; use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::Result;
use crate::TantivyError; use crate::TantivyError;
use crate::{DocId, Score};
use std::io;
use std::sync::Arc; use std::sync::Arc;
use tantivy_fst::Automaton; use tantivy_fst::Automaton;
@@ -20,7 +20,6 @@ pub struct AutomatonWeight<A> {
impl<A> AutomatonWeight<A> impl<A> AutomatonWeight<A>
where where
A: Automaton + Send + Sync + 'static, A: Automaton + Send + Sync + 'static,
A::State: Clone,
{ {
/// Create a new AutomationWeight /// Create a new AutomationWeight
pub fn new<IntoArcA: Into<Arc<A>>>(field: Field, automaton: IntoArcA) -> AutomatonWeight<A> { pub fn new<IntoArcA: Into<Arc<A>>>(field: Field, automaton: IntoArcA) -> AutomatonWeight<A> {
@@ -30,10 +29,7 @@ where
} }
} }
fn automaton_stream<'a>( fn automaton_stream<'a>(&'a self, term_dict: &'a TermDictionary) -> TermStreamer<'a, &'a A> {
&'a self,
term_dict: &'a TermDictionary,
) -> io::Result<TermStreamer<'a, &'a A>> {
let automaton: &A = &*self.automaton; let automaton: &A = &*self.automaton;
let term_stream_builder = term_dict.search(automaton); let term_stream_builder = term_dict.search(automaton);
term_stream_builder.into_stream() term_stream_builder.into_stream()
@@ -43,18 +39,17 @@ where
impl<A> Weight for AutomatonWeight<A> impl<A> Weight for AutomatonWeight<A>
where where
A: Automaton + Send + Sync + 'static, A: Automaton + Send + Sync + 'static,
A::State: Clone,
{ {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc(); let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc); let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?; let inverted_index = reader.inverted_index(self.field);
let term_dict = inverted_index.terms(); let term_dict = inverted_index.terms();
let mut term_stream = self.automaton_stream(term_dict)?; let mut term_stream = self.automaton_stream(term_dict);
while term_stream.advance() { while term_stream.advance() {
let term_info = term_stream.value(); let term_info = term_stream.value();
let mut block_segment_postings = inverted_index let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?; .read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
loop { loop {
let docs = block_segment_postings.docs(); let docs = block_segment_postings.docs();
if docs.is_empty() { if docs.is_empty() {
@@ -71,10 +66,10 @@ where
Ok(Box::new(const_scorer)) Ok(Box::new(const_scorer))
} }
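
The pattern here is worth spelling out: rather than merging many postings lists lazily, the weight eagerly ORs the doc ids of every term matching the automaton into a single BitSet, then serves the result as a BitSetDocSet wrapped in a constant scorer. This makes scoring trivial (every hit gets the boost as its score, as the tests below check) at the cost of one upfront pass over the postings of all matching terms.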
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?; let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) == doc { if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0)) Ok(Explanation::new("AutomatonScorer", 1.0f32))
} else { } else {
Err(TantivyError::InvalidArgument( Err(TantivyError::InvalidArgument(
"Document does not exist".to_string(), "Document does not exist".to_string(),
@@ -96,7 +91,7 @@ mod tests {
let mut schema = Schema::builder(); let mut schema = Schema::builder();
let title = schema.add_text_field("title", STRING); let title = schema.add_text_field("title", STRING);
let index = Index::create_in_ram(schema.build()); let index = Index::create_in_ram(schema.build());
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(title=>"abc")); index_writer.add_document(doc!(title=>"abc"));
index_writer.add_document(doc!(title=>"bcd")); index_writer.add_document(doc!(title=>"bcd"));
index_writer.add_document(doc!(title=>"abcd")); index_writer.add_document(doc!(title=>"abcd"));
@@ -104,7 +99,6 @@ mod tests {
index index
} }
#[derive(Clone, Copy)]
enum State { enum State {
Start, Start,
NotMatching, NotMatching,
@@ -150,13 +144,13 @@ mod tests {
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let mut scorer = automaton_weight let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.0) .scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap(); .unwrap();
assert_eq!(scorer.doc(), 0u32); assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.0); assert_eq!(scorer.score(), 1.0f32);
assert_eq!(scorer.advance(), 2u32); assert_eq!(scorer.advance(), 2u32);
assert_eq!(scorer.doc(), 2u32); assert_eq!(scorer.doc(), 2u32);
assert_eq!(scorer.score(), 1.0); assert_eq!(scorer.score(), 1.0f32);
assert_eq!(scorer.advance(), TERMINATED); assert_eq!(scorer.advance(), TERMINATED);
} }
@@ -168,9 +162,9 @@ mod tests {
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let mut scorer = automaton_weight let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.32) .scorer(searcher.segment_reader(0u32), 1.32f32)
.unwrap(); .unwrap();
assert_eq!(scorer.doc(), 0u32); assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.32); assert_eq!(scorer.score(), 1.32f32);
} }
} }


@@ -61,23 +61,21 @@ impl DocSet for BitSetDocSet {
} }
fn seek(&mut self, target: DocId) -> DocId { fn seek(&mut self, target: DocId) -> DocId {
if target >= self.docs.max_value() {
self.doc = TERMINATED;
return TERMINATED;
}
let target_bucket = target / 64u32; let target_bucket = target / 64u32;
// Mask for all of the bits greater than or equal
// to our target document.
if target_bucket > self.cursor_bucket { if target_bucket > self.cursor_bucket {
self.go_to_bucket(target_bucket); self.go_to_bucket(target_bucket);
let greater_filter: TinySet = TinySet::range_greater_or_equal(target); let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter); self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
self.advance() self.advance();
} else {
let mut doc = self.doc();
while doc < target {
doc = self.advance();
}
doc
} }
let mut doc = self.doc();
while doc < target {
doc = self.advance();
}
doc
} }
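
As an illustration of the two paths above: seeking from doc 3 to target 130 computes target_bucket = 130 / 64 = 2, which is past the current bucket, so the cursor jumps directly to bucket 2 and masks off the bits below 130 before advancing; seeking from doc 3 to target 5 stays in bucket 0 and simply advances linearly until doc >= 5.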
/// Returns the current document /// Returns the current document
@@ -116,13 +114,6 @@ mod tests {
assert_eq!(empty.advance(), TERMINATED) assert_eq!(empty.advance(), TERMINATED)
} }
#[test]
fn test_seek_terminated() {
let bitset = BitSet::with_max_value(1000);
let mut empty = BitSetDocSet::from(bitset);
assert_eq!(empty.seek(TERMINATED), TERMINATED)
}
fn test_go_through_sequential(docs: &[DocId]) { fn test_go_through_sequential(docs: &[DocId]) {
let mut docset = create_docbitset(docs, 1_000u32); let mut docset = create_docbitset(docs, 1_000u32);
for &doc in docs { for &doc in docs {


@@ -3,24 +3,21 @@ use crate::query::Explanation;
use crate::Score; use crate::Score;
use crate::Searcher; use crate::Searcher;
use crate::Term; use crate::Term;
use serde::Deserialize;
use serde::Serialize;
const K1: Score = 1.2; const K1: f32 = 1.2;
const B: Score = 0.75; const B: f32 = 0.75;
fn idf(doc_freq: u64, doc_count: u64) -> Score { fn idf(doc_freq: u64, doc_count: u64) -> f32 {
assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq); let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
let x = ((doc_count - doc_freq) as Score + 0.5) / (doc_freq as Score + 0.5); (1f32 + x).ln()
(1.0 + x).ln()
} }
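
A worked instance of this formula: with doc_freq = 1 and doc_count = 2, x = (2 - 1 + 0.5) / (1 + 0.5) = 1.0, so idf = ln(1 + 1.0) = ln(2) ≈ 0.6931, which is exactly the value asserted by test_idf at the bottom of this file.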
fn cached_tf_component(fieldnorm: u32, average_fieldnorm: Score) -> Score { fn cached_tf_component(fieldnorm: u32, average_fieldnorm: f32) -> f32 {
K1 * (1.0 - B + B * fieldnorm as Score / average_fieldnorm) K1 * (1f32 - B + B * fieldnorm as f32 / average_fieldnorm)
} }
fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] { fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
let mut cache: [Score; 256] = [0.0; 256]; let mut cache = [0f32; 256];
for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() { for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() {
let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8); let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8);
*cache_mut = cached_tf_component(fieldnorm, average_fieldnorm); *cache_mut = cached_tf_component(fieldnorm, average_fieldnorm);
@@ -28,22 +25,15 @@ fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
cache cache
} }
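
Because a fieldnorm is stored as a single-byte id, the entire length-normalization term can be tabulated once per weight: entry i of the cache holds K1 * (1 - B + B * fieldnorm(i) / average_fieldnorm). For example, assuming an average fieldnorm of 10 and a document whose (quantized) fieldnorm is also 10, the cached norm is 1.2 * (0.25 + 0.75) = 1.2; a term frequency of 2 then yields a tf factor of 2 / (2 + 1.2) = 0.625, and the final score is weight * 0.625.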
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct BM25Params {
pub idf: Score,
pub avg_fieldnorm: Score,
}
#[derive(Clone)]
pub struct BM25Weight { pub struct BM25Weight {
idf_explain: Explanation, idf_explain: Explanation,
weight: Score, weight: f32,
cache: [Score; 256], cache: [f32; 256],
average_fieldnorm: Score, average_fieldnorm: f32,
} }
impl BM25Weight { impl BM25Weight {
pub fn boost_by(&self, boost: Score) -> BM25Weight { pub fn boost_by(&self, boost: f32) -> BM25Weight {
BM25Weight { BM25Weight {
idf_explain: self.idf_explain.clone(), idf_explain: self.idf_explain.clone(),
weight: self.weight * boost, weight: self.weight * boost,
@@ -52,7 +42,7 @@ impl BM25Weight {
} }
} }
pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> crate::Result<BM25Weight> { pub fn for_terms(searcher: &Searcher, terms: &[Term]) -> BM25Weight {
assert!(!terms.is_empty(), "BM25 requires at least one term"); assert!(!terms.is_empty(), "BM25 requires at least one term");
let field = terms[0].field(); let field = terms[0].field();
for term in &terms[1..] { for term in &terms[1..] {
@@ -66,48 +56,38 @@ impl BM25Weight {
let mut total_num_tokens = 0u64; let mut total_num_tokens = 0u64;
let mut total_num_docs = 0u64; let mut total_num_docs = 0u64;
for segment_reader in searcher.segment_readers() { for segment_reader in searcher.segment_readers() {
let inverted_index = segment_reader.inverted_index(field)?; let inverted_index = segment_reader.inverted_index(field);
total_num_tokens += inverted_index.total_num_tokens(); total_num_tokens += inverted_index.total_num_tokens();
total_num_docs += u64::from(segment_reader.max_doc()); total_num_docs += u64::from(segment_reader.max_doc());
} }
let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score; let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
let mut idf_explain: Explanation;
if terms.len() == 1 { if terms.len() == 1 {
let term_doc_freq = searcher.doc_freq(&terms[0])?; let term_doc_freq = searcher.doc_freq(&terms[0]);
Ok(BM25Weight::for_one_term( let idf = idf(term_doc_freq, total_num_docs);
term_doc_freq, idf_explain =
total_num_docs, Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
average_fieldnorm, idf_explain.add_const(
)) "n, number of docs containing this term",
term_doc_freq as f32,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
} else { } else {
let mut idf_sum: Score = 0.0; let idf = terms
for term in terms { .iter()
let term_doc_freq = searcher.doc_freq(term)?; .map(|term| {
idf_sum += idf(term_doc_freq, total_num_docs); let term_doc_freq = searcher.doc_freq(term);
} idf(term_doc_freq, total_num_docs)
let idf_explain = Explanation::new("idf", idf_sum); })
Ok(BM25Weight::new(idf_explain, average_fieldnorm)) .sum::<f32>();
idf_explain = Explanation::new("idf", idf);
} }
BM25Weight::new(idf_explain, average_fieldnorm)
} }
pub fn for_one_term( fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
term_doc_freq: u64, let weight = idf_explain.value() * (1f32 + K1);
total_num_docs: u64,
avg_fieldnorm: Score,
) -> BM25Weight {
let idf = idf(term_doc_freq, total_num_docs);
let mut idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as Score,
);
idf_explain.add_const("N, total number of docs", total_num_docs as Score);
BM25Weight::new(idf_explain, avg_fieldnorm)
}
pub(crate) fn new(idf_explain: Explanation, average_fieldnorm: Score) -> BM25Weight {
let weight = idf_explain.value() * (1.0 + K1);
BM25Weight { BM25Weight {
idf_explain, idf_explain,
weight, weight,
@@ -118,27 +98,19 @@ impl BM25Weight {
#[inline(always)] #[inline(always)]
pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score { pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
self.weight * self.tf_factor(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> Score {
self.score(255u8, 2_013_265_944)
}
#[inline(always)]
pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
let term_freq = term_freq as Score;
let norm = self.cache[fieldnorm_id as usize]; let norm = self.cache[fieldnorm_id as usize];
term_freq / (term_freq + norm) let term_freq = term_freq as f32;
self.weight * term_freq / (term_freq + norm)
} }
pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation { pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation {
// The explain format is directly copied from Lucene's. // The explain format is directly copied from Lucene's.
// (So, Kudos to Lucene) // (So, Kudos to Lucene)
let score = self.score(fieldnorm_id, term_freq); let score = self.score(fieldnorm_id, term_freq);
let norm = self.cache[fieldnorm_id as usize]; let norm = self.cache[fieldnorm_id as usize];
let term_freq = term_freq as Score; let term_freq = term_freq as f32;
let right_factor = term_freq / (term_freq + norm); let right_factor = term_freq / (term_freq + norm);
let mut tf_explanation = Explanation::new( let mut tf_explanation = Explanation::new(
@@ -151,12 +123,12 @@ impl BM25Weight {
tf_explanation.add_const("b, length normalization parameter", B); tf_explanation.add_const("b, length normalization parameter", B);
tf_explanation.add_const( tf_explanation.add_const(
"dl, length of field", "dl, length of field",
FieldNormReader::id_to_fieldnorm(fieldnorm_id) as Score, FieldNormReader::id_to_fieldnorm(fieldnorm_id) as f32,
); );
tf_explanation.add_const("avgdl, average length of field", self.average_fieldnorm); tf_explanation.add_const("avgdl, average length of field", self.average_fieldnorm);
let mut explanation = Explanation::new("TermQuery, product of...", score); let mut explanation = Explanation::new("TermQuery, product of...", score);
explanation.add_detail(Explanation::new("(K1+1)", K1 + 1.0)); explanation.add_detail(Explanation::new("(K1+1)", K1 + 1f32));
explanation.add_detail(self.idf_explain.clone()); explanation.add_detail(self.idf_explain.clone());
explanation.add_detail(tf_explanation); explanation.add_detail(tf_explanation);
explanation explanation
@@ -167,11 +139,10 @@ impl BM25Weight {
mod tests { mod tests {
use super::idf; use super::idf;
use crate::{assert_nearly_equals, Score}; use crate::assert_nearly_equals;
#[test] #[test]
fn test_idf() { fn test_idf() {
let score: Score = 2.0; assert_nearly_equals!(idf(1, 2), 0.6931472);
assert_nearly_equals!(idf(1, 2), score.ln());
} }
} }


@@ -1,541 +0,0 @@
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::Deref;
use std::ops::DerefMut;
/// Takes term_scorers sorted by their current doc() and a threshold, and
/// returns (before_pivot_len, pivot_len, pivot_doc) defined as follows:
/// - `pivot_doc` lowest document that has a chance of exceeding (>) the threshold score.
/// - `before_pivot_len` number of term_scorers such that term_scorer.doc() < pivot_doc.
/// - `pivot_len` number of term_scorers such that term_scorer.doc() <= pivot_doc.
///
/// We always have `before_pivot_len` < `pivot_len`.
///
/// None is returned if we establish that no document can exceed the threshold.
fn find_pivot_doc(
term_scorers: &[TermScorerWithMaxScore],
threshold: Score,
) -> Option<(usize, usize, DocId)> {
let mut max_score = 0.0;
let mut before_pivot_len = 0;
let mut pivot_doc = TERMINATED;
while before_pivot_len < term_scorers.len() {
let term_scorer = &term_scorers[before_pivot_len];
max_score += term_scorer.max_score;
if max_score > threshold {
pivot_doc = term_scorer.doc();
break;
}
before_pivot_len += 1;
}
if pivot_doc == TERMINATED {
return None;
}
// Right now before_pivot_len is an ordinal, we want a length.
let mut pivot_len = before_pivot_len + 1;
// Some other term_scorer may be positioned on the same document.
pivot_len += term_scorers[pivot_len..]
.iter()
.take_while(|term_scorer| term_scorer.doc() == pivot_doc)
.count();
Some((before_pivot_len, pivot_len, pivot_doc))
}
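
To make the contract concrete, here is a simplified, self-contained model of the pivot selection over plain `(doc, max_score)` pairs (a hypothetical stand-in for the `TermScorerWithMaxScore` slice, returning only the pivot doc):

// Scorers are sorted by doc. Accumulate per-term max scores in that order;
// the doc of the scorer that pushes the sum over the threshold is the pivot:
// no earlier doc can beat the threshold even with every remaining term maxed out.
fn find_pivot(scorers: &[(u32, f32)], threshold: f32) -> Option<u32> {
    let mut acc = 0.0f32;
    for &(doc, max_score) in scorers {
        acc += max_score;
        if acc > threshold {
            return Some(doc);
        }
    }
    None
}

fn main() {
    // With max scores 1.0, 2.0 and 4.0 and a threshold of 2.5, the pivot is doc 5.
    assert_eq!(find_pivot(&[(3, 1.0), (5, 2.0), (9, 4.0)], 2.5), Some(5));
}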
// Before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer(
scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_len: usize,
) {
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
let mut scorer_to_seek = pivot_len - 1;
let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
for scorer_ord in (0..pivot_len - 1).rev() {
let scorer = &scorers[scorer_ord];
if scorer.last_doc_in_block() <= doc_to_seek_after {
doc_to_seek_after = scorer.last_doc_in_block();
scorer_to_seek = scorer_ord;
}
}
for scorer in &scorers[pivot_len..] {
if scorer.doc() <= doc_to_seek_after {
doc_to_seek_after = scorer.doc();
}
}
scorers[scorer_to_seek].seek(doc_to_seek_after + 1);
restore_ordering(scorers, scorer_to_seek);
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
}
// Given a list of term_scorers and an ordinal `ord`, and assuming that `term_scorers`
// is sorted by doc() except for `term_scorers[ord]`, which may be ahead of its rank,
// bubble up `term_scorers[ord]` in order to restore the ordering.
fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
let doc = term_scorers[ord].doc();
for i in ord + 1..term_scorers.len() {
if term_scorers[i].doc() >= doc {
break;
}
term_scorers.swap(i, i - 1);
}
debug_assert!(is_sorted(term_scorers.iter().map(|scorer| scorer.doc())));
}
// Attempts to advance all term_scorers in `term_scorers[0..before_pivot_len]` to the pivot.
// If this works, returns true.
// If this fails (ie: one of the term_scorers does not contain `pivot_doc` and seeking goes
// past the pivot), reorders the term_scorers to ensure the list is still sorted and returns
// `false`. If a term_scorer reaches TERMINATED in the process, it is removed from the list
// and `false` is returned as well.
fn align_scorers(
term_scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_doc: DocId,
before_pivot_len: usize,
) -> bool {
debug_assert_ne!(pivot_doc, TERMINATED);
for i in (0..before_pivot_len).rev() {
let new_doc = term_scorers[i].seek(pivot_doc);
if new_doc != pivot_doc {
if new_doc == TERMINATED {
term_scorers.swap_remove(i);
}
// We went past the pivot.
// We just go through the outer loop mechanic (Note that pivot is
// still a possible candidate).
//
// Termination is still guaranteed since we can only consider the same
// pivot at most term_scorers.len() - 1 times.
restore_ordering(term_scorers, i);
return false;
}
}
true
}
// Assumes term_scorers[..pivot_len] are all positioned on the same doc (pivot_doc).
// Advances term_scorers[..pivot_len], removes the scorers that reach TERMINATED,
// and restores the ordering of term_scorers.
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
for term_scorer in &mut term_scorers[..pivot_len] {
term_scorer.advance();
}
// TODO use drain_filter when available.
let mut i = 0;
while i != term_scorers.len() {
if term_scorers[i].doc() == TERMINATED {
term_scorers.swap_remove(i);
} else {
i += 1;
}
}
term_scorers.sort_by_key(|scorer| scorer.doc());
}
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: Score,
callback: &mut dyn FnMut(u32, Score) -> Score,
) {
let mut scorers: Vec<TermScorerWithMaxScore> = scorers
.iter_mut()
.map(TermScorerWithMaxScore::from)
.collect();
scorers.sort_by_key(|scorer| scorer.doc());
// At this point we need to ensure that the scorers are sorted!
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
while let Some((before_pivot_len, pivot_len, pivot_doc)) =
find_pivot_doc(&scorers[..], threshold)
{
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
debug_assert_ne!(pivot_doc, TERMINATED);
debug_assert!(before_pivot_len < pivot_len);
let block_max_score_upperbound: Score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| {
scorer.shallow_seek(pivot_doc);
scorer.block_max_score()
})
.sum();
// Beware: after a shallow seek, the skip readers can be ahead of
// the segment posting lists.
//
// `block_segment_postings.load_block()` needs to be called separately.
if block_max_score_upperbound <= threshold {
// Block max condition was not reached.
// We could get away with simply advancing the scorers to the pivot doc + 1, but it
// would be inefficient. The optimization requires a proper explanation and was
// isolated in a different function.
block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
continue;
}
// Block max condition is observed.
//
// Let's try and advance all scorers before the pivot to the pivot.
if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
// At least one of the scorers does not contain the pivot.
//
// Let's stop scoring this pivot and go through the pivot selection again.
// Note that the current pivot is not necessarily a bad candidate and it
// may be picked again.
continue;
}
// At this point, all scorers are positioned on the doc.
let score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| scorer.score())
.sum();
if score > threshold {
threshold = callback(pivot_doc, score);
}
// let's advance all of the scorers that are currently positioned on the pivot.
advance_all_scorers_on_pivot(&mut scorers, pivot_len);
}
}
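
The callback contract deserves a note: it receives each surviving `(doc, score)` pair and returns the updated pruning threshold. A hedged top-k usage sketch, mirroring what `compute_checkpoints_for_each_pruning` does below with a heap (a hypothetical helper; the `term_scorers` vector is assumed to be built elsewhere):

// Keep the k best scores; the callback returns the current k-th best score
// (or f32::MIN while fewer than k hits were seen) as the new threshold.
fn make_top_k_callback(k: usize) -> impl FnMut(u32, f32) -> f32 {
    let mut hits: Vec<f32> = Vec::new();
    move |_doc, score| {
        hits.push(score);
        hits.sort_by(|a, b| b.partial_cmp(a).unwrap());
        hits.truncate(k);
        if hits.len() == k { hits[k - 1] } else { f32::MIN }
    }
}
// Hypothetical usage: block_wand(term_scorers, f32::MIN, &mut make_top_k_callback(10));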
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: Score,
}
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore { scorer, max_score }
}
}
impl<'a> Deref for TermScorerWithMaxScore<'a> {
type Target = TermScorer;
fn deref(&self) -> &Self::Target {
self.scorer
}
}
impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
}
}
fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
if let Some(first) = it.next() {
let mut prev = first;
for doc in it {
if doc < prev {
return false;
}
prev = doc;
}
}
true
}
#[cfg(test)]
mod tests {
use crate::query::score_combiner::SumCombiner;
use crate::query::term_query::TermScorer;
use crate::query::Union;
use crate::query::{BM25Weight, Scorer};
use crate::{DocId, DocSet, Score, TERMINATED};
use proptest::prelude::*;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::iter;
struct Float(Score);
impl Eq for Float {}
impl PartialEq for Float {
fn eq(&self, other: &Self) -> bool {
self.cmp(&other) == Ordering::Equal
}
}
impl PartialOrd for Float {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Float {
fn cmp(&self, other: &Self) -> Ordering {
other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
}
}
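Note that `Ord` is deliberately reversed here (`other` is compared against `self`), so `BinaryHeap<Float>` behaves as a min-heap: `heap.pop()` evicts the smallest retained score and `heap.peek()` yields the current k-th best, which is exactly what the threshold bookkeeping in the helpers below needs.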
fn nearly_equals(left: Score, right: Score) -> bool {
(left - right).abs() < 0.0001 * (left + right).abs()
}
fn compute_checkpoints_for_each_pruning(
term_scorers: Vec<TermScorer>,
n: usize,
) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut limit: Score = 0.0;
super::block_wand(term_scorers, Score::MIN, &mut |doc, score| {
heap.push(Float(score));
if heap.len() > n {
heap.pop().unwrap();
}
if heap.len() == n {
limit = heap.peek().unwrap().0;
}
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
return limit;
});
checkpoints
}
fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);
let mut limit = Score::MIN;
loop {
if scorer.doc() == TERMINATED {
break;
}
let doc = scorer.doc();
let score = scorer.score();
if score > limit {
heap.push(Float(score));
if heap.len() > n {
heap.pop().unwrap();
}
if heap.len() == n {
limit = heap.peek().unwrap().0;
}
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
}
scorer.advance();
}
checkpoints
}
const MAX_TERM_FREQ: u32 = 100u32;
fn posting_list(max_doc: u32) -> BoxedStrategy<Vec<(DocId, u32)>> {
(1..max_doc + 1)
.prop_flat_map(move |doc_freq| {
(
proptest::bits::bitset::sampled(doc_freq as usize, 0..max_doc as usize),
proptest::collection::vec(1u32..MAX_TERM_FREQ, doc_freq as usize),
)
})
.prop_map(|(docset, term_freqs)| {
docset
.iter()
.map(|doc| doc as u32)
.zip(term_freqs.iter().cloned())
.collect::<Vec<_>>()
})
.boxed()
}
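Each draw first picks a `doc_freq`, then samples that many distinct doc ids below `max_doc` and pairs them with term frequencies in `1..MAX_TERM_FREQ`; for `max_doc = 8` a sample could plausibly look like `vec![(1, 17), (4, 3), (6, 88)]` (illustrative values, not an actual recorded draw).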
fn gen_term_scorers(num_scorers: usize) -> BoxedStrategy<(Vec<Vec<(DocId, u32)>>, Vec<u32>)> {
(1u32..100u32)
.prop_flat_map(move |max_doc: u32| {
(
proptest::collection::vec(posting_list(max_doc), num_scorers),
proptest::collection::vec(2u32..10u32 * MAX_TERM_FREQ, max_doc as usize),
)
})
.boxed()
}
fn test_block_wand_aux(posting_lists: &[Vec<(DocId, u32)>], fieldnorms: &[u32]) {
// We virtually repeat all docs 64 times in order to emulate blocks of 2 documents
// and surface bugs more easily.
const REPEAT: usize = 64;
let fieldnorms_expanded = fieldnorms
.iter()
.cloned()
.flat_map(|fieldnorm| iter::repeat(fieldnorm).take(REPEAT))
.collect::<Vec<u32>>();
let postings_lists_expanded: Vec<Vec<(DocId, u32)>> = posting_lists
.iter()
.map(|posting_list| {
posting_list
.into_iter()
.cloned()
.flat_map(|(doc, term_freq)| {
(0 as u32..REPEAT as u32).map(move |offset| {
(
doc * (REPEAT as u32) + offset,
if offset == 0 { term_freq } else { 1 },
)
})
})
.collect::<Vec<(DocId, u32)>>()
})
.collect::<Vec<_>>();
let total_fieldnorms: u64 = fieldnorms_expanded
.iter()
.cloned()
.map(|fieldnorm| fieldnorm as u64)
.sum();
let average_fieldnorm = (total_fieldnorms as Score) / (fieldnorms_expanded.len() as Score);
let max_doc = fieldnorms_expanded.len();
let term_scorers: Vec<TermScorer> = postings_lists_expanded
.iter()
.map(|postings| {
let bm25_weight = BM25Weight::for_one_term(
postings.len() as u64,
max_doc as u64,
average_fieldnorm,
);
TermScorer::create_for_test(postings, &fieldnorms_expanded[..], bm25_weight)
})
.collect();
for top_k in 1..4 {
let checkpoints_for_each_pruning =
compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
let checkpoints_manual = compute_checkpoints_manual(term_scorers.clone(), top_k);
assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
.iter()
.zip(checkpoints_manual.iter())
{
assert_eq!(left_doc, right_doc);
assert!(nearly_equals(left_score, right_score));
}
}
}
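
The expansion step maps each `(doc, term_freq)` to `REPEAT` consecutive documents, keeping the original term frequency on the first copy and padding the rest with 1. A tiny self-contained check of that mapping (with a smaller repeat factor for readability):

fn expand(doc: u32, term_freq: u32, repeat: u32) -> Vec<(u32, u32)> {
    (0..repeat)
        .map(|offset| (doc * repeat + offset, if offset == 0 { term_freq } else { 1 }))
        .collect()
}

fn main() {
    // doc 2 with tf 7, repeated 4 times, lands on docs 8..12.
    assert_eq!(expand(2, 7, 4), vec![(8, 7), (9, 1), (10, 1), (11, 1)]);
}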
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_block_wand_two_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(2)) {
test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
}
}
#[test]
fn test_fn_reproduce_proptest() {
let postings_lists = &[
vec![
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(6, 1),
(7, 7),
(8, 1),
(10, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(19, 1),
(20, 1),
(21, 1),
(22, 1),
(24, 1),
(25, 1),
(26, 1),
(28, 1),
(30, 1),
(31, 1),
(33, 1),
(34, 1),
(35, 1),
(36, 95),
(37, 1),
(39, 1),
(41, 1),
(44, 1),
(46, 1),
],
vec![
(0, 5),
(2, 1),
(4, 1),
(5, 84),
(6, 47),
(7, 26),
(8, 50),
(9, 34),
(11, 73),
(12, 11),
(13, 51),
(14, 45),
(15, 18),
(18, 60),
(19, 80),
(20, 63),
(23, 79),
(24, 69),
(26, 35),
(28, 82),
(29, 19),
(30, 2),
(31, 7),
(33, 40),
(34, 1),
(35, 33),
(36, 27),
(37, 24),
(38, 65),
(39, 32),
(40, 85),
(41, 1),
(42, 69),
(43, 11),
(45, 45),
(47, 97),
],
vec![
(2, 1),
(4, 1),
(7, 94),
(8, 1),
(9, 1),
(10, 1),
(12, 1),
(15, 1),
(22, 1),
(23, 1),
(26, 1),
(27, 1),
(32, 1),
(33, 1),
(34, 1),
(36, 96),
(39, 1),
(41, 1),
],
];
let fieldnorms = &[
685, 239, 780, 564, 664, 827, 5, 56, 930, 887, 263, 665, 167, 127, 120, 919, 292, 92,
489, 734, 814, 724, 700, 304, 128, 779, 311, 877, 774, 15, 866, 368, 894, 371, 982,
502, 507, 669, 680, 76, 594, 626, 578, 331, 170, 639, 665, 186,
][..];
test_block_wand_aux(postings_lists, fieldnorms);
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
#[ignore]
fn test_block_wand_three_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(3)) {
test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
}
}
}


@@ -83,7 +83,7 @@ use std::collections::BTreeSet;
/// ]; /// ];
/// // Make a BooleanQuery equivalent to /// // Make a BooleanQuery equivalent to
/// // title:+diary title:-girl /// // title:+diary title:-girl
/// let diary_must_and_girl_mustnot = BooleanQuery::new(queries_with_occurs1); /// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?; /// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
/// assert_eq!(count1, 1); /// assert_eq!(count1, 1);
/// ///
@@ -93,7 +93,7 @@ use std::collections::BTreeSet;
/// IndexRecordOption::Basic, /// IndexRecordOption::Basic,
/// )); /// ));
/// // "title:diary OR title:cow" /// // "title:diary OR title:cow"
/// let title_diary_or_cow = BooleanQuery::new(vec![ /// let title_diary_or_cow = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()), /// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, cow_term_query), /// (Occur::Should, cow_term_query),
/// ]); /// ]);
@@ -108,7 +108,7 @@ use std::collections::BTreeSet;
/// // You can combine subqueries of different types into 1 BooleanQuery: /// // You can combine subqueries of different types into 1 BooleanQuery:
/// // `TermQuery` and `PhraseQuery` /// // `TermQuery` and `PhraseQuery`
/// // "title:diary OR "dairy cow" /// // "title:diary OR "dairy cow"
/// let term_of_phrase_query = BooleanQuery::new(vec![ /// let term_of_phrase_query = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()), /// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, phrase_query.box_clone()), /// (Occur::Should, phrase_query.box_clone()),
/// ]); /// ]);
@@ -117,7 +117,7 @@ use std::collections::BTreeSet;
/// ///
/// // You can nest one BooleanQuery inside another /// // You can nest one BooleanQuery inside another
/// // body:found AND ("title:diary OR "dairy cow") /// // body:found AND ("title:diary OR "dairy cow")
/// let nested_query = BooleanQuery::new(vec![ /// let nested_query = BooleanQuery::from(vec![
/// (Occur::Must, body_term_query), /// (Occur::Must, body_term_query),
/// (Occur::Must, Box::new(term_of_phrase_query)) /// (Occur::Must, Box::new(term_of_phrase_query))
/// ]); /// ]);
@@ -143,7 +143,7 @@ impl Clone for BooleanQuery {
impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery { impl From<Vec<(Occur, Box<dyn Query>)>> for BooleanQuery {
fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery { fn from(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
BooleanQuery::new(subqueries) BooleanQuery { subqueries }
} }
} }
@@ -167,23 +167,6 @@ impl Query for BooleanQuery {
} }
impl BooleanQuery { impl BooleanQuery {
/// Creates a new boolean query.
pub fn new(subqueries: Vec<(Occur, Box<dyn Query>)>) -> BooleanQuery {
BooleanQuery { subqueries }
}
/// Returns the intersection of the queries.
pub fn intersection(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
let subqueries = queries.into_iter().map(|s| (Occur::Must, s)).collect();
BooleanQuery::new(subqueries)
}
/// Returns the union of the queries.
pub fn union(queries: Vec<Box<dyn Query>>) -> BooleanQuery {
let subqueries = queries.into_iter().map(|s| (Occur::Should, s)).collect();
BooleanQuery::new(subqueries)
}
/// Helper method to create a boolean query matching a given list of terms. /// Helper method to create a boolean query matching a given list of terms.
/// The resulting query is a disjunction of the terms. /// The resulting query is a disjunction of the terms.
pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery { pub fn new_multiterms_query(terms: Vec<Term>) -> BooleanQuery {
@@ -195,7 +178,7 @@ impl BooleanQuery {
(Occur::Should, term_query) (Occur::Should, term_query)
}) })
.collect(); .collect();
BooleanQuery::new(occur_term_queries) BooleanQuery::from(occur_term_queries)
} }
/// Deconstructed view of the clauses making up this query. /// Deconstructed view of the clauses making up this query.
@@ -203,77 +186,3 @@ impl BooleanQuery {
&self.subqueries[..] &self.subqueries[..]
} }
} }
#[cfg(test)]
mod tests {
use super::BooleanQuery;
use crate::collector::DocSetCollector;
use crate::query::{QueryClone, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::{DocAddress, Index, Term};
fn create_test_index() -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_for_tests().unwrap();
writer.add_document(doc!(text=>"b c"));
writer.add_document(doc!(text=>"a c"));
writer.add_document(doc!(text=>"a b"));
writer.add_document(doc!(text=>"a d"));
writer.commit()?;
Ok(index)
}
#[test]
fn test_union() -> crate::Result<()> {
let index = create_test_index()?;
let searcher = index.reader()?.searcher();
let text = index.schema().get_field("text").unwrap();
let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
let term_d = TermQuery::new(Term::from_field_text(text, "d"), IndexRecordOption::Basic);
let union_ad = BooleanQuery::union(vec![term_a.box_clone(), term_d.box_clone()]);
let docs = searcher.search(&union_ad, &DocSetCollector)?;
assert_eq!(
docs,
vec![
DocAddress(0u32, 1u32),
DocAddress(0u32, 2u32),
DocAddress(0u32, 3u32)
]
.into_iter()
.collect()
);
Ok(())
}
#[test]
fn test_intersection() -> crate::Result<()> {
let index = create_test_index()?;
let searcher = index.reader()?.searcher();
let text = index.schema().get_field("text").unwrap();
let term_a = TermQuery::new(Term::from_field_text(text, "a"), IndexRecordOption::Basic);
let term_b = TermQuery::new(Term::from_field_text(text, "b"), IndexRecordOption::Basic);
let term_c = TermQuery::new(Term::from_field_text(text, "c"), IndexRecordOption::Basic);
let intersection_ab =
BooleanQuery::intersection(vec![term_a.box_clone(), term_b.box_clone()]);
let intersection_ac =
BooleanQuery::intersection(vec![term_a.box_clone(), term_c.box_clone()]);
let intersection_bc =
BooleanQuery::intersection(vec![term_b.box_clone(), term_c.box_clone()]);
{
let docs = searcher.search(&intersection_ab, &DocSetCollector)?;
assert_eq!(docs, vec![DocAddress(0u32, 2u32)].into_iter().collect());
}
{
let docs = searcher.search(&intersection_ac, &DocSetCollector)?;
assert_eq!(docs, vec![DocAddress(0u32, 1u32)].into_iter().collect());
}
{
let docs = searcher.search(&intersection_bc, &DocSetCollector)?;
assert_eq!(docs, vec![DocAddress(0u32, 0u32)].into_iter().collect());
}
Ok(())
}
}


@@ -1,5 +1,4 @@
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match; use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner}; use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use crate::query::term_query::TermScorer; use crate::query::term_query::TermScorer;
@@ -15,12 +14,12 @@ use crate::query::{intersect_scorers, Explanation};
use crate::{DocId, Score}; use crate::{DocId, Score};
use std::collections::HashMap; use std::collections::HashMap;
enum SpecializedScorer { enum SpecializedScorer<TScoreCombiner: ScoreCombiner> {
TermUnion(Vec<TermScorer>), TermUnion(Union<TermScorer, TScoreCombiner>),
Other(Box<dyn Scorer>), Other(Box<dyn Scorer>),
} }
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer<TScoreCombiner>
where where
TScoreCombiner: ScoreCombiner, TScoreCombiner: ScoreCombiner,
{ {
@@ -36,29 +35,20 @@ where
.into_iter() .into_iter()
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap())) .map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
.collect(); .collect();
if scorers return SpecializedScorer::TermUnion(Union::<TermScorer, TScoreCombiner>::from(
.iter() scorers,
.all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq) ));
{
// Block wand is only available iff we read frequencies.
return SpecializedScorer::TermUnion(scorers);
} else {
return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
scorers,
)));
}
} }
} }
SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers))) SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
} }
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> { impl<TScoreCombiner: ScoreCombiner> Into<Box<dyn Scorer>> for SpecializedScorer<TScoreCombiner> {
match scorer { fn into(self) -> Box<dyn Scorer> {
SpecializedScorer::TermUnion(term_scorers) => { match self {
let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers); Self::TermUnion(union) => Box::new(union),
Box::new(union_scorer) Self::Other(scorer) => scorer,
} }
SpecializedScorer::Other(scorer) => scorer,
} }
} }
@@ -78,7 +68,7 @@ impl BooleanWeight {
fn per_occur_scorers( fn per_occur_scorers(
&self, &self,
reader: &SegmentReader, reader: &SegmentReader,
boost: Score, boost: f32,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> { ) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new(); let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for &(ref occur, ref subweight) in &self.weights { for &(ref occur, ref subweight) in &self.weights {
@@ -94,48 +84,47 @@ impl BooleanWeight {
fn complex_scorer<TScoreCombiner: ScoreCombiner>( fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self, &self,
reader: &SegmentReader, reader: &SegmentReader,
boost: Score, boost: f32,
) -> crate::Result<SpecializedScorer> { ) -> crate::Result<SpecializedScorer<TScoreCombiner>> {
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?; let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers let should_scorer_opt: Option<SpecializedScorer<TScoreCombiner>> = per_occur_scorers
.remove(&Occur::Should) .remove(&Occur::Should)
.map(scorer_union::<TScoreCombiner>); .map(scorer_union::<TScoreCombiner>);
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::MustNot) .remove(&Occur::MustNot)
.map(scorer_union::<DoNothingCombiner>) .map(scorer_union::<DoNothingCombiner>)
.map(into_box_scorer::<DoNothingCombiner>); .map(Into::into);
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Must) .remove(&Occur::Must)
.map(intersect_scorers); .map(intersect_scorers);
let positive_scorer: SpecializedScorer = match (should_scorer_opt, must_scorer_opt) { let positive_scorer: SpecializedScorer<TScoreCombiner> =
(Some(should_scorer), Some(must_scorer)) => { match (should_scorer_opt, must_scorer_opt) {
if self.scoring_enabled { (Some(should_scorer), Some(must_scorer)) => {
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::< if self.scoring_enabled {
Box<dyn Scorer>, SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
Box<dyn Scorer>, Box<dyn Scorer>,
TScoreCombiner, Box<dyn Scorer>,
>::new( TScoreCombiner,
must_scorer, >::new(
into_box_scorer::<TScoreCombiner>(should_scorer), must_scorer, should_scorer.into()
))) )))
} else { } else {
SpecializedScorer::Other(must_scorer) SpecializedScorer::Other(must_scorer)
}
} }
} (None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
(None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer), (Some(should_scorer), None) => should_scorer,
(Some(should_scorer), None) => should_scorer, (None, None) => {
(None, None) => { return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer))); }
} };
};
if let Some(exclude_scorer) = exclude_scorer_opt { if let Some(exclude_scorer) = exclude_scorer_opt {
let positive_scorer_boxed: Box<dyn Scorer> = let positive_scorer_boxed: Box<dyn Scorer> = positive_scorer.into();
into_box_scorer::<TScoreCombiner>(positive_scorer);
Ok(SpecializedScorer::Other(Box::new(Exclude::new( Ok(SpecializedScorer::Other(Box::new(Exclude::new(
positive_scorer_boxed, positive_scorer_boxed,
exclude_scorer, exclude_scorer,
@@ -147,7 +136,7 @@ impl BooleanWeight {
} }
impl Weight for BooleanWeight { impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
if self.weights.is_empty() { if self.weights.is_empty() {
Ok(Box::new(EmptyScorer)) Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 { } else if self.weights.len() == 1 {
@@ -159,22 +148,20 @@ impl Weight for BooleanWeight {
} }
} else if self.scoring_enabled { } else if self.scoring_enabled {
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost) self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
.map(|specialized_scorer| { .map(Into::into)
into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
})
} else { } else {
self.complex_scorer::<DoNothingCombiner>(reader, boost) self.complex_scorer::<DoNothingCombiner>(reader, boost)
.map(into_box_scorer::<DoNothingCombiner>) .map(Into::into)
} }
} }
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?; let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) != doc { if scorer.seek(doc) != doc {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
} }
if !self.scoring_enabled { if !self.scoring_enabled {
return Ok(Explanation::new("BooleanQuery with no scoring", 1.0)); return Ok(Explanation::new("BooleanQuery with no scoring", 1f32));
} }
let mut explanation = Explanation::new("BooleanClause. Sum of ...", scorer.score()); let mut explanation = Explanation::new("BooleanClause. Sum of ...", scorer.score());
@@ -193,11 +180,9 @@ impl Weight for BooleanWeight {
reader: &SegmentReader, reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score), callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> { ) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?; let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer { match scorer {
SpecializedScorer::TermUnion(term_scorers) => { SpecializedScorer::TermUnion(mut union_scorer) => {
let mut union_scorer =
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
for_each_scorer(&mut union_scorer, callback); for_each_scorer(&mut union_scorer, callback);
} }
SpecializedScorer::Other(mut scorer) => { SpecializedScorer::Other(mut scorer) => {
@@ -219,14 +204,14 @@ impl Weight for BooleanWeight {
/// important optimization (e.g. BlockWAND for union). /// important optimization (e.g. BlockWAND for union).
fn for_each_pruning( fn for_each_pruning(
&self, &self,
threshold: Score, threshold: f32,
reader: &SegmentReader, reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score, callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> { ) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?; let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer { match scorer {
SpecializedScorer::TermUnion(term_scorers) => { SpecializedScorer::TermUnion(mut union_scorer) => {
super::block_wand(term_scorers, threshold, callback); for_each_pruning_scorer(&mut union_scorer, threshold, callback);
} }
SpecializedScorer::Other(mut scorer) => { SpecializedScorer::Other(mut scorer) => {
for_each_pruning_scorer(scorer.as_mut(), threshold, callback); for_each_pruning_scorer(scorer.as_mut(), threshold, callback);


@@ -1,8 +1,6 @@
mod block_wand;
mod boolean_query; mod boolean_query;
mod boolean_weight; mod boolean_weight;
pub(crate) use self::block_wand::block_wand;
pub use self::boolean_query::BooleanQuery; pub use self::boolean_query::BooleanQuery;
#[cfg(test)] #[cfg(test)]
@@ -32,12 +30,14 @@ mod tests {
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a b c")); {
index_writer.add_document(doc!(text_field => "a c")); index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "b c")); index_writer.add_document(doc!(text_field => "a c"));
index_writer.add_document(doc!(text_field => "a b c d")); index_writer.add_document(doc!(text_field => "b c"));
index_writer.add_document(doc!(text_field => "d")); index_writer.add_document(doc!(text_field => "a b c d"));
index_writer.add_document(doc!(text_field => "d"));
}
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
(index, text_field) (index, text_field)
@@ -59,7 +59,9 @@ mod tests {
let query = query_parser.parse_query("+a").unwrap(); let query = query_parser.parse_query("+a").unwrap();
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap(); let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<TermScorer>()); assert!(scorer.is::<TermScorer>());
} }
@@ -71,13 +73,17 @@ mod tests {
{ {
let query = query_parser.parse_query("+a +b +c").unwrap(); let query = query_parser.parse_query("+a +b +c").unwrap();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap(); let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<Intersection<TermScorer>>()); assert!(scorer.is::<Intersection<TermScorer>>());
} }
{ {
let query = query_parser.parse_query("+a +(b c)").unwrap(); let query = query_parser.parse_query("+a +(b c)").unwrap();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap(); let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>()); assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
} }
} }
@@ -90,7 +96,9 @@ mod tests {
{ {
let query = query_parser.parse_query("+a b").unwrap(); let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap(); let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<RequiredOptionalScorer< assert!(scorer.is::<RequiredOptionalScorer<
Box<dyn Scorer>, Box<dyn Scorer>,
Box<dyn Scorer>, Box<dyn Scorer>,
@@ -100,7 +108,9 @@ mod tests {
{ {
let query = query_parser.parse_query("+a b").unwrap(); let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, false).unwrap(); let weight = query.weight(&searcher, false).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap(); let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<TermScorer>()); assert!(scorer.is::<TermScorer>());
} }
} }
@@ -132,29 +142,29 @@ mod tests {
.collect::<Vec<DocId>>() .collect::<Vec<DocId>>()
}; };
{ {
let boolean_query = BooleanQuery::new(vec![(Occur::Must, make_term_query("a"))]); let boolean_query = BooleanQuery::from(vec![(Occur::Must, make_term_query("a"))]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![(Occur::Should, make_term_query("a"))]); let boolean_query = BooleanQuery::from(vec![(Occur::Should, make_term_query("a"))]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Should, make_term_query("a")), (Occur::Should, make_term_query("a")),
(Occur::Should, make_term_query("b")), (Occur::Should, make_term_query("b")),
]); ]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 2, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 2, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Must, make_term_query("a")), (Occur::Must, make_term_query("a")),
(Occur::Should, make_term_query("b")), (Occur::Should, make_term_query("b")),
]); ]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]); assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Must, make_term_query("a")), (Occur::Must, make_term_query("a")),
(Occur::Should, make_term_query("b")), (Occur::Should, make_term_query("b")),
(Occur::MustNot, make_term_query("d")), (Occur::MustNot, make_term_query("d")),
@@ -162,7 +172,7 @@ mod tests {
assert_eq!(matching_docs(&boolean_query), vec![0, 1]); assert_eq!(matching_docs(&boolean_query), vec![0, 1]);
} }
{ {
let boolean_query = BooleanQuery::new(vec![(Occur::MustNot, make_term_query("d"))]); let boolean_query = BooleanQuery::from(vec![(Occur::MustNot, make_term_query("d"))]);
assert_eq!(matching_docs(&boolean_query), Vec::<u32>::new()); assert_eq!(matching_docs(&boolean_query), Vec::<u32>::new());
} }
} }
@@ -192,7 +202,7 @@ mod tests {
let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion
{ {
let boolean_query_no_excluded = let boolean_query_no_excluded =
BooleanQuery::new(vec![(Occur::Must, make_term_query("d"))]); BooleanQuery::from(vec![(Occur::Must, make_term_query("d"))]);
let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded); let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded);
assert_eq!(topdocs_no_excluded.len(), 2); assert_eq!(topdocs_no_excluded.len(), 2);
let (top_score, top_doc) = topdocs_no_excluded[0]; let (top_score, top_doc) = topdocs_no_excluded[0];
@@ -202,7 +212,7 @@ mod tests {
} }
{ {
let boolean_query_two_excluded = BooleanQuery::new(vec![ let boolean_query_two_excluded = BooleanQuery::from(vec![
(Occur::Must, make_term_query("d")), (Occur::Must, make_term_query("d")),
(Occur::MustNot, make_term_query("a")), (Occur::MustNot, make_term_query("a")),
(Occur::MustNot, make_term_query("b")), (Occur::MustNot, make_term_query("b")),
@@ -222,7 +232,7 @@ mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
{ {
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a b c")); index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "a c")); index_writer.add_document(doc!(text_field => "a c"));
index_writer.add_document(doc!(text_field => "b c")); index_writer.add_document(doc!(text_field => "b c"));
@@ -239,21 +249,21 @@ mod tests {
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let boolean_query = let boolean_query =
BooleanQuery::new(vec![(Occur::Should, term_a), (Occur::Should, term_b)]); BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]);
let boolean_weight = boolean_query.weight(&searcher, true).unwrap(); let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
{ {
let mut boolean_scorer = boolean_weight let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 1.0) .scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap(); .unwrap();
assert_eq!(boolean_scorer.doc(), 0u32); assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 0.84163445); assert_nearly_equals!(boolean_scorer.score(), 0.84163445f32);
} }
{ {
let mut boolean_scorer = boolean_weight let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 2.0) .scorer(searcher.segment_reader(0u32), 2.0f32)
.unwrap(); .unwrap();
assert_eq!(boolean_scorer.doc(), 0u32); assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 1.6832689); assert_nearly_equals!(boolean_scorer.score(), 1.6832689f32);
} }
} }
@@ -279,38 +289,174 @@ mod tests {
}; };
{ {
let boolean_query = BooleanQuery::new(vec![ let boolean_query = BooleanQuery::from(vec![
(Occur::Must, make_term_query("a")), (Occur::Must, make_term_query("a")),
(Occur::Must, make_term_query("b")), (Occur::Must, make_term_query("b")),
]); ]);
let scores = score_docs(&boolean_query); assert_eq!(score_docs(&boolean_query), vec![0.977973, 0.84699446]);
assert_nearly_equals!(scores[0], 0.977973);
assert_nearly_equals!(scores[1], 0.84699446);
} }
} }
// motivated by #554
#[test] #[test]
pub fn test_explain() -> crate::Result<()> { fn test_bm25_several_fields() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let text = schema_builder.add_text_field("text", STRING); let title = schema_builder.add_text_field("title", TEXT);
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 5_000_000)?;
index_writer.add_document(doc!(text=>"a")); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text=>"b")); index_writer.add_document(doc!(
index_writer.commit()?; // tf = 1 0
let searcher = index.reader()?.searcher(); title => "Законы притяжения Оксана Кулакова",
let term_a: Box<dyn Query> = Box::new(TermQuery::new( // tf = 1 0
Term::from_field_text(text, "a"), text => "Законы притяжения Оксана Кулакова] \n\nТема: Сексуальное искусство, Женственность\nТип товара: Запись вебинара (аудио)\nПродолжительность: 1,5 часа\n\nСсылка на вебинар:\n ",
IndexRecordOption::Basic,
)); ));
let term_b: Box<dyn Query> = Box::new(TermQuery::new( index_writer.add_document(doc!(
Term::from_field_text(text, "b"), // tf = 1 0
IndexRecordOption::Basic, title => "Любимые русские пироги (Оксана Путан)",
// tf = 2 0
text => "http://i95.fastpic.ru/big/2017/0628/9a/615b9c8504d94a3893d7f496ac53539a.jpg \n\nОт издателя\nОксана Путан профессиональный повар, автор кулинарных книг и известный кулинарный блогер. Ее рецепты отличаются практичностью, доступностью и пользуются огромной популярностью в русскоязычном интернете. Это третья книга автора о самом вкусном и ароматном настоящих русских пирогах и выпечке!\nДаже новички на кухне легко готовят по ее рецептам. Оксана описывает процесс приготовления настолько подробно и понятно, что вам остается только наслаждаться готовкой и не тратить время на лишние усилия. Готовьте легко и просто!\n\nhttps://www.ozon.ru/context/detail/id/139872462/"
)); ));
let query = BooleanQuery::from(vec![(Occur::Should, term_a), (Occur::Should, term_b)]); index_writer.add_document(doc!(
let explanation = query.explain(&searcher, DocAddress(0, 0u32))?; // tf = 1 1
assert_nearly_equals!(explanation.value(), 0.6931472); title => "PDF Мастер Класс \"Морячок\" (Оксана Лифенко)",
Ok(()) // tf = 0 0
text => "https://i.ibb.co/pzvHrDN/I3d U T6 Gg TM.jpg\nhttps://i.ibb.co/NFrb6v6/N0ls Z9nwjb U.jpg\nВ описание входит штаны, кофта, берет, матросский воротник. Описание продается в формате PDF, состоит из 12 страниц формата А4 и может быть напечатано на любом принтере.\nОписание предназначено для кукол BJD RealPuki от FairyLand, но может подойти и другим подобным куклам. Также вы можете вязать этот наряд из обычной пряжи, и он подойдет для куколок побольше.\nhttps://vk.com/market 95724412?w=product 95724412_2212"
));
for _ in 0..1_000 {
index_writer.add_document(doc!(
title => "a b d e f g",
text => "maitre corbeau sur un arbre perche tenait dans son bec un fromage Maitre rnard par lodeur alleche lui tint a peu pres ce langage."
));
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
scorer.advance();
let explanation = query.explain(&searcher, DocAddress(0u32, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
r#"{
"value": 12.997711,
"description": "BooleanClause. Sum of ...",
"details": [
{
"value": 12.997711,
"description": "BooleanClause. Sum of ...",
"details": [
{
"value": 6.551476,
"description": "TermQuery, product of...",
"details": [
{
"value": 2.2,
"description": "(K1+1)"
},
{
"value": 5.658984,
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
"details": [
{
"value": 3.0,
"description": "n, number of docs containing this term"
},
{
"value": 1003.0,
"description": "N, total number of docs"
}
]
},
{
"value": 0.5262329,
"description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
"details": [
{
"value": 1.0,
"description": "freq, occurrences of term within document"
},
{
"value": 1.2,
"description": "k1, term saturation parameter"
},
{
"value": 0.75,
"description": "b, length normalization parameter"
},
{
"value": 4.0,
"description": "dl, length of field"
},
{
"value": 5.997009,
"description": "avgdl, average length of field"
}
]
}
]
},
{
"value": 6.446235,
"description": "TermQuery, product of...",
"details": [
{
"value": 2.2,
"description": "(K1+1)"
},
{
"value": 5.9954567,
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
"details": [
{
"value": 2.0,
"description": "n, number of docs containing this term"
},
{
"value": 1003.0,
"description": "N, total number of docs"
}
]
},
{
"value": 0.4887212,
"description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
"details": [
{
"value": 1.0,
"description": "freq, occurrences of term within document"
},
{
"value": 1.2,
"description": "k1, term saturation parameter"
},
{
"value": 0.75,
"description": "b, length normalization parameter"
},
{
"value": 20.0,
"description": "dl, length of field"
},
{
"value": 24.123629,
"description": "avgdl, average length of field"
}
]
}
]
}
]
}
]
}"#
);
} }
} }
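
As a sanity check, the numbers in the explanation above are internally consistent: each TermQuery value is the product of its three details, and each idf matches its stated formula. A quick hypothetical verification for the first clause (dl = 4.0, avgdl = 5.997009):

fn main() {
    // idf = ln(1 + (N - n + 0.5) / (n + 0.5)) with n = 3, N = 1003.
    let idf = (1.0f32 + (1003.0 - 3.0 + 0.5) / (3.0 + 0.5)).ln();
    assert!((idf - 5.658984).abs() < 1e-3);
    // tf factor = freq / (freq + k1 * (1 - b + b * dl / avgdl)).
    let tf = 1.0 / (1.0 + 1.2 * (1.0 - 0.75 + 0.75 * 4.0 / 5.997009f32));
    assert!((tf - 0.5262329).abs() < 1e-4);
    // TermQuery value = (K1 + 1) * idf * tf factor.
    assert!((2.2 * idf * tf - 6.551476).abs() < 1e-3);
}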


@@ -1,7 +1,7 @@
use crate::fastfield::DeleteBitSet; use crate::fastfield::DeleteBitSet;
use crate::query::explanation::does_not_match; use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight}; use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term}; use crate::{DocId, DocSet, Searcher, SegmentReader, Term};
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::fmt; use std::fmt;
@@ -12,12 +12,12 @@ use std::fmt;
/// factor. /// factor.
pub struct BoostQuery { pub struct BoostQuery {
query: Box<dyn Query>, query: Box<dyn Query>,
boost: Score, boost: f32,
} }
impl BoostQuery { impl BoostQuery {
/// Builds a boost query. /// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery { pub fn new(query: Box<dyn Query>, boost: f32) -> BoostQuery {
BoostQuery { query, boost } BoostQuery { query, boost }
} }
} }
@@ -55,22 +55,22 @@ impl Query for BoostQuery {
pub(crate) struct BoostWeight { pub(crate) struct BoostWeight {
weight: Box<dyn Weight>, weight: Box<dyn Weight>,
boost: Score, boost: f32,
} }
impl BoostWeight { impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self { pub fn new(weight: Box<dyn Weight>, boost: f32) -> Self {
BoostWeight { weight, boost } BoostWeight { weight, boost }
} }
} }
impl Weight for BoostWeight { impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> { fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost) self.weight.scorer(reader, boost * self.boost)
} }
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> { fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?; let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) != doc { if scorer.seek(doc) != doc {
return Err(does_not_match(doc)); return Err(does_not_match(doc));
} }
@@ -88,11 +88,11 @@ impl Weight for BoostWeight {
pub(crate) struct BoostScorer<S: Scorer> { pub(crate) struct BoostScorer<S: Scorer> {
underlying: S, underlying: S,
boost: Score, boost: f32,
} }
impl<S: Scorer> BoostScorer<S> { impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> { pub fn new(underlying: S, boost: f32) -> BoostScorer<S> {
BoostScorer { underlying, boost } BoostScorer { underlying, boost }
} }
} }
@@ -128,7 +128,7 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
} }
impl<S: Scorer> Scorer for BoostScorer<S> { impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> Score { fn score(&mut self) -> f32 {
self.underlying.score() * self.boost self.underlying.score() * self.boost
} }
} }
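
A brief usage sketch: wrapping any query in `BoostQuery` multiplies every score it produces by the boost factor, which is what the "Boost x0.2 of ..." explanation in the test below reflects. The snippet assumes a `searcher`, a `text_field`, and the `TopDocs` collector from tantivy's collector module (hypothetical setup, not part of this change):

let term_query: Box<dyn Query> = Box::new(TermQuery::new(
    Term::from_field_text(text_field, "diary"),
    IndexRecordOption::Basic,
));
// Matching docs keep their TermQuery scores, multiplied by 0.2.
let boosted = BoostQuery::new(term_query, 0.2);
let top_docs = searcher.search(&boosted, &TopDocs::with_limit(10))?;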
@@ -144,7 +144,7 @@ mod tests {
fn test_boost_query_explain() { fn test_boost_query_explain() {
let schema = Schema::builder().build(); let schema = Schema::builder().build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(Document::new()); index_writer.add_document(Document::new());
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
@@ -153,7 +153,7 @@ mod tests {
let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap(); let explanation = query.explain(&searcher, DocAddress(0, 0u32)).unwrap();
assert_eq!( assert_eq!(
explanation.to_pretty_json(), explanation.to_pretty_json(),
"{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\",\n \"context\": []\n }\n ],\n \"context\": []\n}" "{\n \"value\": 0.2,\n \"description\": \"Boost x0.2 of ...\",\n \"details\": [\n {\n \"value\": 1.0,\n \"description\": \"AllQuery\"\n }\n ]\n}"
) )
} }
} }
