Compare commits

..

1 Commit

Author: Paul Masurel
SHA1: 8861919d5f
Message: Alive doc iterator.
Date: 2020-06-02 09:06:46 +09:00
111 changed files with 1787 additions and 3295 deletions

.gitignore vendored
View File

@@ -1,5 +1,4 @@
tantivy.iml
proptest-regressions
*.swp
target
target/debug
@@ -12,4 +11,3 @@ cpp/simdcomp/bitpackingbenchmark
*.bk
.idea
trace.dat
cargo-timing*

View File

@@ -1,23 +1,5 @@
Tantivy 0.14.0
=========================
- Removed the dependency on atomicwrites #833. (Implemented by @pmasurel upon suggestion and research from @asafigan.)
- Migrated tantivy error from the now deprecated `failure` crate to `thiserror` #760. (@hirevo)
- Switched to structured logging (via the `slog` crate). (@pmasurel)
Tantivy 0.13.1
===================
Made `Query` and `Collector` `Send + Sync`.
Updated misc dependency versions.
Tantivy 0.13.0
======================
Tantivy 0.13 introduces a change in the index format that will require
you to reindex your index (BlockWAND information is added in the skiplist).
The index size increase is minor as this information is only added for
full blocks.
If you have a massive index for which reindexing is not an option, please contact me
so that we can discuss possible solutions.
- Bugfix in `FuzzyTermQuery` not matching terms by prefix when it should (@Peachball)
- Relaxed constraints on the custom/tweak score functions. At the segment level, they can be mut, and they are not required to be Sync + Send.
- `MMapDirectory::open` does not return a `Result` anymore.
@@ -35,8 +17,6 @@ while doc != TERMINATED {
The change made it possible to greatly simplify a lot of the docset's code.
- Misc internal optimizations and introduction of the `Scorer::for_each_pruning` function. (@fulmicoton)
- Added an offset option to the Top(.*)Collectors. (@robyoung)
- Added Block WAND. Performance on TOP-K on term-unions should be greatly increased. (@fulmicoton, and special thanks
to the PISA team for answering all my questions!)
Tantivy 0.12.0
======================
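The hunk context above references the `while doc != TERMINATED` iteration style that the 0.13 changelog describes for docsets. As orientation for readers of this diff, here is a minimal, self-contained sketch of that sentinel-terminated pattern; the toy type and the `u32::MAX` sentinel are illustrative assumptions, not tantivy's actual definitions.

```rust
// Minimal sketch of the sentinel-terminated docset iteration referenced above.
// `ToyDocSet` and the `u32::MAX` sentinel are illustrative assumptions, not
// tantivy's actual types.
const TERMINATED: u32 = u32::MAX;

struct ToyDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl ToyDocSet {
    // Returns the current doc, or TERMINATED once the set is exhausted.
    fn doc(&self) -> u32 {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }
    // Moves to the next doc and returns it.
    fn advance(&mut self) -> u32 {
        self.cursor += 1;
        self.doc()
    }
}

fn main() {
    let mut docset = ToyDocSet { docs: vec![0, 2, 5], cursor: 0 };
    let mut doc = docset.doc();
    while doc != TERMINATED {
        println!("doc {}", doc);
        doc = docset.advance();
    }
}
```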

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.14.0-dev"
version = "0.12.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -13,21 +13,21 @@ keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
base64 = "0.12"
byteorder = "1"
crc32fast = "1"
once_cell = "1"
regex ={version = "1", default-features = false, features = ["std"]}
base64 = "0.12.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.3"
memmap = {version = "0.7", optional=true}
lz4 = {version="1", optional=true}
lz4 = {version="1.20", optional=true}
snap = "1"
tempfile = {version="3", optional=true}
slog = "2.5"
slog-stdlog = "4"
serde = {version="1", features=["derive"]}
serde_json = "1"
num_cpus = "1"
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
serde = {version="1.0", features=["derive"]}
serde_json = "1.0"
num_cpus = "1.2"
fs2={version="0.4", optional=true}
levenshtein_automata = "0.2"
notify = {version="4", optional=true}
@@ -35,20 +35,20 @@ uuid = { version = "0.8", features = ["v4", "serde"] }
crossbeam = "0.7"
futures = {version = "0.3", features=["thread-pool"] }
owning_ref = "0.4"
tantivy-query-grammar = { version="0.14.0-dev", path="./query-grammar" }
stable_deref_trait = "1"
rust-stemmers = "1"
downcast-rs = "1"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.2"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { version="0.13", path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.4"
fnv = "1"
fnv = "1.0.6"
owned-read = "0.4"
thiserror = "1.0"
htmlescape = "0.3"
failure = "0.1"
htmlescape = "0.3.1"
fail = "0.4"
murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "1"
smallvec = "1.0"
rayon = "1"
[target.'cfg(windows)'.dependencies]
@@ -58,7 +58,6 @@ winapi = "0.3"
rand = "0.7"
maplit = "1"
matches = "0.1.8"
proptest = "0.10"
[dev-dependencies.fail]
version = "0.4"
@@ -75,7 +74,7 @@ overflow-checks = true
[features]
default = ["mmap"]
mmap = ["fs2", "tempfile", "memmap", "notify"]
mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"]
failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.

View File

@@ -34,6 +34,11 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
performance for different types of queries / collections.
In general, Tantivy tends to be
- slower than Lucene on union with a Top-K due to Block-WAND optimization.
- faster than Lucene on intersection and phrase queries.
Your mileage WILL vary depending on the nature of queries and their load.
# Features

View File

@@ -112,6 +112,18 @@ fn main() -> tantivy::Result<()> {
limbs and branches that arch over the pool"
));
index_writer.add_document(doc!(
title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
bank and runs deep and green. The water is warm too, for it has slipped twinkling \
over the yellow sands in the sunlight before reaching the narrow pool. On one \
side of the river the golden foothill slopes curve up to the strong and rocky \
Gabilan Mountains, but on the valley side the water is lined with trees—willows \
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
));
// Multivalued field just need to be repeated.
index_writer.add_document(doc!(
title => "Frankenstein",

View File

@@ -14,7 +14,7 @@ use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader, TantivyError};
use tantivy::{doc, Index, SegmentReader, TantivyError};
#[derive(Default)]
struct Stats {
@@ -114,7 +114,7 @@ struct StatsSegmentCollector {
impl SegmentCollector for StatsSegmentCollector {
type Fruit = Option<Stats>;
fn collect(&mut self, doc: u32, _score: Score) {
fn collect(&mut self, doc: u32, _score: f32) {
let value = self.fast_field_reader.get(doc) as f64;
self.stats.count += 1;
self.stats.sum += value;

View File

@@ -117,16 +117,11 @@ fn main() -> tantivy::Result<()> {
if let Some(mut block_segment_postings) =
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
{
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
while block_segment_postings.advance() {
// Once again these docs MAY contain deleted documents as well.
let docs = block_segment_postings.docs();
// Prints `Docs [0, 2].`
println!("Docs {:?}", docs);
block_segment_postings.advance();
}
}
}
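For orientation, the hunk above switches the example between two block-iteration styles. A toy model of the `while advance()` style follows, with stand-in types rather than tantivy's `BlockSegmentPostings` API.

```rust
// Toy stand-in for the `while advance()` style shown above; this is not
// tantivy's `BlockSegmentPostings` API, just a model of the iteration contract.
struct ToyBlockPostings {
    blocks: Vec<Vec<u32>>,
    next: usize,
    current: Vec<u32>,
}

impl ToyBlockPostings {
    fn new(blocks: Vec<Vec<u32>>) -> Self {
        ToyBlockPostings { blocks, next: 0, current: Vec::new() }
    }
    // Loads the next block; returns false once every block has been consumed.
    fn advance(&mut self) -> bool {
        if self.next < self.blocks.len() {
            self.current = self.blocks[self.next].clone();
            self.next += 1;
            true
        } else {
            self.current.clear();
            false
        }
    }
    // The docs of the currently loaded block.
    fn docs(&self) -> &[u32] {
        &self.current
    }
}

fn main() {
    let mut postings = ToyBlockPostings::new(vec![vec![0, 2], vec![5, 7]]);
    while postings.advance() {
        println!("Docs {:?}", postings.docs());
    }
}
```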

View File

@@ -1,6 +1,6 @@
[package]
name = "tantivy-query-grammar"
version = "0.14.0-dev"
version = "0.13.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]

View File

@@ -31,12 +31,22 @@ impl Occur {
/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match (left, right) {
(Occur::Should, _) => right,
(Occur::Must, Occur::MustNot) => Occur::MustNot,
(Occur::Must, _) => Occur::Must,
(Occur::MustNot, Occur::MustNot) => Occur::Must,
(Occur::MustNot, _) => Occur::MustNot,
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
}
}
}
@@ -46,27 +56,3 @@ impl fmt::Display for Occur {
f.write_char(self.to_char())
}
}
#[cfg(test)]
mod test {
use crate::Occur;
#[test]
fn test_occur_compose() {
assert_eq!(Occur::compose(Occur::Should, Occur::Should), Occur::Should);
assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
assert_eq!(
Occur::compose(Occur::Should, Occur::MustNot),
Occur::MustNot
);
assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::Must), Occur::Must);
assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
assert_eq!(
Occur::compose(Occur::MustNot, Occur::Should),
Occur::MustNot
);
assert_eq!(Occur::compose(Occur::MustNot, Occur::Must), Occur::MustNot);
assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
}
}
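For context on the hunk above: both shapes (the tuple match and the nested `if`s) encode the same composition table, which the removed test spelled out. A compact standalone restatement, for illustration only:

```rust
// Standalone restatement of the composition table, mirroring the tuple-match
// shape above and the assertions from the removed test. Illustration only.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Occur {
    Should,
    Must,
    MustNot,
}

// `Should` defers to the right-hand side; two negations cancel into a `Must`.
fn compose(left: Occur, right: Occur) -> Occur {
    match (left, right) {
        (Occur::Should, _) => right,
        (Occur::Must, Occur::MustNot) => Occur::MustNot,
        (Occur::Must, _) => Occur::Must,
        (Occur::MustNot, Occur::MustNot) => Occur::Must,
        (Occur::MustNot, _) => Occur::MustNot,
    }
}

fn main() {
    assert_eq!(compose(Occur::Should, Occur::Must), Occur::Must);
    assert_eq!(compose(Occur::Must, Occur::MustNot), Occur::MustNot);
    assert_eq!(compose(Occur::MustNot, Occur::MustNot), Occur::Must);
    println!("composition table holds");
}
```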

View File

@@ -9,10 +9,8 @@ use combine::{
fn field<'a>() -> impl Parser<&'a str, Output = String> {
(
(letter().or(char('_'))),
many(satisfy(|c: char| {
c.is_alphanumeric() || c == '_' || c == '-'
})),
letter(),
many(satisfy(|c: char| c.is_alphanumeric() || c == '_')),
)
.skip(char(':'))
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
@@ -182,7 +180,7 @@ fn occur_leaf<'a>() -> impl Parser<&'a str, Output = (Option<Occur>, UserInputAS
(optional(occur_symbol()), boosted_leaf())
}
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f32> {
(many1(digit()), optional((char('.'), many1(digit())))).map(
|(int_part, decimal_part_opt): (String, Option<(char, String)>)| {
let mut float_str = int_part;
@@ -190,18 +188,18 @@ fn positive_float_number<'a>() -> impl Parser<&'a str, Output = f64> {
float_str.push(chr);
float_str.push_str(&decimal_str);
}
float_str.parse::<f64>().unwrap()
float_str.parse::<f32>().unwrap()
},
)
}
fn boost<'a>() -> impl Parser<&'a str, Output = f64> {
fn boost<'a>() -> impl Parser<&'a str, Output = f32> {
(char('^'), positive_float_number()).map(|(_, boost)| boost)
}
fn boosted_leaf<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
(leaf(), optional(boost())).map(|(leaf, boost_opt)| match boost_opt {
Some(boost) if (boost - 1.0).abs() > std::f64::EPSILON => {
Some(boost) if (boost - 1.0).abs() > std::f32::EPSILON => {
UserInputAST::Boost(Box::new(leaf), boost)
}
_ => leaf,
@@ -281,16 +279,14 @@ pub fn parse_to_ast<'a>() -> impl Parser<&'a str, Output = UserInputAST> {
#[cfg(test)]
mod test {
type TestParseResult = Result<(), StringStreamError>;
use super::*;
use combine::parser::Parser;
pub fn nearly_equals(a: f64, b: f64) -> bool {
pub fn nearly_equals(a: f32, b: f32) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs()
}
fn assert_nearly_equals(expected: f64, val: f64) {
fn assert_nearly_equals(expected: f32, val: f32) {
assert!(
nearly_equals(val, expected),
"Got {}, expected {}.",
@@ -300,15 +296,14 @@ mod test {
}
#[test]
fn test_occur_symbol() -> TestParseResult {
assert_eq!(super::occur_symbol().parse("-")?, (Occur::MustNot, ""));
assert_eq!(super::occur_symbol().parse("+")?, (Occur::Must, ""));
Ok(())
fn test_occur_symbol() {
assert_eq!(super::occur_symbol().parse("-"), Ok((Occur::MustNot, "")));
assert_eq!(super::occur_symbol().parse("+"), Ok((Occur::Must, "")));
}
#[test]
fn test_positive_float_number() {
fn valid_parse(float_str: &str, expected_val: f64, expected_remaining: &str) {
fn valid_parse(float_str: &str, expected_val: f32, expected_remaining: &str) {
let (val, remaining) = positive_float_number().parse(float_str).unwrap();
assert_eq!(remaining, expected_remaining);
assert_nearly_equals(val, expected_val);
@@ -316,9 +311,9 @@ mod test {
fn error_parse(float_str: &str) {
assert!(positive_float_number().parse(float_str).is_err());
}
valid_parse("1.0", 1.0, "");
valid_parse("1", 1.0, "");
valid_parse("0.234234 aaa", 0.234234f64, " aaa");
valid_parse("1.0", 1.0f32, "");
valid_parse("1", 1.0f32, "");
valid_parse("0.234234 aaa", 0.234234f32, " aaa");
error_parse(".3332");
error_parse("1.");
error_parse("-1.");
@@ -415,25 +410,6 @@ mod test {
assert_eq!(format!("{:?}", ast), "\"abc\"");
}
#[test]
fn test_field_name() -> TestParseResult {
assert_eq!(
super::field().parse("my-field-name:a")?,
("my-field-name".to_string(), "a")
);
assert_eq!(
super::field().parse("my_field_name:a")?,
("my_field_name".to_string(), "a")
);
assert!(super::field().parse(":a").is_err());
assert!(super::field().parse("-my_field:a").is_err());
assert_eq!(
super::field().parse("_my_field:a")?,
("_my_field".to_string(), "a")
);
Ok(())
}
#[test]
fn test_range_parser() {
// testing the range() parser separately

View File

@@ -87,7 +87,7 @@ impl UserInputBound {
pub enum UserInputAST {
Clause(Vec<(Option<Occur>, UserInputAST)>),
Leaf(Box<UserInputLeaf>),
Boost(Box<UserInputAST>, f64),
Boost(Box<UserInputAST>, f32),
}
impl UserInputAST {

View File

@@ -96,18 +96,18 @@ mod tests {
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1.0);
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1.0);
count_collector.collect(0u32, 1f32);
assert_eq!(count_collector.harvest(), 1);
}
{
let mut count_collector = SegmentCountCollector::default();
count_collector.collect(0u32, 1.0);
count_collector.collect(1u32, 1.0);
count_collector.collect(0u32, 1f32);
count_collector.collect(1u32, 1f32);
assert_eq!(count_collector.harvest(), 2);
}
}

View File

@@ -46,7 +46,7 @@ pub trait CustomScorer<TScore>: Sync {
impl<TCustomScorer, TScore> Collector for CustomScoreTopCollector<TCustomScorer, TScore>
where
TCustomScorer: CustomScorer<TScore> + Send + Sync,
TCustomScorer: CustomScorer<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;

View File

@@ -472,7 +472,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let num_facets: usize = 3 * 4 * 5;
let facets: Vec<Facet> = (0..num_facets)
.map(|mut n| {
@@ -531,7 +531,7 @@ mod tests {
let facet_field = schema_builder.add_facet_field("facets");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/subjects/A/a"),
facet_field => Facet::from_text(&"/subjects/B/a"),
@@ -550,12 +550,12 @@ mod tests {
}
#[test]
fn test_doc_search_by_facet() -> crate::Result<()> {
fn test_doc_search_by_facet() {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/A"),
));
@@ -568,8 +568,8 @@ mod tests {
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/D/C/A"),
));
index_writer.commit()?;
let reader = index.reader()?;
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 4);
@@ -586,17 +586,17 @@ mod tests {
assert_eq!(count_facet("/A/C"), 1);
assert_eq!(count_facet("/A/C/A"), 1);
assert_eq!(count_facet("/C/A"), 0);
let query_parser = QueryParser::for_index(&index, vec![]);
{
let query = query_parser.parse_query("facet:/A/B")?;
assert_eq!(1, searcher.search(&query, &Count).unwrap());
let query_parser = QueryParser::for_index(&index, vec![]);
{
let query = query_parser.parse_query("facet:/A/B").unwrap();
assert_eq!(1, searcher.search(&query, &Count).unwrap());
}
{
let query = query_parser.parse_query("facet:/A").unwrap();
assert_eq!(3, searcher.search(&query, &Count).unwrap());
}
}
{
let query = query_parser.parse_query("facet:/A")?;
assert_eq!(3, searcher.search(&query, &Count)?);
}
Ok(())
}
#[test]
@@ -631,7 +631,7 @@ mod tests {
.collect();
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
index_writer.add_document(doc);
}
@@ -684,7 +684,7 @@ mod bench {
// 40425 docs
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
index_writer.add_document(doc);
}

View File

@@ -89,7 +89,7 @@ mod tests {
let index = Index::create_in_ram(schema.clone());
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(

View File

@@ -133,7 +133,7 @@ impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}
/// The collection logic itself is in the `SegmentCollector`.
///
/// Segments are not guaranteed to be visited in any specific order.
pub trait Collector: Sync + Send {
pub trait Collector: Sync {
/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
type Fruit: Fruit;

View File

@@ -55,7 +55,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
type Fruit = Box<dyn Fruit>;
fn collect(&mut self, doc: u32, score: Score) {
fn collect(&mut self, doc: u32, score: f32) {
self.as_mut().collect(doc, score);
}
@@ -65,7 +65,7 @@ impl SegmentCollector for Box<dyn BoxableSegmentCollector> {
}
pub trait BoxableSegmentCollector {
fn collect(&mut self, doc: u32, score: Score);
fn collect(&mut self, doc: u32, score: f32);
fn harvest_from_box(self: Box<Self>) -> Box<dyn Fruit>;
}
@@ -74,7 +74,7 @@ pub struct SegmentCollectorWrapper<TSegmentCollector: SegmentCollector>(TSegment
impl<TSegmentCollector: SegmentCollector> BoxableSegmentCollector
for SegmentCollectorWrapper<TSegmentCollector>
{
fn collect(&mut self, doc: u32, score: Score) {
fn collect(&mut self, doc: u32, score: f32) {
self.0.collect(doc, score);
}
@@ -259,7 +259,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text=>"abc"));
index_writer.add_document(doc!(text=>"abc abc abc"));
index_writer.add_document(doc!(text=>"abc abc"));

View File

@@ -206,7 +206,7 @@ impl Collector for BytesFastFieldTestCollector {
impl SegmentCollector for BytesFastFieldSegmentCollector {
type Fruit = Vec<u8>;
fn collect(&mut self, doc: u32, _score: Score) {
fn collect(&mut self, doc: u32, _score: f32) {
let data = self.reader.get_bytes(doc);
self.vals.extend(data);
}

View File

@@ -38,7 +38,7 @@ use std::fmt;
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -52,8 +52,8 @@ use std::fmt;
/// let query = query_parser.parse_query("diary").unwrap();
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
///
/// assert_eq!(top_docs[0].1, DocAddress(0, 1));
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// ```
pub struct TopDocs(TopCollector<Score>);
@@ -123,7 +123,7 @@ impl TopDocs {
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
///
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
@@ -139,8 +139,8 @@ impl TopDocs {
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2).and_offset(1)).unwrap();
///
/// assert_eq!(top_docs.len(), 2);
/// assert_eq!(top_docs[0].1, DocAddress(0, 4));
/// assert_eq!(top_docs[1].1, DocAddress(0, 3));
/// assert_eq!(&top_docs[0], &(0.5204813, DocAddress(0, 4)));
/// assert_eq!(&top_docs[1], &(0.4793185, DocAddress(0, 3)));
/// ```
pub fn and_offset(self, offset: usize) -> TopDocs {
TopDocs(self.0.and_offset(offset))
@@ -163,7 +163,7 @@ impl TopDocs {
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
@@ -264,7 +264,7 @@ impl TopDocs {
/// fn create_index() -> tantivy::Result<Index> {
/// let schema = create_schema();
/// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
@@ -303,7 +303,7 @@ impl TopDocs {
/// let popularity: u64 = popularity_reader.get(doc);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
/// let popularity_boost_score = ((2u64 + popularity) as f32).log2();
/// popularity_boost_score * original_score
/// }
/// });
@@ -324,7 +324,7 @@ impl TopDocs {
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TScoreSegmentTweaker: ScoreSegmentTweaker<TScore> + 'static,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker> + Send + Sync,
TScoreTweaker: ScoreTweaker<TScore, Child = TScoreSegmentTweaker>,
{
TweakedScoreTopCollector::new(score_tweaker, self.0.into_tscore())
}
@@ -371,7 +371,7 @@ impl TopDocs {
/// # fn main() -> tantivy::Result<()> {
/// # let schema = create_schema();
/// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap();
/// #
/// let popularity: Field = index.schema().get_field("popularity").unwrap();
@@ -438,7 +438,7 @@ impl TopDocs {
where
TScore: 'static + Send + Sync + Clone + PartialOrd,
TCustomSegmentScorer: CustomSegmentScorer<TScore> + 'static,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer> + Send + Sync,
TCustomScorer: CustomScorer<TScore, Child = TCustomSegmentScorer>,
{
CustomScoreTopCollector::new(custom_score, self.0.into_tscore())
}
@@ -479,7 +479,7 @@ impl Collector for TopDocs {
let mut heap: BinaryHeap<ComparableDoc<Score, DocId>> = BinaryHeap::with_capacity(heap_len);
if let Some(delete_bitset) = reader.delete_bitset() {
let mut threshold = Score::MIN;
let mut threshold = f32::MIN;
weight.for_each_pruning(threshold, reader, &mut |doc, score| {
if delete_bitset.is_deleted(doc) {
return threshold;
@@ -491,16 +491,16 @@ impl Collector for TopDocs {
if heap.len() < heap_len {
heap.push(heap_item);
if heap.len() == heap_len {
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
threshold = heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
}
return threshold;
}
*heap.peek_mut().unwrap() = heap_item;
threshold = heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
threshold = heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN);
threshold
})?;
} else {
weight.for_each_pruning(Score::MIN, reader, &mut |doc, score| {
weight.for_each_pruning(f32::MIN, reader, &mut |doc, score| {
let heap_item = ComparableDoc {
feature: score,
doc,
@@ -509,13 +509,13 @@ impl Collector for TopDocs {
heap.push(heap_item);
// TODO the threshold is suboptimal for heap.len == heap_len
if heap.len() == heap_len {
return heap.peek().map(|el| el.feature).unwrap_or(Score::MIN);
return heap.peek().map(|el| el.feature).unwrap_or(f32::MIN);
} else {
return Score::MIN;
return f32::MIN;
}
}
*heap.peek_mut().unwrap() = heap_item;
heap.peek().map(|el| el.feature).unwrap_or(Score::MIN)
heap.peek().map(|el| el.feature).unwrap_or(std::f32::MIN)
})?;
}
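The pruning logic in the hunk above boils down to a size-bounded min-heap whose weakest retained score is fed back as the threshold for `for_each_pruning`. A self-contained sketch of that idea follows; it uses integer scores purely so the standard heap's `Ord` bound is satisfied (tantivy wraps its `f32` scores in a comparable struct), and all names are illustrative.

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Keeps the `k` largest (score, doc) pairs seen so far. Integer scores are
/// used here purely so the standard heap's `Ord` bound is satisfied.
struct TopK {
    k: usize,
    // `Reverse` turns the max-heap into a min-heap, so the weakest retained
    // entry sits at the top and is cheap to inspect or replace.
    heap: BinaryHeap<Reverse<(u64, u32)>>,
}

impl TopK {
    fn new(k: usize) -> Self {
        TopK { k, heap: BinaryHeap::with_capacity(k) }
    }

    // Score of the weakest retained entry, if any.
    fn weakest(&self) -> Option<u64> {
        match self.heap.peek() {
            Some(Reverse((score, _))) => Some(*score),
            None => None,
        }
    }

    /// Inserts a candidate and returns the score a future hit must beat.
    fn insert(&mut self, score: u64, doc: u32) -> u64 {
        if self.heap.len() < self.k {
            self.heap.push(Reverse((score, doc)));
        } else if self.weakest().map_or(false, |w| score > w) {
            // Replace the weakest entry in place; the heap re-sifts on drop.
            *self.heap.peek_mut().unwrap() = Reverse((score, doc));
        }
        if self.heap.len() == self.k {
            self.weakest().unwrap_or(u64::MIN)
        } else {
            u64::MIN
        }
    }
}

fn main() {
    let mut topk = TopK::new(2);
    for (doc, score) in [(0u32, 3u64), (1, 7), (2, 5), (3, 1)] {
        println!("after doc {}: threshold {}", doc, topk.insert(score, doc));
    }
}
```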
@@ -561,7 +561,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -570,13 +570,6 @@ mod tests {
index
}
fn assert_results_equals(results: &[(Score, DocAddress)], expected: &[(Score, DocAddress)]) {
for (result, expected) in results.iter().zip(expected.iter()) {
assert_eq!(result.1, expected.1);
crate::assert_nearly_equals!(result.0, expected.0);
}
}
#[test]
fn test_top_collector_not_at_capacity() {
let index = make_index();
@@ -589,13 +582,13 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(4))
.unwrap();
assert_results_equals(
&score_docs,
&[
assert_eq!(
score_docs,
vec![
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0)),
],
(0.48527452, DocAddress(0, 0))
]
);
}
@@ -611,7 +604,7 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(4).and_offset(2))
.unwrap();
assert_results_equals(&score_docs[..], &[(0.48527452, DocAddress(0, 0))]);
assert_eq!(score_docs, vec![(0.48527452, DocAddress(0, 0))]);
}
#[test]
@@ -626,12 +619,12 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(2))
.unwrap();
assert_results_equals(
&score_docs,
&[
assert_eq!(
score_docs,
vec![
(0.81221175, DocAddress(0u32, 1)),
(0.5376842, DocAddress(0u32, 2)),
],
]
);
}
@@ -647,12 +640,12 @@ mod tests {
.searcher()
.search(&text_query, &TopDocs::with_limit(2).and_offset(1))
.unwrap();
assert_results_equals(
&score_docs[..],
&[
assert_eq!(
score_docs,
vec![
(0.5376842, DocAddress(0u32, 2)),
(0.48527452, DocAddress(0, 0)),
],
(0.48527452, DocAddress(0, 0))
]
);
}
@@ -713,8 +706,8 @@ mod tests {
let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
assert_eq!(
&top_docs[..],
&[
top_docs,
vec![
(64, DocAddress(0, 1)),
(16, DocAddress(0, 2)),
(12, DocAddress(0, 0))
@@ -821,7 +814,7 @@ mod tests {
) -> (Index, Box<dyn Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer);
index_writer.commit().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]);

View File

@@ -49,7 +49,7 @@ pub trait ScoreTweaker<TScore>: Sync {
impl<TScoreTweaker, TScore> Collector for TweakedScoreTopCollector<TScoreTweaker, TScore>
where
TScoreTweaker: ScoreTweaker<TScore> + Send + Sync,
TScoreTweaker: ScoreTweaker<TScore>,
TScore: 'static + PartialOrd + Clone + Send + Sync,
{
type Fruit = Vec<(TScore, DocAddress)>;

View File

@@ -10,9 +10,7 @@ pub(crate) use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{
read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use byteorder::LittleEndian as Endianness;
/// Segment's max doc must be `< MAX_DOC_LIMIT`.

View File

@@ -89,19 +89,6 @@ impl FixedSize for u64 {
const SIZE_IN_BYTES: usize = 8;
}
impl BinarySerializable for f32 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_f32::<Endianness>(*self)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
reader.read_f32::<Endianness>()
}
}
impl FixedSize for f32 {
const SIZE_IN_BYTES: usize = 4;
}
impl BinarySerializable for i64 {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_i64::<Endianness>(*self)

View File

@@ -5,12 +5,12 @@ use std::io::Read;
use std::io::Write;
/// Wrapper over a `u64` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[derive(Debug, Eq, PartialEq)]
pub struct VInt(pub u64);
const STOP_BIT: u8 = 128;
pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
const START_2: u64 = 1 << 7;
const START_3: u64 = 1 << 14;
const START_4: u64 = 1 << 21;
@@ -29,7 +29,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
let val = u64::from(val);
const STOP_BIT: u64 = 128u64;
let (res, num_bytes) = match val {
match val {
0..=STOP_1 => (val | STOP_BIT, 1),
START_2..=STOP_2 => (
(val & MASK_1) | ((val & MASK_2) << 1) | (STOP_BIT << (8)),
@@ -56,9 +56,7 @@ pub fn serialize_vint_u32(val: u32, buf: &mut [u8; 8]) -> &[u8] {
| (STOP_BIT << (8 * 4)),
5,
),
};
LittleEndian::write_u64(&mut buf[..], res);
&buf[0..num_bytes]
}
}
/// Returns the number of bytes covered by a
@@ -87,26 +85,23 @@ fn vint_len(data: &[u8]) -> usize {
/// If the buffer does not start by a valid
/// vint payload
pub fn read_u32_vint(data: &mut &[u8]) -> u32 {
let (result, vlen) = read_u32_vint_no_advance(*data);
*data = &data[vlen..];
result
}
pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
let vlen = vint_len(data);
let vlen = vint_len(*data);
let mut result = 0u32;
let mut shift = 0u64;
for &b in &data[..vlen] {
result |= u32::from(b & 127u8) << shift;
shift += 7;
}
(result, vlen)
*data = &data[vlen..];
result
}
/// Write a `u32` as a vint payload.
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
let mut buf = [0u8; 8];
let data = serialize_vint_u32(val, &mut buf);
writer.write_all(&data)
let (val, num_bytes) = serialize_vint_u32(val);
let mut buffer = [0u8; 8];
LittleEndian::write_u64(&mut buffer, val);
writer.write_all(&buffer[..num_bytes])
}
impl VInt {
@@ -177,6 +172,7 @@ mod tests {
use super::serialize_vint_u32;
use super::VInt;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
fn aux_test_vint(val: u64) {
let mut v = [14u8; 10];
@@ -212,10 +208,12 @@ mod tests {
fn aux_test_serialize_vint_u32(val: u32) {
let mut buffer = [0u8; 10];
let mut buffer2 = [0u8; 8];
let mut buffer2 = [0u8; 10];
let len_vint = VInt(val as u64).serialize_into(&mut buffer);
let res2 = serialize_vint_u32(val, &mut buffer2);
assert_eq!(&buffer[..len_vint], res2, "array wrong for {}", val);
let (vint, len) = serialize_vint_u32(val);
assert_eq!(len, len_vint, "len wrong for val {}", val);
LittleEndian::write_u64(&mut buffer2, vint);
assert_eq!(&buffer[..len], &buffer2[..len], "array wrong for {}", val);
}
#[test]
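For readers following the vint changes above: the format packs a `u32` into 7-bit groups, least-significant first, and marks the final byte by setting its high bit (the `STOP_BIT`). A scalar restatement of that layout, independent of the branchless encoder in the diff:

```rust
// Encodes `val` into 7-bit groups, least-significant group first, and sets the
// high bit (0x80) on the last byte to mark the end of the payload.
fn encode_vint_u32(mut val: u32, buf: &mut [u8; 8]) -> usize {
    let mut len = 0;
    loop {
        let byte = (val & 0x7f) as u8;
        val >>= 7;
        if val == 0 {
            buf[len] = byte | 0x80; // stop bit on the final byte
            return len + 1;
        }
        buf[len] = byte;
        len += 1;
    }
}

// Decodes a payload produced by `encode_vint_u32`, returning the value and the
// number of bytes consumed.
fn decode_vint_u32(data: &[u8]) -> (u32, usize) {
    let mut result = 0u32;
    let mut shift = 0;
    for (i, &b) in data.iter().enumerate() {
        result |= u32::from(b & 0x7f) << shift;
        if b & 0x80 != 0 {
            return (result, i + 1);
        }
        shift += 7;
    }
    panic!("vint payload was not terminated by a stop bit");
}

fn main() {
    let mut buf = [0u8; 8];
    for val in [0u32, 127, 128, 300_000, u32::MAX] {
        let len = encode_vint_u32(val, &mut buf);
        assert_eq!(decode_vint_u32(&buf[..len]), (val, len));
        println!("{} -> {} byte(s)", val, len);
    }
}
```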

View File

@@ -1,6 +1,5 @@
use crossbeam::channel;
use rayon::{ThreadPool, ThreadPoolBuilder};
use slog::{error, Logger};
/// Search executor that runs search requests either single-threaded or on a thread pool.
///
@@ -44,7 +43,6 @@ impl Executor {
&self,
f: F,
args: AIterator,
logger: Logger,
) -> crate::Result<Vec<R>> {
match self {
Executor::SingleThread => args.map(f).collect::<crate::Result<_>>(),
@@ -59,7 +57,7 @@ impl Executor {
let (idx, arg) = arg_with_idx;
let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) {
error!(logger, "Failed to send search task. It probably means all search threads have panicked. {:?}", err);
error!("Failed to send search task. It probably means all search threads have panicked. {:?}", err);
}
});
}
@@ -89,21 +87,17 @@ impl Executor {
#[cfg(test)]
mod tests {
use slog::{o, Discard, Logger};
use super::Executor;
#[test]
#[should_panic(expected = "panic should propagate")]
fn test_panic_propagates_single_thread() {
let logger = Logger::root(Discard, o!());
let _result: Vec<usize> = Executor::single_thread()
.map(
|_| {
panic!("panic should propagate");
},
vec![0].into_iter(),
logger,
)
.unwrap();
}
@@ -111,7 +105,6 @@ mod tests {
#[test]
#[should_panic] //< unfortunately the panic message is not propagated
fn test_panic_propagates_multi_thread() {
let logger = Logger::root(Discard, o!());
let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
.unwrap()
.map(
@@ -119,16 +112,14 @@ mod tests {
panic!("panic should propagate");
},
vec![0].into_iter(),
logger,
)
.unwrap();
}
#[test]
fn test_map_singlethread() {
let logger = Logger::root(Discard, o!());
let result: Vec<usize> = Executor::single_thread()
.map(|i| Ok(i * 2), 0..1_000, logger)
.map(|i| Ok(i * 2), 0..1_000)
.unwrap();
assert_eq!(result.len(), 1_000);
for i in 0..1_000 {
@@ -138,10 +129,9 @@ mod tests {
#[test]
fn test_map_multithread() {
let logger = Logger::root(Discard, o!());
let result: Vec<usize> = Executor::multi_thread(3, "search-test")
.unwrap()
.map(|i| Ok(i * 2), 0..10, logger)
.map(|i| Ok(i * 2), 0..10)
.unwrap();
assert_eq!(result.len(), 10);
for i in 0..10 {

View File

@@ -21,14 +21,11 @@ use crate::schema::FieldType;
use crate::schema::Schema;
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;
use slog::Logger;
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::Path;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::sync::Arc;
fn load_metas(
@@ -58,14 +55,7 @@ pub struct Index {
}
impl Index {
pub(crate) fn logger(&self) -> &Logger {
self.directory.logger()
}
/// Examines the directory to see if it contains an index.
///
/// Effectively, it only checks for the presence of the `meta.json` file.
/// Examines the directory to see if it contains an index.
pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
dir.exists(&META_FILEPATH)
}
@@ -148,18 +138,16 @@ impl Index {
Index::create(mmap_directory, schema)
}
/// Creates a new index given an implementation of the trait `Directory`.
///
/// If a directory previously existed, it will be erased.
/// Creates a new index given an implementation of the trait `Directory`
pub fn create<Dir: Directory>(dir: Dir, schema: Schema) -> crate::Result<Index> {
let directory = ManagedDirectory::wrap(dir)?;
Index::new_from_directory(directory, schema)
Index::from_directory(directory, schema)
}
/// Create a new index from a directory.
///
/// This will overwrite existing meta.json
fn new_from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> crate::Result<Index> {
save_new_metas(schema.clone(), directory.borrow_mut())?;
let metas = IndexMeta::with_schema(schema);
Index::create_from_metas(directory, &metas, SegmentMetaInventory::default())
@@ -250,8 +238,6 @@ impl Index {
/// Open the index using the provided directory
pub fn open<D: Directory>(directory: D) -> crate::Result<Index> {
let logger: &Logger = directory.logger();
slog::info!(logger, "index-open"; "directory" => format!("{:?}", directory));
let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default();
let metas = load_metas(&directory, &inventory)?;
@@ -295,7 +281,7 @@ impl Index {
TantivyError::LockFailure(
err,
Some(
"Failed to acquire index lock. If you are using \
"Failed to acquire index lock. If you are using\
a regular directory, this means there is already an \
`IndexWriter` working on this `Directory`, in this process \
or in a different process."
@@ -312,15 +298,6 @@ impl Index {
)
}
/// Helper to create an index writer for tests.
///
/// That index writer simply uses a single thread and a 10 MB heap.
/// Using a single thread gives us a deterministic allocation of DocId.
#[cfg(test)]
pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
self.writer_with_num_threads(1, 10_000_000)
}
/// Creates a multithreaded writer
///
/// Tantivy will automatically define the number of threads to use.
@@ -523,7 +500,7 @@ mod tests {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let mut index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_for_tests().unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index
.reader_builder()
@@ -560,33 +537,23 @@ mod tests {
test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
}
}
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
let mut reader_index = reader.index();
let (sender, receiver) = crossbeam::channel::unbounded();
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
let _ = sender.send(());
}));
let mut writer = index.writer_for_tests().unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64));
writer.commit().unwrap();
// We need a loop here because it is possible for notify to send more than
// one modify event. It was observed on CI on MacOS.
loop {
assert!(receiver.recv().is_ok());
if reader.searcher().num_docs() == 1 {
break;
}
}
assert!(receiver.recv().is_ok());
assert_eq!(reader.searcher().num_docs(), 1);
writer.add_document(doc!(field=>2u64));
writer.commit().unwrap();
// ... Same as above
loop {
assert!(receiver.recv().is_ok());
if reader.searcher().num_docs() == 2 {
break;
}
}
assert!(receiver.recv().is_ok());
assert_eq!(reader.searcher().num_docs(), 2);
}
// This test will not pass on windows, because windows

View File

@@ -213,7 +213,7 @@ pub struct IndexMeta {
#[serde(skip_serializing_if = "Option::is_none")]
/// Payload associated to the last commit.
///
/// Upon commit, clients can optionally add a small `String` payload to their commit
/// Upon commit, clients can optionally add a small `String` payload to their commit
/// to help identify this commit.
/// This payload is entirely unused by tantivy.
pub payload: Option<String>,

View File

@@ -3,6 +3,7 @@ use crate::directory::ReadOnlySource;
use crate::positions::PositionReader;
use crate::postings::TermInfo;
use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::FieldType;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::termdict::TermDictionary;
@@ -53,7 +54,10 @@ impl InvertedIndexReader {
/// Creates an empty `InvertedIndexReader` object, which
/// contains no terms at all.
pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
let record_option = field_type
.get_index_record_option()
.unwrap_or(IndexRecordOption::Basic);
InvertedIndexReader {
termdict: TermDictionary::empty(),
postings_source: ReadOnlySource::empty(),

View File

@@ -143,7 +143,6 @@ impl Searcher {
collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
},
segment_readers.iter().enumerate(),
self.index.logger().clone(),
)?;
collector.merge_fruits(fruits)
}

View File

@@ -21,12 +21,6 @@ use std::sync::atomic;
#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SegmentId(Uuid);
impl ToString for SegmentId {
fn to_string(&self) -> String {
self.short_uuid_string()
}
}
#[cfg(test)]
static AUTO_INC_COUNTER: Lazy<atomic::AtomicUsize> = Lazy::new(|| atomic::AtomicUsize::default());

View File

@@ -8,16 +8,15 @@ use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet;
use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::fieldnorm::FieldNormReader;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::schema::{Field, IndexRecordOption};
use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::DocId;
use fail::fail_point;
use slog::{warn, Logger};
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
@@ -49,12 +48,11 @@ pub struct SegmentReader {
positions_composite: CompositeFile,
positions_idx_composite: CompositeFile,
fast_fields_readers: Arc<FastFieldReaders>,
fieldnorm_readers: FieldNormReaders,
fieldnorms_composite: CompositeFile,
store_source: ReadOnlySource,
delete_bitset_opt: Option<DeleteBitSet>,
schema: Schema,
logger: Logger,
}
impl SegmentReader {
@@ -127,15 +125,17 @@ impl SegmentReader {
///
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
self.fieldnorm_readers.get_field(field).ok_or_else(|| {
pub fn get_fieldnorms_reader(&self, field: Field) -> FieldNormReader {
if let Some(fieldnorm_source) = self.fieldnorms_composite.open_read(field) {
FieldNormReader::open(fieldnorm_source)
} else {
let field_name = self.schema.get_field_name(field);
let err_msg = format!(
"Field norm not found for field {:?}. Was it marked as indexed during indexing?",
field_name
);
crate::TantivyError::SchemaError(err_msg)
})
panic!(err_msg);
}
}
/// Accessor to the segment's `StoreReader`.
@@ -178,8 +178,8 @@ impl SegmentReader {
let fast_field_readers =
Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
let fieldnorm_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
let delete_bitset_opt = if segment.meta().has_deletes() {
let delete_data = segment.open_read(SegmentComponent::DELETE)?;
@@ -195,14 +195,13 @@ impl SegmentReader {
termdict_composite,
postings_composite,
fast_fields_readers: fast_field_readers,
fieldnorm_readers,
fieldnorms_composite,
segment_id: segment.id(),
store_source,
delete_bitset_opt,
positions_composite,
positions_idx_composite,
schema,
logger: segment.index().logger().clone(),
})
}
@@ -213,11 +212,6 @@ impl SegmentReader {
/// The field reader is in charge of iterating through the
/// term dictionary associated to a specific field,
/// and opening the posting list associated to any term.
///
/// If the field is not marked as indexed, a warning is logged and an empty `InvertedIndexReader`
/// is returned.
/// Similarly, if the field is marked as indexed but no term has been indexed for the given
/// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
if let Some(inv_idx_reader) = self
.inv_idx_reader_cache
@@ -232,25 +226,21 @@ impl SegmentReader {
let record_option_opt = field_type.get_index_record_option();
if record_option_opt.is_none() {
warn!(
self.logger,
"Field {:?} does not seem indexed.",
field_entry.name()
);
panic!("Field {:?} does not seem indexed.", field_entry.name());
}
let record_option = record_option_opt.unwrap();
let postings_source_opt = self.postings_composite.open_read(field);
if postings_source_opt.is_none() || record_option_opt.is_none() {
if postings_source_opt.is_none() {
// no documents in the segment contained this field.
// As a result, no data is associated to the inverted index.
//
// Returns an empty inverted index.
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
return Arc::new(InvertedIndexReader::empty(record_option));
return Arc::new(InvertedIndexReader::empty(field_type));
}
let record_option = record_option_opt.unwrap();
let postings_source = postings_source_opt.unwrap();
let termdict_source = self.termdict_composite.open_read(field).expect(
@@ -318,7 +308,7 @@ impl SegmentReader {
self.positions_composite.space_usage(),
self.positions_idx_composite.space_usage(),
self.fast_fields_readers.space_usage(),
self.fieldnorm_readers.space_usage(),
self.fieldnorms_composite.space_usage(),
self.get_store_reader().space_usage(),
self.delete_bitset_opt
.as_ref()
@@ -349,7 +339,7 @@ mod test {
let name = schema.get_field("name").unwrap();
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(name => "tantivy"));
index_writer.add_document(doc!(name => "horse"));
index_writer.add_document(doc!(name => "jockey"));

View File

@@ -1,5 +1,3 @@
use slog::{error, Logger};
use crate::directory::directory_lock::Lock;
use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
@@ -66,10 +64,7 @@ impl<T: Send + Sync + 'static> From<Box<T>> for DirectoryLock {
impl Drop for DirectoryLockGuard {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&*self.path) {
error!(
self.directory.logger(),
"Failed to remove the lock file. {:?}", e
);
error!("Failed to remove the lock file. {:?}", e);
}
}
}
@@ -85,7 +80,7 @@ fn try_acquire_lock(
) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
OpenWriteError::IOError { io_error, .. } => TryAcquireLockError::IOError(io_error),
OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
})?;
write.flush().map_err(TryAcquireLockError::IOError)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
@@ -214,9 +209,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the
/// `OnCommit` `ReloadPolicy` to work properly.
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle>;
/// Returns the `slog::Logger` configured for the `Directory`.
fn logger(&self) -> &Logger;
}
/// DirectoryClone

View File

@@ -1,60 +1,160 @@
use crate::Version;
use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::path::PathBuf;
/// Error while trying to acquire a directory lock.
#[derive(Debug, Error)]
#[derive(Debug, Fail)]
pub enum LockError {
/// Failed to acquire a lock as it is already held by another
/// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
#[error("Could not acquire lock as it is already held, possibly by a different process.")]
#[fail(
display = "Could not acquire lock as it is already held, possibly by a different process."
)]
LockBusy,
/// Trying to acquire a lock failed with an `IOError`
#[error("Failed to acquire the lock due to an io::Error.")]
#[fail(display = "Failed to acquire the lock due to an io::Error.")]
IOError(io::Error),
}
/// General IO error with an optional path to the offending file.
#[derive(Debug)]
pub struct IOError {
path: Option<PathBuf>,
err: io::Error,
}
impl Into<io::Error> for IOError {
fn into(self) -> io::Error {
self.err
}
}
impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.path {
Some(ref path) => write!(f, "io error occurred on path '{:?}': '{}'", path, self.err),
None => write!(f, "io error occurred: '{}'", self.err),
}
}
}
impl StdError for IOError {
fn description(&self) -> &str {
"io error occurred"
}
fn cause(&self) -> Option<&dyn StdError> {
Some(&self.err)
}
}
impl IOError {
pub(crate) fn with_path(path: PathBuf, err: io::Error) -> Self {
IOError {
path: Some(path),
err,
}
}
}
impl From<io::Error> for IOError {
fn from(err: io::Error) -> IOError {
IOError { path: None, err }
}
}
/// Error that may occur when opening a directory
#[derive(Debug, Error)]
#[derive(Debug)]
pub enum OpenDirectoryError {
/// The underlying directory does not exist.
#[error("Directory does not exist: '{0}'.")]
DoesNotExist(PathBuf),
/// The path exists but is not a directory.
#[error("Path exists but is not a directory: '{0}'.")]
NotADirectory(PathBuf),
/// Failed to create a temp directory.
#[error("Failed to create a temporary directory: '{0}'.")]
FailedToCreateTempDir(io::Error),
/// IoError
#[error("IOError '{io_error:?}' while create directory in: '{directory_path:?}'.")]
IoError {
/// underlying io Error.
io_error: io::Error,
/// directory we tried to open.
directory_path: PathBuf,
},
IoError(io::Error),
}
impl From<io::Error> for OpenDirectoryError {
fn from(io_err: io::Error) -> Self {
OpenDirectoryError::IoError(io_err)
}
}
impl fmt::Display for OpenDirectoryError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenDirectoryError::DoesNotExist(ref path) => {
write!(f, "the underlying directory '{:?}' does not exist", path)
}
OpenDirectoryError::NotADirectory(ref path) => {
write!(f, "the path '{:?}' exists but is not a directory", path)
}
OpenDirectoryError::IoError(ref err) => write!(
f,
"IOError while trying to open/create the directory. {:?}",
err
),
}
}
}
impl StdError for OpenDirectoryError {
fn description(&self) -> &str {
"error occurred while opening a directory"
}
fn cause(&self) -> Option<&dyn StdError> {
None
}
}
/// Error that may occur when starting to write in a file
#[derive(Debug, Error)]
#[derive(Debug)]
pub enum OpenWriteError {
/// Our directory is WORM, writing an existing file is forbidden.
/// Checkout the `Directory` documentation.
#[error("File already exists: '{0}'")]
FileAlreadyExists(PathBuf),
/// Any kind of IO error that happens when
/// writing in the underlying IO device.
#[error("IOError '{io_error:?}' while opening file for write: '{filepath}'.")]
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to open for write.
filepath: PathBuf,
},
IOError(IOError),
}
impl From<IOError> for OpenWriteError {
fn from(err: IOError) -> OpenWriteError {
OpenWriteError::IOError(err)
}
}
impl fmt::Display for OpenWriteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenWriteError::FileAlreadyExists(ref path) => {
write!(f, "the file '{:?}' already exists", path)
}
OpenWriteError::IOError(ref err) => write!(
f,
"an io error occurred while opening a file for writing: '{}'",
err
),
}
}
}
impl StdError for OpenWriteError {
fn description(&self) -> &str {
"error occurred while opening a file for writing"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenWriteError::FileAlreadyExists(_) => None,
OpenWriteError::IOError(ref err) => Some(err),
}
}
}
/// Type of index incompatibility between the library and the index found on disk
@@ -117,41 +217,55 @@ impl fmt::Debug for Incompatibility {
}
/// Error that may occur when accessing a file read
#[derive(Debug, Error)]
#[derive(Debug)]
pub enum OpenReadError {
/// The file does not exist.
#[error("File does not exist: {0:?}")]
FileDoesNotExist(PathBuf),
/// Any kind of io::Error.
#[error(
"IOError: '{io_error:?}' happened while opening the following file for Read: {filepath}."
)]
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to open for read.
filepath: PathBuf,
},
/// This library does not support the index version found in file footer.
#[error("Index version unsupported: {0:?}")]
IncompatibleIndex(Incompatibility),
}
/// Error that may occur when trying to delete a file
#[derive(Debug, Error)]
pub enum DeleteError {
/// The file does not exist.
#[error("File does not exist: '{0}'.")]
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
#[error("The following IO error happened while deleting file '{filepath}': '{io_error:?}'.")]
IOError {
/// The underlying `io::Error`.
io_error: io::Error,
/// File path of the file that tantivy failed to delete.
filepath: PathBuf,
},
IOError(IOError),
/// This library doesn't support the index version found on disk
IncompatibleIndex(Incompatibility),
}
impl From<IOError> for OpenReadError {
fn from(err: IOError) -> OpenReadError {
OpenReadError::IOError(err)
}
}
impl fmt::Display for OpenReadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
OpenReadError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
OpenReadError::IOError(ref err) => write!(
f,
"an io error occurred while opening a file for reading: '{}'",
err
),
OpenReadError::IncompatibleIndex(ref footer) => {
write!(f, "Incompatible index format: {:?}", footer)
}
}
}
}
/// Error that may occur when trying to delete a file
#[derive(Debug)]
pub enum DeleteError {
/// The file does not exist.
FileDoesNotExist(PathBuf),
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
}
impl From<IOError> for DeleteError {
fn from(err: IOError) -> DeleteError {
DeleteError::IOError(err)
}
}
impl From<Incompatibility> for OpenReadError {
@@ -159,3 +273,29 @@ impl From<Incompatibility> for OpenReadError {
OpenReadError::IncompatibleIndex(incompatibility)
}
}
impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DeleteError::FileDoesNotExist(ref path) => {
write!(f, "the file '{:?}' does not exist", path)
}
DeleteError::IOError(ref err) => {
write!(f, "an io error occurred while deleting a file: '{}'", err)
}
}
}
}
impl StdError for DeleteError {
fn description(&self) -> &str {
"error occurred while deleting a file"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
DeleteError::FileDoesNotExist(_) => None,
DeleteError::IOError(ref err) => Some(err),
}
}
}

View File

@@ -94,24 +94,12 @@ impl Footer {
match &self.versioned_footer {
VersionedFooter::V1 {
crc32: _crc,
store_compression,
store_compression: compression,
} => {
if &library_version.store_compression != store_compression {
if &library_version.store_compression != compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
});
}
Ok(())
}
VersionedFooter::V2 {
crc32: _crc,
store_compression,
} => {
if &library_version.store_compression != store_compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: store_compression.to_string(),
index_compression_format: compression.to_string(),
});
}
Ok(())
@@ -132,29 +120,24 @@ pub enum VersionedFooter {
crc32: CrcHashU32,
store_compression: String,
},
// Introduction of the Block WAND information.
V2 {
crc32: CrcHashU32,
store_compression: String,
},
}
impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new();
match self {
VersionedFooter::V2 {
VersionedFooter::V1 {
crc32,
store_compression: compression,
} => {
// Serializes a valid `VersionedFooter` or panics if the version is unknown
// [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&2u32, &mut buf)?;
BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?;
}
VersionedFooter::V1 { .. } | VersionedFooter::UnknownVersion => {
VersionedFooter::UnknownVersion => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ",
@@ -183,30 +166,22 @@ impl BinarySerializable for VersionedFooter {
reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?;
if version != 1 && version != 2 {
return Ok(VersionedFooter::UnknownVersion);
}
let crc32 = u32::deserialize(&mut cursor)?;
let store_compression = String::deserialize(&mut cursor)?;
Ok(if version == 1 {
VersionedFooter::V1 {
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression,
}
store_compression: compression,
})
} else {
assert_eq!(version, 2);
VersionedFooter::V2 {
crc32,
store_compression,
}
})
Ok(VersionedFooter::UnknownVersion)
}
}
}
impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> {
match self {
VersionedFooter::V2 { crc32, .. } => Some(*crc32),
VersionedFooter::V1 { crc32, .. } => Some(*crc32),
VersionedFooter::UnknownVersion { .. } => None,
}
@@ -244,7 +219,7 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V2 {
let footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: crate::store::COMPRESSION.to_string(),
});
@@ -271,17 +246,17 @@ mod tests {
let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok());
if crate::store::COMPRESSION == "lz4" {
assert_eq!(vec.len(), 158);
} else {
assert_eq!(vec.len(), 167);
}
assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
assert!(matches!(
footer.versioned_footer,
VersionedFooter::V2 { store_compression, .. }
if store_compression == crate::store::COMPRESSION
));
if let VersionedFooter::V1 {
crc32: _,
store_compression,
} = footer.versioned_footer
{
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version());
}
@@ -289,7 +264,7 @@ mod tests {
fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new();
let crc32 = 123456u32;
let footer: Footer = Footer::new(VersionedFooter::V2 {
let footer: Footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
});
@@ -301,7 +276,7 @@ mod tests {
#[test]
fn footer_length() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V2 {
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
@@ -322,7 +297,7 @@ mod tests {
// versioned footer length
12 | 128,
// index format version
2,
1,
0,
0,
0,
@@ -341,7 +316,7 @@ mod tests {
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V2 {
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc,
store_compression: "lz4".to_string(),
};

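A minimal standalone sketch of the version-gated footer pattern above, where an unrecognized version deserializes to an opaque `UnknownVersion`-style variant instead of failing. The names (`FooterSketch`, `serialize`, `deserialize`) and the byte layout (little-endian u32s plus a u32-length-prefixed string) are illustrative only and do not reproduce tantivy's `BinarySerializable` encoding:

use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Read, Write};

#[derive(Debug, PartialEq)]
enum FooterSketch {
    V1 { crc32: u32, compression: String },
    Unknown,
}

fn serialize(footer: &FooterSketch, wrt: &mut impl Write) -> io::Result<()> {
    match footer {
        FooterSketch::V1 { crc32, compression } => {
            wrt.write_u32::<LittleEndian>(1)?;      // version
            wrt.write_u32::<LittleEndian>(*crc32)?; // checksum of the payload
            wrt.write_u32::<LittleEndian>(compression.len() as u32)?;
            wrt.write_all(compression.as_bytes())
        }
        FooterSketch::Unknown => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "cannot serialize an unknown footer version",
        )),
    }
}

fn deserialize(rdr: &mut impl Read) -> io::Result<FooterSketch> {
    let version = rdr.read_u32::<LittleEndian>()?;
    if version != 1 {
        // An unknown version is not an error at read time: it is surfaced as
        // `Unknown` so that compatibility can be decided later, as `is_compatible`
        // does for the real footer.
        return Ok(FooterSketch::Unknown);
    }
    let crc32 = rdr.read_u32::<LittleEndian>()?;
    let len = rdr.read_u32::<LittleEndian>()? as usize;
    let mut buf = vec![0u8; len];
    rdr.read_exact(&mut buf)?;
    let compression = String::from_utf8(buf)
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
    Ok(FooterSketch::V1 { crc32, compression })
}

fn main() -> io::Result<()> {
    let footer = FooterSketch::V1 { crc32: 0xDEAD_BEEF, compression: "lz4".to_string() };
    let mut bytes = Vec::new();
    serialize(&footer, &mut bytes)?;
    assert_eq!(deserialize(&mut &bytes[..])?, footer);
    Ok(())
}
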
View File

@@ -1,5 +1,5 @@
use crate::core::{MANAGED_FILEPATH, META_FILEPATH};
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
@@ -11,9 +11,9 @@ use crate::error::DataCorruption;
use crate::Directory;
use crc32fast::Hasher;
use slog::{debug, error, info};
use std::collections::HashSet;
use std::io;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::result;
use std::sync::RwLockWriteGuard;
@@ -56,9 +56,9 @@ fn save_managed_paths(
directory: &mut dyn Directory,
wlock: &RwLockWriteGuard<'_, MetaInformation>,
) -> io::Result<()> {
let mut managed_json = serde_json::to_string_pretty(&wlock.managed_paths)?;
managed_json.push_str("\n");
directory.atomic_write(&MANAGED_FILEPATH, managed_json.as_bytes())?;
let mut w = serde_json::to_vec(&wlock.managed_paths)?;
writeln!(&mut w)?;
directory.atomic_write(&MANAGED_FILEPATH, &w[..])?;
Ok(())
}
@@ -86,12 +86,7 @@ impl ManagedDirectory {
directory: Box::new(directory),
meta_informations: Arc::default(),
}),
Err(OpenReadError::IOError { io_error, filepath }) => {
Err(crate::TantivyError::OpenReadError(OpenReadError::IOError {
io_error,
filepath,
}))
}
Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json`
// does not have any footer, so incompatibility cannot be detected.
@@ -118,7 +113,7 @@ impl ManagedDirectory {
&mut self,
get_living_files: L,
) -> crate::Result<GarbageCollectionResult> {
info!(self.directory.logger(), "gc"; "stage"=>"start");
info!("Garbage collect");
let mut files_to_delete = vec![];
// It is crucial to get the living files after acquiring the
@@ -153,7 +148,7 @@ impl ManagedDirectory {
}
}
Err(err) => {
error!(self.logger(), "Failed to acquire lock for GC");
error!("Failed to acquire lock for GC");
return Err(crate::TantivyError::from(err));
}
}
@@ -165,7 +160,7 @@ impl ManagedDirectory {
for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) {
Ok(_) => {
debug!(self.logger(), "deleted-success"; "file"=>format!("{:?}", file_to_delete));
info!("Deleted {:?}", file_to_delete);
deleted_files.push(file_to_delete);
}
Err(file_error) => {
@@ -173,12 +168,12 @@ impl ManagedDirectory {
DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete.clone());
}
DeleteError::IOError { .. } => {
DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file
// is mmapped.
error!(self.logger(), "delete-file-fail"; "path"=>file_to_delete.to_str().unwrap_or("<invalid-utf8>"));
error!("Failed to delete {:?}", file_to_delete);
}
}
}
@@ -200,10 +195,6 @@ impl ManagedDirectory {
save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?;
}
info!(self.directory.logger(), "gc"; "stage"=>"end",
"num-sucess-file-deletes"=>deleted_files.len(),
"num-failed-file-deletes"=>failed_to_delete_files.len());
Ok(GarbageCollectionResult {
deleted_files,
failed_to_delete_files,
@@ -240,11 +231,8 @@ impl ManagedDirectory {
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) =
Footer::extract_footer(reader).map_err(|io_error| OpenReadError::IOError {
io_error,
filepath: path.to_path_buf(),
})?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
@@ -257,46 +245,35 @@ impl ManagedDirectory {
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut managed_paths = self
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
managed_paths.remove(*META_FILEPATH);
let mut damaged_files = HashSet::new();
for path in managed_paths {
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
damaged_files.insert(path);
hashset.insert(path);
}
}
Ok(damaged_files)
Ok(hashset)
}
}
impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
slog::debug!(self.logger(), "open-read"; "path" => path.to_str().unwrap_or("<invalid-utf8>"));
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source).map_err(|io_error| {
OpenReadError::IOError {
io_error,
filepath: path.to_path_buf(),
}
})?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
slog::debug!(self.logger(), "open-write"; "path" => path.to_str().unwrap_or("<invalid-utf8>"));
self.register_file_as_managed(path)
.map_err(|io_error| OpenWriteError::IOError {
io_error,
filepath: path.to_path_buf(),
})?;
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory
.open_write(path)?
@@ -306,11 +283,9 @@ impl Directory for ManagedDirectory {
))))
}
fn atomic_write(&mut self, path: &Path, content: &[u8]) -> io::Result<()> {
let content_str = std::str::from_utf8(content).unwrap_or("<content-not-utf-8>");
slog::debug!(self.logger(), "Atomic write"; "path" => format!("{:?}", path), "content_length"=>content_str);
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
self.register_file_as_managed(path)?;
self.directory.atomic_write(path, content)
self.directory.atomic_write(path, data)
}
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
@@ -332,10 +307,6 @@ impl Directory for ManagedDirectory {
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
self.directory.watch(watch_callback)
}
fn logger(&self) -> &slog::Logger {
self.directory.logger()
}
}
impl Clone for ManagedDirectory {

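A small self-contained sketch of the checksum step behind `validate_checksum` and `list_damaged` above, using the `crc32fast::Hasher` already imported in this file; `payload_matches_footer` and `expected_crc` are illustrative names, and the footer extraction itself is elided:

// Recompute the crc32 of the file payload and compare it against the checksum
// stored in the footer.
fn payload_matches_footer(payload: &[u8], expected_crc: u32) -> bool {
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(payload);
    hasher.finalize() == expected_crc
}

fn main() {
    let data = b"some segment payload";
    let crc = {
        let mut h = crc32fast::Hasher::new();
        h.update(data);
        h.finalize()
    };
    assert!(payload_matches_footer(data, crc));
    assert!(!payload_matches_footer(b"tampered payload", crc));
}
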
View File

@@ -1,6 +1,8 @@
use crate::core::META_FILEPATH;
use crate::directory::error::LockError;
use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
@@ -17,8 +19,6 @@ use notify::RawEvent;
use notify::RecursiveMode;
use notify::Watcher;
use serde::{Deserialize, Serialize};
use slog::{debug, o, Drain, Logger};
use slog_stdlog::StdLog;
use std::collections::HashMap;
use std::convert::From;
use std::fmt;
@@ -36,6 +36,11 @@ use std::sync::Weak;
use std::thread;
use tempfile::TempDir;
/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
}
/// Returns None iff the file exists, can be read, but is empty (and hence
/// cannot be mmapped)
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
@@ -43,17 +48,13 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
if e.kind() == io::ErrorKind::NotFound {
OpenReadError::FileDoesNotExist(full_path.to_owned())
} else {
OpenReadError::IOError {
io_error: e,
filepath: full_path.to_owned(),
}
OpenReadError::IOError(IOError::with_path(full_path.to_owned(), e))
}
})?;
let meta_data = file.metadata().map_err(|e| OpenReadError::IOError {
io_error: e,
filepath: full_path.to_owned(),
})?;
let meta_data = file
.metadata()
.map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
if meta_data.len() == 0 {
// if the file size is 0, it will not be possible
// to mmap the file, so we return None
@@ -63,10 +64,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
unsafe {
memmap::Mmap::map(&file)
.map(Some)
.map_err(|e| OpenReadError::IOError {
io_error: e,
filepath: full_path.to_owned(),
})
.map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
}
}
@@ -146,7 +144,7 @@ struct WatcherWrapper {
}
impl WatcherWrapper {
pub(crate) fn new(path: &Path, logger: Logger) -> Result<Self, OpenDirectoryError> {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let watcher = notify::raw_watcher(tx)
@@ -160,8 +158,7 @@ impl WatcherWrapper {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> =
Arc::new(WatchCallbackList::with_logger(logger));
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
thread::Builder::new()
.name("meta-file-watch-thread".to_string())
@@ -186,10 +183,6 @@ impl WatcherWrapper {
}
}
}
})
.map_err(|io_error| OpenDirectoryError::IoError {
io_error,
directory_path: path.to_path_buf(),
})?;
Ok(WatcherWrapper {
_watcher: Mutex::new(watcher),
@@ -224,21 +217,15 @@ struct MmapDirectoryInner {
mmap_cache: RwLock<MmapCache>,
_temp_directory: Option<TempDir>,
watcher: RwLock<Option<WatcherWrapper>>,
logger: Logger,
}
impl MmapDirectoryInner {
fn new(
root_path: PathBuf,
temp_directory: Option<TempDir>,
logger: Logger,
) -> MmapDirectoryInner {
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectoryInner {
MmapDirectoryInner {
root_path,
mmap_cache: Default::default(),
_temp_directory: temp_directory,
watcher: RwLock::new(None),
logger,
}
}
@@ -250,7 +237,7 @@ impl MmapDirectoryInner {
// The downside is that we might create a watch wrapper that is not useful.
let need_initialization = self.watcher.read().unwrap().is_none();
if need_initialization {
let watch_wrapper = WatcherWrapper::new(&self.root_path, self.logger.clone())?;
let watch_wrapper = WatcherWrapper::new(&self.root_path)?;
let mut watch_wlock = self.watcher.write().unwrap();
// the watcher could have been initialized when we released the lock, and
// we do not want to lose the watched files that were set.
@@ -273,8 +260,8 @@ impl fmt::Debug for MmapDirectory {
}
impl MmapDirectory {
fn new(root_path: PathBuf, temp_directory: Option<TempDir>, logger: Logger) -> MmapDirectory {
let inner = MmapDirectoryInner::new(root_path, temp_directory, logger);
fn new(root_path: PathBuf, temp_directory: Option<TempDir>) -> MmapDirectory {
let inner = MmapDirectoryInner::new(root_path, temp_directory);
MmapDirectory {
inner: Arc::new(inner),
}
@@ -285,19 +272,16 @@ impl MmapDirectory {
/// This is mostly useful to test the MmapDirectory itself.
/// For your unit tests, prefer the RAMDirectory.
pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
let tempdir = TempDir::new().map_err(OpenDirectoryError::FailedToCreateTempDir)?;
let logger = Logger::root(StdLog.fuse(), o!());
Ok(MmapDirectory::new(tempdir.path().to_owned(), Some(tempdir), logger))
let tempdir = TempDir::new().map_err(OpenDirectoryError::IoError)?;
let tempdir_path = PathBuf::from(tempdir.path());
Ok(MmapDirectory::new(tempdir_path, Some(tempdir)))
}
/// Opens a MmapDirectory in a directory.
///
/// Returns an error if the `directory_path` does not
/// exist or if it is not a directory.
pub fn open_with_logger<P: AsRef<Path>>(
directory_path: P,
logger: Logger,
) -> Result<MmapDirectory, OpenDirectoryError> {
pub fn open<P: AsRef<Path>>(directory_path: P) -> Result<MmapDirectory, OpenDirectoryError> {
let directory_path: &Path = directory_path.as_ref();
if !directory_path.exists() {
Err(OpenDirectoryError::DoesNotExist(PathBuf::from(
@@ -308,20 +292,10 @@ impl MmapDirectory {
directory_path,
)))
} else {
Ok(MmapDirectory::new(
PathBuf::from(directory_path),
None,
logger,
))
Ok(MmapDirectory::new(PathBuf::from(directory_path), None))
}
}
/// Creates an `MmapDirectory` at the given path.
pub fn open<P: AsRef<Path>>(directory_path: P) -> Result<MmapDirectory, OpenDirectoryError> {
let logger = Logger::root(StdLog.fuse(), o!());
Self::open_with_logger(directory_path, logger)
}
/// Joins a relative_path to the directory `root_path`
/// to create a proper complete `filepath`.
fn resolve_path(&self, relative_path: &Path) -> PathBuf {
@@ -381,12 +355,11 @@ impl MmapDirectory {
struct ReleaseLockFile {
_file: File,
path: PathBuf,
logger: Logger,
}
impl Drop for ReleaseLockFile {
fn drop(&mut self) {
debug!(self.logger, "Releasing lock {:?}", self.path);
debug!("Releasing lock {:?}", self.path);
}
}
@@ -425,18 +398,16 @@ impl TerminatingWrite for SafeFileWriter {
impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
let io_error = io::Error::new(io::ErrorKind::Other, msg);
OpenReadError::IOError {
io_error,
filepath: path.to_owned(),
}
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
@@ -449,18 +420,14 @@ impl Directory for MmapDirectory {
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
let full_path = self.resolve_path(path);
match fs::remove_file(&full_path) {
Ok(_) => self.sync_directory().map_err(|e| DeleteError::IOError {
io_error: e,
filepath: path.to_path_buf(),
}),
Ok(_) => self
.sync_directory()
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(DeleteError::FileDoesNotExist(path.to_owned()))
} else {
Err(DeleteError::IOError {
io_error: e,
filepath: path.to_path_buf(),
})
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
@@ -472,7 +439,9 @@ impl Directory for MmapDirectory {
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path);
let open_res = OpenOptions::new()
.write(true)
.create_new(true)
@@ -482,25 +451,18 @@ impl Directory for MmapDirectory {
if err.kind() == io::ErrorKind::AlreadyExists {
OpenWriteError::FileAlreadyExists(path.to_owned())
} else {
OpenWriteError::IOError {
io_error: err,
filepath: path.to_owned(),
}
IOError::with_path(path.to_owned(), err).into()
}
})?;
// making sure the file is created.
file.flush().map_err(|io_error| OpenWriteError::IOError {
io_error,
filepath: path.to_owned(),
})?;
file.flush()
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
// Apparently, on some filesystems, syncing the parent
// directory is required.
self.sync_directory().map_err(|e| OpenWriteError::IOError {
io_error: e,
filepath: path.to_owned(),
})?;
self.sync_directory()
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
let writer = SafeFileWriter::new(file);
Ok(BufWriter::new(Box::new(writer)))
@@ -512,31 +474,24 @@ impl Directory for MmapDirectory {
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|io_error| OpenReadError::IOError {
io_error,
filepath: path.to_owned(),
})?;
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(io_error) => {
if io_error.kind() == io::ErrorKind::NotFound {
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(OpenReadError::IOError {
io_error,
filepath: path.to_owned(),
})
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn atomic_write(&mut self, path: &Path, content: &[u8]) -> io::Result<()> {
let mut tempfile = tempfile::Builder::new().tempfile_in(&self.inner.root_path)?;
tempfile.write_all(content)?;
tempfile.flush()?;
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path);
tempfile.into_temp_path().persist(full_path)?;
let meta_file = atomicwrites::AtomicFile::new(full_path, atomicwrites::AllowOverwrite);
meta_file.write(|f| f.write_all(data))?;
Ok(())
}
@@ -553,22 +508,16 @@ impl Directory for MmapDirectory {
} else {
file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
}
let logger = self.inner.logger.clone();
// dropping the file handle will release the lock.
Ok(DirectoryLock::from(Box::new(ReleaseLockFile {
path: lock.filepath.clone(),
_file: file,
logger,
})))
}
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
self.inner.watch(watch_callback)
}
fn logger(&self) -> &Logger {
&self.inner.logger
}
}
#[cfg(test)]
@@ -678,8 +627,7 @@ mod tests {
let counter_clone = counter.clone();
let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dirpath = tmp_dir.path().to_owned();
let logger = Logger::root(slog::Discard, o!());
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath, logger).unwrap();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join(*META_FILEPATH);
let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
@@ -704,7 +652,7 @@ mod tests {
{
let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy));

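A standalone sketch of the write-to-a-temporary-file-then-rename approach that the `tempfile`-based `atomic_write` above relies on; `atomic_write_sketch` is an illustrative name, and the directory syncing performed by the real implementation is omitted:

use std::io::{self, Write};
use std::path::Path;

fn atomic_write_sketch(dir: &Path, dest: &Path, content: &[u8]) -> io::Result<()> {
    // Write the full content to a temporary file living in the destination directory.
    let mut tmp = tempfile::Builder::new().tempfile_in(dir)?;
    tmp.write_all(content)?;
    tmp.flush()?;
    // Renaming within the same directory is what makes the final write atomic:
    // readers see either the old file or the new one, never a partial write.
    tmp.persist(dest).map_err(|e| e.error)?;
    Ok(())
}

fn main() -> io::Result<()> {
    let dir = tempfile::tempdir()?;
    let dest = dir.path().join("meta.json");
    atomic_write_sketch(dir.path(), &dest, b"{}")?;
    assert_eq!(std::fs::read(&dest)?, b"{}".to_vec());
    Ok(())
}
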
View File

@@ -23,8 +23,7 @@ pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::path::PathBuf;
/// Outcome of the Garbage collection

View File

@@ -5,8 +5,6 @@ use crate::directory::WatchCallbackList;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use slog::{o, Drain, Logger};
use slog_stdlog::StdLog;
use std::collections::HashMap;
use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
@@ -68,7 +66,7 @@ impl Write for VecWriter {
fn flush(&mut self) -> io::Result<()> {
self.is_flushed = true;
let mut fs = self.shared_directory.fs.inner_directory.write().unwrap();
let mut fs = self.shared_directory.fs.write().unwrap();
fs.write(self.path.clone(), self.data.get_ref());
Ok(())
}
@@ -80,19 +78,13 @@ impl TerminatingWrite for VecWriter {
}
}
#[derive(Default)]
struct InnerDirectory {
fs: HashMap<PathBuf, ReadOnlySource>,
watch_router: WatchCallbackList,
}
impl InnerDirectory {
fn with_logger(logger: Logger) -> Self {
InnerDirectory {
fs: Default::default(),
watch_router: WatchCallbackList::with_logger(logger.clone()),
}
}
fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
let data = ReadOnlySource::new(Vec::from(data));
self.fs.insert(path, data).is_some()
@@ -125,32 +117,20 @@ impl InnerDirectory {
}
}
impl Default for RAMDirectory {
fn default() -> RAMDirectory {
let logger = Logger::root(StdLog.fuse(), o!());
Self::with_logger(logger)
}
}
impl fmt::Debug for RAMDirectory {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "RAMDirectory")
}
}
struct Inner {
inner_directory: RwLock<InnerDirectory>,
logger: Logger,
}
/// A Directory storing everything in anonymous memory.
///
/// It is mainly meant for unit testing.
/// Writes are only made visible upon flushing.
///
#[derive(Clone)]
#[derive(Clone, Default)]
pub struct RAMDirectory {
fs: Arc<Inner>,
fs: Arc<RwLock<InnerDirectory>>,
}
impl RAMDirectory {
@@ -159,21 +139,10 @@ impl RAMDirectory {
Self::default()
}
/// Create a `RAMDirectory` with a custom logger.
pub fn with_logger(logger: Logger) -> RAMDirectory {
let inner_directory = InnerDirectory::with_logger(logger.clone()).into();
RAMDirectory {
fs: Arc::new(Inner {
inner_directory,
logger,
}),
}
}
/// Returns the sum of the size of the different files
/// in the RAMDirectory.
pub fn total_mem_usage(&self) -> usize {
self.fs.inner_directory.read().unwrap().total_mem_usage()
self.fs.read().unwrap().total_mem_usage()
}
/// Write a copy of all of the files saved in the RAMDirectory in the target `Directory`.
@@ -183,7 +152,7 @@ impl RAMDirectory {
///
/// If an error is encountered, files may be persisted partially.
pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
let wlock = self.fs.inner_directory.write().unwrap();
let wlock = self.fs.write().unwrap();
for (path, source) in wlock.fs.iter() {
let mut dest_wrt = dest.open_write(path)?;
dest_wrt.write_all(source.as_slice())?;
@@ -195,25 +164,24 @@ impl RAMDirectory {
impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.inner_directory.read().unwrap().open_read(path)
self.fs.read().unwrap().open_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| {
Err(DeleteError::IOError {
io_error: io::Error::from(io::ErrorKind::Other),
filepath: path.to_path_buf(),
})
use crate::directory::error::IOError;
let io_error = IOError::from(io::Error::from(io::ErrorKind::Other));
Err(DeleteError::from(io_error))
});
self.fs.inner_directory.write().unwrap().delete(path)
self.fs.write().unwrap().delete(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.inner_directory.read().unwrap().exists(path)
self.fs.read().unwrap().exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.inner_directory.write().unwrap();
let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path);
let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
let exists = fs.write(path_buf.clone(), &[]);
@@ -237,38 +205,19 @@ impl Directory for RAMDirectory {
let path_buf = PathBuf::from(path);
// Reserve the path to prevent calls to .write() from succeeding.
self.fs
.inner_directory
.write()
.unwrap()
.write(path_buf.clone(), &[]);
self.fs.write().unwrap().write(path_buf.clone(), &[]);
let mut vec_writer = VecWriter::new(path_buf, self.clone());
vec_writer.write_all(data)?;
vec_writer.flush()?;
if path == Path::new(&*META_FILEPATH) {
let _ = self
.fs
.inner_directory
.write()
.unwrap()
.watch_router
.broadcast();
let _ = self.fs.write().unwrap().watch_router.broadcast();
}
Ok(())
}
fn watch(&self, watch_callback: WatchCallback) -> crate::Result<WatchHandle> {
Ok(self
.fs
.inner_directory
.write()
.unwrap()
.watch(watch_callback))
}
fn logger(&self) -> &Logger {
&self.fs.logger
Ok(self.fs.write().unwrap().watch(watch_callback))
}
}

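A pared-down sketch of the `RAMDirectory` layout shown above, assuming a single `Arc<RwLock<HashMap<PathBuf, Vec<u8>>>>` so that clones of the directory share the same in-memory files; watch callbacks, `ReadOnlySource`, and the error types are left out, and `RamDirectorySketch` is an illustrative name:

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};

#[derive(Clone, Default)]
struct RamDirectorySketch {
    // The Arc is what makes clones of the directory point at the same file map.
    fs: Arc<RwLock<HashMap<PathBuf, Vec<u8>>>>,
}

impl RamDirectorySketch {
    fn atomic_write(&self, path: &Path, data: &[u8]) {
        self.fs.write().unwrap().insert(path.to_path_buf(), data.to_vec());
    }
    fn open_read(&self, path: &Path) -> Option<Vec<u8>> {
        self.fs.read().unwrap().get(path).cloned()
    }
    fn total_mem_usage(&self) -> usize {
        self.fs.read().unwrap().values().map(Vec::len).sum()
    }
}

fn main() {
    let dir = RamDirectorySketch::default();
    let clone = dir.clone();
    dir.atomic_write(Path::new("meta.json"), b"{}");
    // The clone shares the same underlying map, so it sees the write.
    assert_eq!(clone.open_read(Path::new("meta.json")), Some(b"{}".to_vec()));
    assert_eq!(clone.total_mem_usage(), 2);
}
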
View File

@@ -211,19 +211,19 @@ fn test_watch(directory: &mut dyn Directory) {
.unwrap();
for i in 0..10 {
assert!(i <= counter.load(SeqCst));
assert_eq!(i, counter.load(SeqCst));
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
assert!(i + 1 <= counter.load(SeqCst)); // notify can trigger more than once.
assert_eq!(i + 1, counter.load(SeqCst));
}
mem::drop(watch_handle);
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok());
assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
assert!(10 <= counter.load(SeqCst));
assert_eq!(10, counter.load(SeqCst));
}
fn test_lock_non_blocking(directory: &mut dyn Directory) {

View File

@@ -1,20 +1,19 @@
use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use slog::{error, Logger};
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;
/// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<dyn Fn() + Sync + Send>;
pub type WatchCallback = Box<dyn Fn() -> () + Sync + Send>;
/// Helper struct to implement the watch method in `Directory` implementations.
///
/// It registers callbacks (See `.subscribe(...)`) and
/// calls them upon calls to `.broadcast(...)`.
pub(crate) struct WatchCallbackList {
#[derive(Default)]
pub struct WatchCallbackList {
router: RwLock<Vec<Weak<WatchCallback>>>,
logger: Logger,
}
/// Controls how long a directory should watch for a file change.
@@ -33,14 +32,7 @@ impl WatchHandle {
}
impl WatchCallbackList {
pub fn with_logger(logger: Logger) -> Self {
WatchCallbackList {
logger,
router: Default::default(),
}
}
/// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
/// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
@@ -82,8 +74,8 @@ impl WatchCallbackList {
});
if let Err(err) = spawn_res {
error!(
self.logger,
"Failed to spawn thread to call watch callbacks. Cause: {:?}", err
"Failed to spawn thread to call watch callbacks. Cause: {:?}",
err
);
}
result
@@ -94,18 +86,13 @@ impl WatchCallbackList {
mod tests {
use crate::directory::WatchCallbackList;
use futures::executor::block_on;
use slog::{o, Discard, Logger};
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
fn default_watch_callback_list() -> WatchCallbackList {
WatchCallbackList::with_logger(Logger::root(Discard, o!()))
}
#[test]
fn test_watch_event_router_simple() {
let watch_event_router = default_watch_callback_list();
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let inc_callback = Box::new(move || {
@@ -132,7 +119,7 @@ mod tests {
#[test]
fn test_watch_event_router_multiple_callback_same_key() {
let watch_event_router = default_watch_callback_list();
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let inc_callback = |inc: usize| {
let counter_clone = counter.clone();
@@ -161,7 +148,7 @@ mod tests {
#[test]
fn test_watch_event_router_multiple_callback_different_key() {
let watch_event_router = default_watch_callback_list();
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let inc_callback = Box::new(move || {

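A minimal sketch of the subscribe/broadcast mechanism described above: callbacks are held as `Weak` references, so dropping the handle returned by `subscribe` unregisters the callback. The asynchronous broadcast and error logging of the real `WatchCallbackList` are omitted, and the `*Sketch` names are illustrative:

use std::sync::{Arc, RwLock, Weak};

type Callback = Box<dyn Fn() + Sync + Send>;

#[derive(Default)]
struct CallbackListSketch {
    router: RwLock<Vec<Weak<Callback>>>,
}

// Holding the handle keeps the Arc alive; dropping it makes the Weak un-upgradeable.
struct HandleSketch {
    _callback: Arc<Callback>,
}

impl CallbackListSketch {
    fn subscribe(&self, callback: Callback) -> HandleSketch {
        let arc = Arc::new(callback);
        self.router.write().unwrap().push(Arc::downgrade(&arc));
        HandleSketch { _callback: arc }
    }
    fn broadcast(&self) {
        // Only callbacks whose handle is still alive can be upgraded and called.
        let callbacks = self.router.read().unwrap();
        for weak in callbacks.iter() {
            if let Some(cb) = weak.upgrade() {
                (*cb)();
            }
        }
    }
}

fn main() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    let list = CallbackListSketch::default();
    let counter = Arc::new(AtomicUsize::new(0));
    let counter_clone = counter.clone();
    let handle = list.subscribe(Box::new(move || {
        counter_clone.fetch_add(1, Ordering::SeqCst);
    }));
    list.broadcast();
    assert_eq!(counter.load(Ordering::SeqCst), 1);
    drop(handle);
    list.broadcast(); // the callback is gone, so the counter stays at 1
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}
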
View File

@@ -38,7 +38,6 @@ pub trait DocSet {
/// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a DocSet.
fn seek(&mut self, target: DocId) -> DocId {
let mut doc = self.doc();
debug_assert!(doc <= target);
while doc < target {
doc = self.advance();
}

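A standalone sketch of the `DocSet` contract shown above, with the same default `seek` loop and a made-up `VecDocSetSketch` over a sorted `Vec` to illustrate consuming a docset until `TERMINATED`:

type DocId = u32;
const TERMINATED: DocId = DocId::MAX;

trait DocSetSketch {
    fn doc(&self) -> DocId;
    fn advance(&mut self) -> DocId;
    // Default seek: advance until the current doc reaches `target`.
    fn seek(&mut self, target: DocId) -> DocId {
        let mut doc = self.doc();
        while doc < target {
            doc = self.advance();
        }
        doc
    }
}

struct VecDocSetSketch {
    docs: Vec<DocId>, // sorted, deduplicated
    cursor: usize,
}

impl DocSetSketch for VecDocSetSketch {
    fn doc(&self) -> DocId {
        self.docs.get(self.cursor).copied().unwrap_or(TERMINATED)
    }
    fn advance(&mut self) -> DocId {
        self.cursor += 1;
        self.doc()
    }
}

fn main() {
    let mut docset = VecDocSetSketch { docs: vec![1, 3, 8, 12], cursor: 0 };
    assert_eq!(docset.seek(5), 8);
    // Consuming the docset until TERMINATED, as described in the doc comment above.
    let mut doc = docset.doc();
    let mut collected = vec![];
    while doc != TERMINATED {
        collected.push(doc);
        doc = docset.advance();
    }
    assert_eq!(collected, vec![8, 12]);
}
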
View File

@@ -2,13 +2,11 @@
use std::io;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
use crate::{
directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
schema,
};
use crate::schema;
use std::fmt;
use std::path::PathBuf;
use std::sync::PoisonError;
@@ -45,47 +43,44 @@ impl fmt::Debug for DataCorruption {
}
}
/// The library's error enum
#[derive(Debug, Error)]
/// The library's failure based error enum
#[derive(Debug, Fail)]
pub enum TantivyError {
/// Failed to open the directory.
#[error("Failed to open the directory: '{0:?}'")]
OpenDirectoryError(#[from] OpenDirectoryError),
/// Failed to open a file for read.
#[error("Failed to open file for read: '{0:?}'")]
OpenReadError(#[from] OpenReadError),
/// Failed to open a file for write.
#[error("Failed to open file for write: '{0:?}'")]
OpenWriteError(#[from] OpenWriteError),
/// Path does not exist.
#[fail(display = "Path does not exist: '{:?}'", _0)]
PathDoesNotExist(PathBuf),
/// File already exists, this is a problem when we try to write into a new file.
#[fail(display = "File already exists: '{:?}'", _0)]
FileAlreadyExists(PathBuf),
/// Index already exists in this directory
#[error("Index already exists")]
#[fail(display = "Index already exists")]
IndexAlreadyExists,
/// Failed to acquire file lock
#[error("Failed to acquire Lockfile: {0:?}. {1:?}")]
#[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
LockFailure(LockError, Option<String>),
/// IO Error.
#[error("An IO error occurred: '{0}'")]
IOError(#[from] io::Error),
#[fail(display = "An IO error occurred: '{}'", _0)]
IOError(#[cause] IOError),
/// Data corruption.
#[error("Data corrupted: '{0:?}'")]
#[fail(display = "{:?}", _0)]
DataCorruption(DataCorruption),
/// A thread holding the locked panicked and poisoned the lock.
#[error("A thread holding the locked panicked and poisoned the lock")]
#[fail(display = "A thread holding the locked panicked and poisoned the lock")]
Poisoned,
/// Invalid argument was passed by the user.
#[error("An invalid argument was passed: '{0}'")]
#[fail(display = "An invalid argument was passed: '{}'", _0)]
InvalidArgument(String),
/// An Error happened in one of the thread.
#[error("An error occurred in a thread: '{0}'")]
#[fail(display = "An error occurred in a thread: '{}'", _0)]
ErrorInThread(String),
/// An Error appeared related to the schema.
#[error("Schema error: '{0}'")]
#[fail(display = "Schema error: '{}'", _0)]
SchemaError(String),
/// System error. (e.g.: We failed spawning a new thread)
#[error("System error.'{0}'")]
#[fail(display = "System error.'{}'", _0)]
SystemError(String),
/// Index incompatible with current version of tantivy
#[error("{0:?}")]
#[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility),
}
@@ -94,17 +89,31 @@ impl From<DataCorruption> for TantivyError {
TantivyError::DataCorruption(data_corruption)
}
}
impl From<FastFieldNotAvailableError> for TantivyError {
fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
TantivyError::SchemaError(format!("{}", fastfield_error))
}
}
impl From<LockError> for TantivyError {
fn from(lock_error: LockError) -> TantivyError {
TantivyError::LockFailure(lock_error, None)
}
}
impl From<IOError> for TantivyError {
fn from(io_error: IOError) -> TantivyError {
TantivyError::IOError(io_error)
}
}
impl From<io::Error> for TantivyError {
fn from(io_error: io::Error) -> TantivyError {
TantivyError::IOError(io_error.into())
}
}
impl From<query::QueryParserError> for TantivyError {
fn from(parsing_error: query::QueryParserError) -> TantivyError {
TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
@@ -117,12 +126,49 @@ impl<Guard> From<PoisonError<Guard>> for TantivyError {
}
}
impl From<OpenReadError> for TantivyError {
fn from(error: OpenReadError) -> TantivyError {
match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
}
}
}
impl From<schema::DocParsingError> for TantivyError {
fn from(error: schema::DocParsingError) -> TantivyError {
TantivyError::InvalidArgument(format!("Failed to parse document {:?}", error))
}
}
impl From<OpenWriteError> for TantivyError {
fn from(error: OpenWriteError) -> TantivyError {
match error {
OpenWriteError::FileAlreadyExists(filepath) => {
TantivyError::FileAlreadyExists(filepath)
}
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
}
}
}
impl From<OpenDirectoryError> for TantivyError {
fn from(error: OpenDirectoryError) -> TantivyError {
match error {
OpenDirectoryError::DoesNotExist(directory_path) => {
TantivyError::PathDoesNotExist(directory_path)
}
OpenDirectoryError::NotADirectory(directory_path) => {
TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
}
OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
}
}
}
impl From<serde_json::Error> for TantivyError {
fn from(error: serde_json::Error) -> TantivyError {
let io_err = io::Error::from(error);

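A minimal sketch of the `failure` to `thiserror` migration visible above: `#[derive(Fail)]` / `#[fail(display = ...)]` become `#[derive(thiserror::Error)]` / `#[error(...)]`, and `#[from]` replaces hand-written `From` impls. The `SketchError` enum and `open` function are illustrative, not tantivy's:

use std::io;
use std::path::{Path, PathBuf};

#[derive(Debug, thiserror::Error)]
enum SketchError {
    #[error("Path does not exist: '{0:?}'")]
    PathDoesNotExist(PathBuf),
    #[error("An IO error occurred: '{0}'")]
    Io(#[from] io::Error),
}

fn open(path: &Path) -> Result<Vec<u8>, SketchError> {
    // The `?` operator relies on the `From<io::Error>` impl generated by `#[from]`.
    Ok(std::fs::read(path)?)
}

fn main() {
    let err = open(Path::new("/nonexistent/meta.json")).unwrap_err();
    println!("{}", err); // "An IO error occurred: '...'"
}
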
View File

@@ -15,7 +15,7 @@ mod tests {
let field = schema_builder.add_bytes_field("bytesfield");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>vec![0u8, 1, 2, 3]));
index_writer.add_document(doc!(field=>vec![]));
index_writer.add_document(doc!(field=>vec![255u8]));

View File

@@ -9,8 +9,6 @@ use std::io::Write;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
/// Warning: this function does not call terminate. The caller is in charge of
/// closing the writer properly.
pub fn write_delete_bitset(
delete_bitset: &BitSet,
max_doc: u32,
@@ -44,24 +42,6 @@ pub struct DeleteBitSet {
}
impl DeleteBitSet {
#[cfg(test)]
pub(crate) fn for_test(docs: &[DocId], max_doc: u32) -> DeleteBitSet {
use crate::directory::{Directory, RAMDirectory, TerminatingWrite};
use std::path::Path;
assert!(docs.iter().all(|&doc| doc < max_doc));
let mut bitset = BitSet::with_max_value(max_doc);
for &doc in docs {
bitset.insert(doc);
}
let mut directory = RAMDirectory::create();
let path = Path::new("dummydeletebitset");
let mut wrt = directory.open_write(path).unwrap();
write_delete_bitset(&bitset, max_doc, &mut wrt).unwrap();
wrt.terminate().unwrap();
let source = directory.open_read(path).unwrap();
Self::open(source)
}
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data
@@ -103,35 +83,42 @@ impl HasLen for DeleteBitSet {
#[cfg(test)]
mod tests {
use super::DeleteBitSet;
use crate::common::HasLen;
use super::*;
use crate::directory::*;
use std::path::PathBuf;
#[test]
fn test_delete_bitset_empty() {
let delete_bitset = DeleteBitSet::for_test(&[], 10);
for doc in 0..10 {
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
writer.terminate().unwrap();
}
assert_eq!(delete_bitset.len(), 0);
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
for doc in 0..max_doc {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
}
#[test]
fn test_delete_bitset() {
let delete_bitset = DeleteBitSet::for_test(&[1, 9], 10);
assert!(delete_bitset.is_alive(0));
assert!(delete_bitset.is_deleted(1));
assert!(delete_bitset.is_alive(2));
assert!(delete_bitset.is_alive(3));
assert!(delete_bitset.is_alive(4));
assert!(delete_bitset.is_alive(5));
assert!(delete_bitset.is_alive(6));
assert!(delete_bitset.is_alive(6));
assert!(delete_bitset.is_alive(7));
assert!(delete_bitset.is_alive(8));
assert!(delete_bitset.is_deleted(9));
for doc in 0..10 {
assert_eq!(delete_bitset.is_deleted(doc), !delete_bitset.is_alive(doc));
{
let mut bitset = BitSet::with_max_value(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset, 10);
}
{
let mut bitset = BitSet::with_max_value(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset, 8);
}
assert_eq!(delete_bitset.len(), 2);
}
}

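A simplified sketch of the delete-bitset idea exercised by the tests above: deleted `DocId`s are packed one bit per document, and `is_alive` is just the negation of `is_deleted`. The on-disk format and `ReadOnlySource` handling of the real `DeleteBitSet` are not reproduced, and `DeleteBitSetSketch` is an illustrative name:

type DocId = u32;

struct DeleteBitSetSketch {
    bytes: Vec<u8>,
}

impl DeleteBitSetSketch {
    fn new(deleted_docs: &[DocId], max_doc: u32) -> Self {
        assert!(deleted_docs.iter().all(|&doc| doc < max_doc));
        // One bit per document, rounded up to a whole number of bytes.
        let mut bytes = vec![0u8; ((max_doc as usize) + 7) / 8];
        for &doc in deleted_docs {
            bytes[(doc / 8) as usize] |= 1u8 << (doc % 8);
        }
        DeleteBitSetSketch { bytes }
    }
    fn is_deleted(&self, doc: DocId) -> bool {
        self.bytes[(doc / 8) as usize] & (1u8 << (doc % 8)) != 0
    }
    fn is_alive(&self, doc: DocId) -> bool {
        !self.is_deleted(doc)
    }
    fn num_deleted(&self) -> usize {
        self.bytes.iter().map(|b| b.count_ones() as usize).sum()
    }
}

fn main() {
    let bitset = DeleteBitSetSketch::new(&[1, 9], 10);
    assert!(bitset.is_deleted(1) && bitset.is_deleted(9));
    assert_eq!((0..10).filter(|&d| bitset.is_alive(d)).count(), 8);
    assert_eq!(bitset.num_deleted(), 2);
}
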
View File

@@ -4,8 +4,8 @@ use std::result;
/// `FastFieldNotAvailableError` is returned when the
/// user requested for a fast field reader, and the field was not
/// defined in the schema as a fast field.
#[derive(Debug, Error)]
#[error("Fast field not available: '{field_name:?}'")]
#[derive(Debug, Fail)]
#[fail(display = "Fast field not available: '{:?}'", field_name)]
pub struct FastFieldNotAvailableError {
field_name: String,
}

View File

@@ -474,7 +474,7 @@ mod tests {
let date_field = schema_builder.add_date_field("date", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
index_writer.commit().unwrap();
@@ -511,7 +511,7 @@ mod tests {
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(1i64.to_u64()),

View File

@@ -25,7 +25,7 @@ mod tests {
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64, field=>3u64));
index_writer.add_document(doc!());
index_writer.add_document(doc!(field=>4u64));
@@ -64,7 +64,7 @@ mod tests {
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let first_time_stamp = chrono::Utc::now();
index_writer.add_document(
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
@@ -186,7 +186,7 @@ mod tests {
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=> 1i64, field => 3i64));
index_writer.add_document(doc!());
index_writer.add_document(doc!(field=> -4i64));
@@ -221,7 +221,7 @@ mod tests {
let field = schema_builder.add_facet_field("facetfield");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for i in 0..100_000 {
index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
}

View File

@@ -74,7 +74,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index
.writer_for_tests()
.writer_with_num_threads(1, 30_000_000)
.expect("Failed to create index writer.");
index_writer.add_document(doc!(
facet_field => Facet::from("/category/cat2"),

View File

@@ -21,7 +21,7 @@ mod reader;
mod serializer;
mod writer;
pub use self::reader::{FieldNormReader, FieldNormReaders};
pub use self::reader::FieldNormReader;
pub use self::serializer::FieldNormsSerializer;
pub use self::writer::FieldNormsWriter;

View File

@@ -1,41 +1,6 @@
use super::{fieldnorm_to_id, id_to_fieldnorm};
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::schema::Field;
use crate::space_usage::PerFieldSpaceUsage;
use crate::DocId;
use std::sync::Arc;
/// Reader for the fieldnorm (for each document, the number of tokens indexed in the
/// field) of all indexed fields in the index.
///
/// Each fieldnorm is approximately compressed over one byte. We refer to this byte as
/// `fieldnorm_id`.
/// The mapping from `fieldnorm` to `fieldnorm_id` is monotonic.
#[derive(Clone)]
pub struct FieldNormReaders {
data: Arc<CompositeFile>,
}
impl FieldNormReaders {
/// Creates a field norm reader.
pub fn open(source: ReadOnlySource) -> crate::Result<FieldNormReaders> {
let data = CompositeFile::open(&source)?;
Ok(FieldNormReaders {
data: Arc::new(data),
})
}
/// Returns the FieldNormReader for a specific field.
pub fn get_field(&self, field: Field) -> Option<FieldNormReader> {
self.data.open_read(field).map(FieldNormReader::open)
}
/// Return a break down of the space usage per field.
pub fn space_usage(&self) -> PerFieldSpaceUsage {
self.data.space_usage()
}
}
/// Reads the fieldnorm associated to a document.
/// The fieldnorm represents the length associated to
@@ -54,7 +19,6 @@ impl FieldNormReaders {
/// Apart from compression, this scale also makes it possible to
/// precompute computationally expensive functions of the fieldnorm
/// in a very short array.
#[derive(Clone)]
pub struct FieldNormReader {
data: ReadOnlySource,
}
@@ -65,11 +29,6 @@ impl FieldNormReader {
FieldNormReader { data }
}
/// Returns the number of documents in this segment.
pub fn num_docs(&self) -> u32 {
self.data.len() as u32
}
/// Returns the `fieldnorm` associated to a doc id.
/// The fieldnorm is a value approximating the number
/// of tokens in a given field of the `doc_id`.
@@ -103,12 +62,13 @@ impl FieldNormReader {
pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
fieldnorm_to_id(fieldnorm)
}
}
#[cfg(test)]
pub fn for_test(field_norms: &[u32]) -> FieldNormReader {
#[cfg(test)]
impl From<Vec<u32>> for FieldNormReader {
fn from(field_norms: Vec<u32>) -> FieldNormReader {
let field_norms_id = field_norms
.iter()
.cloned()
.into_iter()
.map(FieldNormReader::fieldnorm_to_id)
.collect::<Vec<u8>>();
let field_norms_data = ReadOnlySource::from(field_norms_id);
@@ -117,20 +77,3 @@ impl FieldNormReader {
}
}
}
#[cfg(test)]
mod tests {
use crate::fieldnorm::FieldNormReader;
#[test]
fn test_from_fieldnorms_array() {
let fieldnorms = &[1, 2, 3, 4, 1_000_000];
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
assert_eq!(fieldnorm_reader.num_docs(), 5);
assert_eq!(fieldnorm_reader.fieldnorm(0), 1);
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
assert_eq!(fieldnorm_reader.fieldnorm(2), 3);
assert_eq!(fieldnorm_reader.fieldnorm(3), 4);
assert_eq!(fieldnorm_reader.fieldnorm(4), 983_064);
}
}

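A schematic sketch of the one-byte fieldnorm compression described above: a precomputed, monotonically increasing table of 256 representative lengths, with `fieldnorm_to_id` picking the largest id whose value does not exceed the given length. The table built here is invented for illustration and is not the mapping tantivy actually uses:

fn build_table() -> [u32; 256] {
    let mut table = [0u32; 256];
    let mut value = 0u32;
    for (id, slot) in table.iter_mut().enumerate() {
        *slot = value;
        // Grow linearly at first, then geometrically, so small lengths stay exact
        // while large lengths are approximated.
        value = if id < 64 { value + 1 } else { value + 1 + value / 16 };
    }
    table
}

fn fieldnorm_to_id(table: &[u32; 256], fieldnorm: u32) -> u8 {
    match table.binary_search(&fieldnorm) {
        Ok(id) => id as u8,
        Err(insertion_point) => (insertion_point - 1) as u8,
    }
}

fn id_to_fieldnorm(table: &[u32; 256], id: u8) -> u32 {
    table[id as usize]
}

fn main() {
    let table = build_table();
    // Small fieldnorms round-trip exactly...
    assert_eq!(id_to_fieldnorm(&table, fieldnorm_to_id(&table, 42)), 42);
    // ...large ones are approximated from below (lossy but monotonic).
    let approx = id_to_fieldnorm(&table, fieldnorm_to_id(&table, 1_000_000));
    assert!(approx <= 1_000_000);
}
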
View File

@@ -78,12 +78,11 @@ impl FieldNormsWriter {
}
/// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
}
fieldnorms_serializer.close()?;
Ok(())
}
}

View File

@@ -27,7 +27,6 @@ use crate::Opstamp;
use crossbeam::channel;
use futures::executor::block_on;
use futures::future::Future;
use slog::{error, info, Logger};
use smallvec::smallvec;
use smallvec::SmallVec;
use std::mem;
@@ -196,21 +195,20 @@ fn index_documents(
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor,
logger: &Logger,
) -> crate::Result<bool> {
let schema = segment.schema();
info!(logger, "segment-index"; "stage"=>"start");
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
let mut buffer_limit_reached = false;
for document_group in grouped_document_iterator {
for doc in document_group {
segment_writer.add_document(doc, &schema)?;
}
let mem_usage = segment_writer.mem_usage();
if mem_usage >= memory_budget - MARGIN_IN_BYTES {
buffer_limit_reached = true;
info!(
"Buffer limit reached, flushing segment with maxdoc={}.",
segment_writer.max_doc()
);
break;
}
}
@@ -230,14 +228,6 @@ fn index_documents(
let segment_with_max_doc = segment.with_max_doc(max_doc);
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
info!(
logger,
"segment-index";
"stage" => "serialize",
"cause" => if buffer_limit_reached { "buffer-limit" } else { "commit" },
"maxdoc" => max_doc,
"last_docstamp" => last_docstamp
);
let delete_bitset_opt = apply_deletes(
&segment_with_max_doc,
@@ -251,18 +241,7 @@ fn index_documents(
delete_cursor,
delete_bitset_opt,
);
info!(
logger,
"segment-index";
"stage" => "publish",
);
block_on(segment_updater.schedule_add_segment(segment_entry))?;
info!(
logger,
"segment-index";
"stage" => "end",
);
Ok(true)
}
@@ -365,10 +344,6 @@ impl IndexWriter {
Ok(index_writer)
}
pub(crate) fn logger(&self) -> &Logger {
self.index.logger()
}
fn drop_sender(&mut self) {
let (sender, _receiver) = channel::bounded(1);
self.operation_sender = sender;
@@ -377,8 +352,6 @@ impl IndexWriter {
/// If there are some merging threads, blocks until they all finish their work and
/// then drop the `IndexWriter`.
pub fn wait_merging_threads(mut self) -> crate::Result<()> {
info!(self.logger(), "wait-merge-threads"; "stage"=>"start");
// this will stop the indexing thread,
// dropping the last reference to the segment_updater.
self.drop_sender();
@@ -399,9 +372,9 @@ impl IndexWriter {
.map_err(|_| TantivyError::ErrorInThread("Failed to join merging thread.".into()));
if let Err(ref e) = result {
error!(self.logger(), "some merge thread failed"; "cause"=>e.to_string());
error!("Some merging thread failed {:?}", e);
}
info!(self.logger(), "wait-merge-threads"; "stage"=>"stop");
result
}
@@ -461,16 +434,12 @@ impl IndexWriter {
return Ok(());
}
let segment = index.new_segment();
let segment_id = segment.id();
index_documents(
mem_budget,
segment,
&mut document_iterator,
&mut segment_updater,
delete_cursor.clone(),
&index
.logger()
.new(slog::o!("segment"=>segment_id.to_string())),
)?;
}
})?;
@@ -567,7 +536,6 @@ impl IndexWriter {
/// when no documents are remaining.
///
/// Returns the former segment_ready channel.
#[allow(unused_must_use)]
fn recreate_document_channel(&mut self) -> OperationReceiver {
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
@@ -584,10 +552,7 @@ impl IndexWriter {
///
/// The opstamp at the last commit is returned.
pub fn rollback(&mut self) -> crate::Result<Opstamp> {
info!(
self.logger(),
"Rolling back to opstamp {}", self.committed_opstamp
);
info!("Rolling back to opstamp {}", self.committed_opstamp);
// marks the segment updater as killed. From now on, all
// segment updates will be ignored.
self.segment_updater.kill();
@@ -610,7 +575,7 @@ impl IndexWriter {
//
// This will drop the document queue, and the thread
// should terminate.
*self = new_index_writer;
mem::replace(self, new_index_writer);
// Drains the document receiver pipeline :
// Workers don't need to index the pending documents.
@@ -644,8 +609,6 @@ impl IndexWriter {
/// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
let logger = self.logger().clone();
// Here, because we join all of the worker threads,
// all of the segment update for this commit have been
// sent.
@@ -656,10 +619,7 @@ impl IndexWriter {
//
// This will move uncommitted segments to the state of
// committed segments.
let commit_opstamp = self.stamper.stamp();
info!(logger, "prepare-commit"; "opstamp" => commit_opstamp);
info!("Preparing commit");
// this will drop the current document channel
// and recreate a new one.
@@ -675,8 +635,9 @@ impl IndexWriter {
self.add_indexing_worker()?;
}
let commit_opstamp = self.stamper.stamp();
let prepared_commit = PreparedCommit::new(self, commit_opstamp);
info!(logger, "Prepared commit {}", commit_opstamp);
info!("Prepared commit {}", commit_opstamp);
Ok(prepared_commit)
}
@@ -838,7 +799,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let index_writer = index.writer_for_tests().unwrap();
let index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let operations = vec![
UserOperation::Add(doc!(text_field=>"a")),
UserOperation::Add(doc!(text_field=>"b")),
@@ -853,7 +814,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "hello1"));
index_writer.add_document(doc!(text_field => "hello2"));
assert!(index_writer.commit().is_ok());
@@ -902,7 +863,7 @@ mod tests {
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let a_term = Term::from_field_text(text_field, "a");
let b_term = Term::from_field_text(text_field, "b");
let operations = vec![
@@ -964,8 +925,8 @@ mod tests {
fn test_lockfile_already_exists_error_msg() {
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer_for_tests().unwrap();
match index.writer_for_tests() {
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
match index.writer_with_num_threads(1, 3_000_000) {
Err(err) => {
let err_msg = err.to_string();
assert!(err_msg.contains("already an `IndexWriter`"));
@@ -1299,7 +1260,7 @@ mod tests {
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
let commit = index_writer.commit();
assert!(commit.is_ok());

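A schematic sketch of the flush decision in `index_documents` above: documents are fed to an in-memory segment writer until its estimated memory usage crosses `memory_budget - MARGIN_IN_BYTES`, at which point the segment is cut and serialized. `SegmentWriterSketch`, `index_documents_sketch`, and the per-document cost model are illustrative, not tantivy's API:

const MARGIN_IN_BYTES: usize = 1_000_000;

struct SegmentWriterSketch {
    mem_usage: usize,
    max_doc: u32,
}

impl SegmentWriterSketch {
    fn add_document(&mut self, doc_cost_in_bytes: usize) {
        self.mem_usage += doc_cost_in_bytes;
        self.max_doc += 1;
    }
}

// Returns the number of documents in the flushed segment and whether the flush
// was triggered by the memory budget (as opposed to exhausting the batch).
fn index_documents_sketch(memory_budget: usize, doc_costs: &[usize]) -> (u32, bool) {
    let mut writer = SegmentWriterSketch { mem_usage: 0, max_doc: 0 };
    let mut buffer_limit_reached = false;
    for &cost in doc_costs {
        writer.add_document(cost);
        if writer.mem_usage >= memory_budget - MARGIN_IN_BYTES {
            // Flush this segment; remaining documents go to a fresh segment writer.
            buffer_limit_reached = true;
            break;
        }
    }
    (writer.max_doc, buffer_limit_reached)
}

fn main() {
    let (max_doc, limit_reached) = index_documents_sketch(3_000_000, &[800_000; 10]);
    assert!(limit_reached);
    assert_eq!(max_doc, 3); // 2_400_000 >= 3_000_000 - 1_000_000 after the 3rd doc
}
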
View File

@@ -8,31 +8,30 @@ use crate::fastfield::DeleteBitSet;
use crate::fastfield::FastFieldReader;
use crate::fastfield::FastFieldSerializer;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fieldnorm::FieldNormReader;
use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::indexer::SegmentSerializer;
use crate::postings::InvertedIndexSerializer;
use crate::postings::Postings;
use crate::postings::{InvertedIndexSerializer, SegmentPostings};
use crate::schema::Cardinality;
use crate::schema::FieldType;
use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::{DocId, InvertedIndexReader, SegmentComponent};
use crate::DocId;
use std::cmp;
use std::collections::HashMap;
use std::sync::Arc;
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
let mut total_tokens = 0u64;
let mut count: [usize; 256] = [0; 256];
for reader in readers {
if reader.has_deletes() {
// if there are deletes, then we use an approximation
// using the fieldnorm
let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
let fieldnorms_reader = reader.get_fieldnorms_reader(field);
for doc in reader.doc_ids_alive() {
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc);
count[fieldnorm_id as usize] += 1;
@@ -41,7 +40,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::R
total_tokens += reader.inverted_index(field).total_num_tokens();
}
}
Ok(total_tokens
total_tokens
+ count
.iter()
.cloned()
@@ -49,7 +48,7 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::R
.map(|(fieldnorm_ord, count)| {
count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
})
.sum::<u64>())
.sum::<u64>()
}
pub struct IndexMerger {
@@ -168,14 +167,14 @@ impl IndexMerger {
fn write_fieldnorms(
&self,
mut fieldnorms_serializer: FieldNormsSerializer,
fieldnorms_serializer: &mut FieldNormsSerializer,
) -> crate::Result<()> {
let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema);
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
for field in fields {
fieldnorms_data.clear();
for reader in &self.readers {
let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
let fieldnorms_reader = reader.get_fieldnorms_reader(field);
for doc_id in reader.doc_ids_alive() {
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc_id);
fieldnorms_data.push(fieldnorm_id);
@@ -183,7 +182,6 @@ impl IndexMerger {
}
fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
}
fieldnorms_serializer.close()?;
Ok(())
}
@@ -494,11 +492,10 @@ impl IndexMerger {
indexed_field: Field,
field_type: &FieldType,
serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>,
) -> crate::Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();
let field_readers: Vec<Arc<InvertedIndexReader>> = self
let field_readers = self
.readers
.iter()
.map(|reader| reader.inverted_index(indexed_field))
@@ -541,7 +538,7 @@ impl IndexMerger {
// The total number of tokens will only be exact when there have been no deletes.
//
// Otherwise, we approximate by removing deleted documents proportionally.
let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field)?;
let total_num_tokens: u64 = compute_total_num_tokens(&self.readers, indexed_field);
// Create the total list of doc ids
// by stacking the doc ids from the different segment.
@@ -553,8 +550,7 @@ impl IndexMerger {
// - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc,
// seg0.max_doc + seg1.max_doc + seg2.max_doc]
// ...
let mut field_serializer =
serializer.new_field(indexed_field, total_num_tokens, fieldnorm_reader)?;
let mut field_serializer = serializer.new_field(indexed_field, total_num_tokens)?;
let field_entry = self.schema.get_field_entry(indexed_field);
@@ -564,78 +560,77 @@ impl IndexMerger {
indexed. Have you modified the schema?",
);
let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
while merged_terms.advance() {
segment_postings_containing_the_term.clear();
let term_bytes: &[u8] = merged_terms.key();
let mut total_doc_freq = 0;
// Let's compute the list of non-empty posting lists
for heap_item in merged_terms.current_kvs() {
let segment_ord = heap_item.segment_ord;
let term_info = heap_item.streamer.value();
let segment_reader = &self.readers[heap_item.segment_ord];
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
let segment_postings =
inverted_index.read_postings_from_terminfo(term_info, segment_postings_option);
let delete_bitset_opt = segment_reader.delete_bitset();
let doc_freq = if let Some(delete_bitset) = delete_bitset_opt {
segment_postings.doc_freq_given_deletes(delete_bitset)
} else {
segment_postings.doc_freq()
};
if doc_freq > 0u32 {
total_doc_freq += doc_freq;
segment_postings_containing_the_term.push((segment_ord, segment_postings));
}
}
let segment_postings: Vec<_> = merged_terms
.current_kvs()
.iter()
.flat_map(|heap_item| {
let segment_ord = heap_item.segment_ord;
let term_info = heap_item.streamer.value();
let segment_reader = &self.readers[heap_item.segment_ord];
let inverted_index = segment_reader.inverted_index(indexed_field);
let mut segment_postings = inverted_index
.read_postings_from_terminfo(term_info, segment_postings_option);
let mut doc = segment_postings.doc();
while doc != TERMINATED {
if !segment_reader.is_deleted(doc) {
return Some((segment_ord, segment_postings));
}
doc = segment_postings.advance();
}
None
})
.collect();
// At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term (and that are non-empty)
// of all of the segments containing the given term.
//
// These segments are non-empty and advance has already been called.
if total_doc_freq == 0u32 {
// All docs that used to contain the term have been deleted. The `term` will be
// entirely removed.
continue;
}
if !segment_postings.is_empty() {
// If not, the `term` will be entirely removed.
let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?;
// We know that there is at least one document containing
// the term, so we add it.
let to_term_ord = field_serializer.new_term(term_bytes)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, to_term_ord);
}
}
// We can now serialize this postings, by pushing each document to the
// postings serializer.
for (segment_ord, mut segment_postings) in
segment_postings_containing_the_term.drain(..)
{
let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
let mut doc = segment_postings.doc();
while doc != TERMINATED {
                        // deleted docs are skipped as they do not have a `remapped_doc_id`.
if let Some(remapped_doc_id) = old_to_new_doc_id[doc as usize] {
// we make sure to only write the term iff
// there is at least one document.
let term_freq = segment_postings.term_freq();
segment_postings.positions(&mut positions_buffer);
let delta_positions = delta_computer.compute_delta(&positions_buffer);
field_serializer.write_doc(remapped_doc_id, term_freq, delta_positions)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, to_term_ord);
}
doc = segment_postings.advance();
}
}
// closing the term.
field_serializer.close_term()?;
// We can now serialize this postings, by pushing each document to the
// postings serializer.
for (segment_ord, mut segment_postings) in segment_postings {
let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
let mut doc = segment_postings.doc();
while doc != TERMINATED {
                    // deleted docs are skipped as they do not have a `remapped_doc_id`.
if let Some(remapped_doc_id) = old_to_new_doc_id[doc as usize] {
// we make sure to only write the term iff
// there is at least one document.
let term_freq = segment_postings.term_freq();
segment_postings.positions(&mut positions_buffer);
let delta_positions = delta_computer.compute_delta(&positions_buffer);
field_serializer.write_doc(
remapped_doc_id,
term_freq,
delta_positions,
)?;
}
doc = segment_postings.advance();
}
}
// closing the term.
field_serializer.close_term()?;
}
}
field_serializer.close()?;
Ok(term_ord_mapping_opt)
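// Hedged sketch (illustrative only) of the delta encoding applied to term
// positions before they are written: each position is replaced by its gap
// from the previous one, which is what `DeltaComputer::compute_delta` is
// used for above.
fn delta_encode(positions: &[u32]) -> Vec<u32> {
    let mut prev = 0u32;
    positions
        .iter()
        .map(|&pos| {
            let delta = pos - prev;
            prev = pos;
            delta
        })
        .collect()
}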
@@ -644,18 +639,13 @@ impl IndexMerger {
fn write_postings(
&self,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() {
let fieldnorm_reader = fieldnorm_readers.get_field(field);
if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
field,
field_entry.field_type(),
serializer,
fieldnorm_reader,
)? {
if let Some(term_ordinal_mapping) =
self.write_postings_for_field(field, field_entry.field_type(), serializer)?
{
term_ordinal_mappings.insert(field, term_ordinal_mapping);
}
}
@@ -681,15 +671,8 @@ impl IndexMerger {
impl SerializableSegment for IndexMerger {
fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_mappings =
self.write_postings(serializer.get_postings_serializer(), fieldnorm_readers)?;
let term_ord_mappings = self.write_postings(serializer.get_postings_serializer())?;
self.write_fieldnorms(serializer.get_fieldnorms_serializer())?;
self.write_fast_fields(serializer.get_fast_field_serializer(), term_ord_mappings)?;
self.write_storable_fields(serializer.get_store_writer())?;
serializer.close()?;
@@ -699,15 +682,15 @@ impl SerializableSegment for IndexMerger {
#[cfg(test)]
mod tests {
use crate::assert_nearly_equals;
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::tests::{BytesFastFieldTestCollector, FastFieldTestCollector};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::Scorer;
use crate::query::TermQuery;
use crate::schema;
use crate::schema::Cardinality;
use crate::schema::Document;
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
@@ -715,11 +698,9 @@ mod tests {
use crate::schema::Term;
use crate::schema::TextFieldIndexing;
use crate::schema::INDEXED;
use crate::schema::{Cardinality, TEXT};
use crate::DocAddress;
use crate::IndexWriter;
use crate::Searcher;
use crate::{schema, DocSet, SegmentId};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on;
use std::io::Cursor;
@@ -751,7 +732,7 @@ mod tests {
};
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
// writing the segment
{
@@ -803,7 +784,7 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
}
@@ -904,7 +885,7 @@ mod tests {
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let reader = index.reader().unwrap();
let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field);
@@ -1211,7 +1192,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap();
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
let mut doc = Document::default();
for facet in doc_facets {
@@ -1276,7 +1257,7 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
@@ -1295,7 +1276,7 @@ mod tests {
// Deleting one term
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term);
@@ -1320,7 +1301,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let int_field = schema_builder.add_u64_field("intvals", INDEXED);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(int_field => 1u64));
index_writer.commit().expect("commit failed");
index_writer.add_document(doc!(int_field => 1u64));
@@ -1349,7 +1330,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap();
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut doc = Document::default();
doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone());
@@ -1388,7 +1369,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
let mut doc = Document::default();
for &val in int_vals {
@@ -1462,7 +1443,7 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
assert!(index_writer.wait_merging_threads().is_ok());
}
@@ -1516,7 +1497,7 @@ mod tests {
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?;
let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
@@ -1526,9 +1507,12 @@ mod tests {
for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);
writer.add_document(doc);
if i % 5 == 0 {
writer.commit()?;
}
@@ -1540,72 +1524,6 @@ mod tests {
// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
#[test]
fn test_merged_index_has_blockwand() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let text = builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?;
let happy_term = Term::from_field_text(text, "happy");
let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
for _ in 0..62 {
writer.add_document(doc!(text=>"hello happy tax payer"));
}
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
assert_nearly_equals!(term_scorer.score(), 0.0079681855);
for _ in 0..81 {
writer.add_document(doc!(text=>"hello happy tax payer"));
}
writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0)?;
            // the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
}
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect();
block_on(writer.merge(&segment_ids[..]))?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query
.specialized_weight(&searcher, true)
.specialized_scorer(segment_reader, 1.0)?;
        // the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
Ok(())
}

View File

@@ -29,9 +29,8 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy;
#[cfg(feature = "mmap")]
#[cfg(test)]
mod tests_mmap {
mod tests {
use crate::schema::{self, Schema};
use crate::{Index, Term};
@@ -40,7 +39,7 @@ mod tests_mmap {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// there must be one deleted document in the segment
index_writer.add_document(doc!(text_field=>"b"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));

View File

@@ -1,7 +1,6 @@
use super::IndexWriter;
use crate::Opstamp;
use futures::executor::block_on;
use slog::info;
/// A prepared commit
pub struct PreparedCommit<'a> {
@@ -32,7 +31,7 @@ impl<'a> PreparedCommit<'a> {
}
pub fn commit(self) -> crate::Result<Opstamp> {
info!(self.index_writer.logger(), "committing {}", self.opstamp);
info!("committing {}", self.opstamp);
let _ = block_on(
self.index_writer
.segment_updater()

View File

@@ -1,5 +1,3 @@
use slog::{warn, Logger};
use super::segment_register::SegmentRegister;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
@@ -44,9 +42,9 @@ impl SegmentRegisters {
///
/// It guarantees the atomicity of the
/// changes (merges especially)
#[derive(Default)]
pub struct SegmentManager {
registers: RwLock<SegmentRegisters>,
logger: Logger,
}
impl Debug for SegmentManager {
@@ -79,14 +77,12 @@ impl SegmentManager {
pub fn from_segments(
segment_metas: Vec<SegmentMeta>,
delete_cursor: &DeleteCursor,
logger: Logger,
) -> SegmentManager {
SegmentManager {
registers: RwLock::new(SegmentRegisters {
uncommitted: SegmentRegister::default(),
committed: SegmentRegister::new(segment_metas, delete_cursor),
}),
logger,
}
}
@@ -190,7 +186,7 @@ impl SegmentManager {
let segments_status = registers_lock
.segments_status(before_merge_segment_ids)
.ok_or_else(|| {
warn!(self.logger, "couldn't find segment in SegmentManager");
warn!("couldn't find segment in SegmentManager");
crate::TantivyError::InvalidArgument(
"The segments that were merged could not be found in the SegmentManager. \
This is not necessarily a bug, and can happen after a rollback for instance."

View File

@@ -8,16 +8,15 @@ use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`.
pub struct SegmentSerializer {
segment: Segment,
store_writer: StoreWriter,
fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: Option<FieldNormsSerializer>,
fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer,
}
impl SegmentSerializer {
/// Creates a new `SegmentSerializer`.
pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
pub fn for_segment(segment: &mut Segment) -> crate::Result<SegmentSerializer> {
let store_write = segment.open_write(SegmentComponent::STORE)?;
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
@@ -26,20 +25,15 @@ impl SegmentSerializer {
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer {
segment,
store_writer: StoreWriter::new(store_write),
fast_field_serializer,
fieldnorms_serializer: Some(fieldnorms_serializer),
fieldnorms_serializer,
postings_serializer,
})
}
pub fn segment(&self) -> &Segment {
&self.segment
}
/// Accessor to the `PostingsSerializer`.
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
&mut self.postings_serializer
@@ -50,11 +44,9 @@ impl SegmentSerializer {
&mut self.fast_field_serializer
}
/// Extract the field norm serializer.
///
/// Note the fieldnorms serializer can only be extracted once.
pub fn extract_fieldnorms_serializer(&mut self) -> Option<FieldNormsSerializer> {
self.fieldnorms_serializer.take()
/// Accessor to the field norm serializer.
pub fn get_fieldnorms_serializer(&mut self) -> &mut FieldNormsSerializer {
&mut self.fieldnorms_serializer
}
/// Accessor to the `StoreWriter`.
@@ -63,13 +55,11 @@ impl SegmentSerializer {
}
/// Finalize the segment serialization.
pub fn close(mut self) -> crate::Result<()> {
if let Some(fieldnorms_serializer) = self.extract_fieldnorms_serializer() {
fieldnorms_serializer.close()?;
}
pub fn close(self) -> crate::Result<()> {
self.fast_field_serializer.close()?;
self.postings_serializer.close()?;
self.store_writer.close()?;
self.fieldnorms_serializer.close()?;
Ok(())
}
}

View File

@@ -23,9 +23,9 @@ use futures::channel::oneshot;
use futures::executor::{ThreadPool, ThreadPoolBuilder};
use futures::future::Future;
use futures::future::TryFutureExt;
use slog::{debug, error, info, warn};
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::io::Write;
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -65,11 +65,12 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
///
/// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
let mut meta_json = serde_json::to_string_pretty(metas)?;
info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer.
meta_json.push_str("\n");
debug!(directory.logger(), "save meta"; "content"=>&meta_json);
directory.atomic_write(&META_FILEPATH, meta_json.as_bytes())?;
writeln!(&mut buffer)?;
directory.atomic_write(&META_FILEPATH, &buffer[..])?;
debug!("Saved metas {:?}", serde_json::to_string_pretty(&metas));
Ok(())
}
@@ -96,6 +97,7 @@ impl Deref for SegmentUpdater {
async fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
index
.directory_mut()
@@ -105,12 +107,14 @@ async fn garbage_collect_files(
/// Merges the list of segments given in the `segment_entries`.
/// This function runs in the calling thread and is computationally expensive.
fn merge(
merged_segment: Segment,
index: &Index,
mut segment_entries: Vec<SegmentEntry>,
target_opstamp: Opstamp,
) -> crate::Result<SegmentEntry> {
    // First we apply all of the deletes to the merged segment, up to the target opstamp.
// first we need to apply deletes to our segment.
let mut merged_segment = index.new_segment();
    // First we apply all of the deletes to the merged segment, up to the target opstamp.
for segment_entry in &mut segment_entries {
let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
@@ -127,13 +131,12 @@ fn merge(
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
let num_docs = merger.write(segment_serializer)?;
let merged_segment_id = merged_segment.id();
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
let segment_meta = index.new_segment_meta(merged_segment_id, num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
}
@@ -163,8 +166,7 @@ impl SegmentUpdater {
delete_cursor: &DeleteCursor,
) -> crate::Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?;
let segment_manager =
SegmentManager::from_segments(segments, delete_cursor, index.logger().clone());
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new()
.name_prefix("segment_updater")
.pool_size(1)
@@ -384,18 +386,7 @@ impl SegmentUpdater {
.segment_manager
.start_merge(merge_operation.segment_ids())?;
let segment_ids_str: String = merge_operation
.segment_ids()
.iter()
.map(|segment_id| segment_id.to_string())
.collect::<Vec<String>>()
.join(",");
let merged_segment = self.index.new_segment();
let logger = self.index.logger().new(slog::o!("segments"=>segment_ids_str, "merged-segment"=>merged_segment.id().to_string()));
let num_merges: usize = self.merge_operations.list().len();
slog::info!(&logger, "merge"; "stage"=>"start", "num-merges" => num_merges);
info!("Starting merge - {:?}", merge_operation.segment_ids());
let (merging_future_send, merging_future_recv) =
oneshot::channel::<crate::Result<SegmentMeta>>();
@@ -406,20 +397,22 @@ impl SegmentUpdater {
            // as well as which segment is currently being merged and therefore should not be
            // a candidate for another merge.
match merge(
merged_segment,
&segment_updater.index,
segment_entries,
merge_operation.target_opstamp(),
) {
Ok(after_merge_segment_entry) => {
info!(&logger, "merge"; "stage" => "end");
let segment_meta = segment_updater
.end_merge(merge_operation, after_merge_segment_entry)
.await;
let _send_result = merging_future_send.send(segment_meta);
}
Err(e) => {
error!(&logger, "merge"; "stage" => "fail", "cause"=>e.to_string());
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids().to_vec(),
e
);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
@@ -460,12 +453,11 @@ impl SegmentUpdater {
.collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter());
let logger = self.index.logger();
for merge_operation in merge_candidates {
if let Err(err) = self.start_merge(merge_operation) {
warn!(
logger,
"merge-start-fail (not fatal, not necessarily a problem)"; "reason" => format!("{}", err),
"Starting the merge failed for the following reason. This is not fatal. {}",
err
);
}
}
@@ -478,11 +470,8 @@ impl SegmentUpdater {
) -> impl Future<Output = crate::Result<SegmentMeta>> {
let segment_updater = self.clone();
let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
let logger = self.index.logger().new(
slog::o!("segment"=>after_merge_segment_meta.id().to_string(),
"delete-opstamp"=>after_merge_segment_meta.delete_opstamp()),
);
let end_merge_future = self.schedule_future(async move {
info!("End merge {:?}", after_merge_segment_entry.meta());
{
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
if let Some(delete_operation) = delete_cursor.get() {
@@ -496,7 +485,6 @@ impl SegmentUpdater {
committed_opstamp,
) {
error!(
logger,
"Merge of {:?} was cancelled (advancing deletes failed): {:?}",
merge_operation.segment_ids(),
e
@@ -533,7 +521,7 @@ impl SegmentUpdater {
///
/// Upon termination of the current merging threads,
/// merge opportunity may appear.
///
//
/// We keep waiting until the merge policy judges that
/// no opportunity is available.
///
@@ -566,7 +554,7 @@ mod tests {
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
{
@@ -619,7 +607,7 @@ mod tests {
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for _ in 0..100 {
@@ -690,7 +678,7 @@ mod tests {
let index = Index::create_in_ram(schema);
// writing the segment
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for _ in 0..100 {

View File

@@ -1,7 +1,8 @@
use super::operation::AddOperation;
use crate::core::Segment;
use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter;
@@ -13,10 +14,10 @@ use crate::schema::{Field, FieldEntry};
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp;
use crate::{core::Segment, tokenizer::MAX_TOKEN_LEN};
use crate::{DocId, SegmentComponent};
use std::io;
use std::str;
/// Computes the initial size of the hash table.
///
@@ -47,7 +48,6 @@ pub struct SegmentWriter {
fieldnorms_writer: FieldNormsWriter,
doc_opstamps: Vec<Opstamp>,
tokenizers: Vec<Option<TextAnalyzer>>,
term_buffer: Term,
}
impl SegmentWriter {
@@ -62,12 +62,11 @@ impl SegmentWriter {
/// - schema
pub fn for_segment(
memory_budget: usize,
segment: Segment,
mut segment: Segment,
schema: &Schema,
) -> crate::Result<SegmentWriter> {
let tokenizer_manager = segment.index().tokenizers().clone();
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(segment)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema
.fields()
@@ -77,7 +76,7 @@ impl SegmentWriter {
.get_indexing_options()
.and_then(|text_index_option| {
let tokenizer_name = &text_index_option.tokenizer();
tokenizer_manager.get(tokenizer_name)
segment.index().tokenizers().get(tokenizer_name)
}),
_ => None,
},
@@ -91,7 +90,6 @@ impl SegmentWriter {
fast_field_writers: FastFieldsWriter::from_schema(schema),
doc_opstamps: Vec::with_capacity(1_000),
tokenizers,
term_buffer: Term::new(),
})
}
@@ -129,29 +127,24 @@ impl SegmentWriter {
if !field_options.is_indexed() {
continue;
}
let (term_buffer, multifield_postings) =
(&mut self.term_buffer, &mut self.multifield_postings);
match *field_options.field_type() {
FieldType::HierarchicalFacet => {
term_buffer.set_field(field);
let facets =
field_values
.iter()
.flat_map(|field_value| match *field_value.value() {
Value::Facet(ref facet) => Some(facet.encoded_str()),
_ => {
panic!("Expected hierarchical facet");
}
});
let facets: Vec<&str> = field_values
.iter()
.flat_map(|field_value| match *field_value.value() {
Value::Facet(ref facet) => Some(facet.encoded_str()),
_ => {
panic!("Expected hierarchical facet");
}
})
.collect();
let mut term = Term::for_field(field); // we set the Term
for fake_str in facets {
let mut unordered_term_id_opt = None;
FacetTokenizer.token_stream(fake_str).process(&mut |token| {
if token.text.len() > MAX_TOKEN_LEN {
return;
}
term_buffer.set_text(&token.text);
term.set_text(&token.text);
let unordered_term_id =
multifield_postings.subscribe(doc_id, &term_buffer);
self.multifield_postings.subscribe(doc_id, &term);
unordered_term_id_opt = Some(unordered_term_id);
});
if let Some(unordered_term_id) = unordered_term_id_opt {
@@ -174,6 +167,7 @@ impl SegmentWriter {
if let Some(last_token) = tok_str.tokens.last() {
total_offset += last_token.offset_to;
}
token_streams
.push(PreTokenizedStream::from(tok_str.clone()).into());
}
@@ -183,6 +177,7 @@ impl SegmentWriter {
{
offsets.push(total_offset);
total_offset += text.len();
token_streams.push(tokenizer.token_stream(text));
}
}
@@ -194,12 +189,8 @@ impl SegmentWriter {
0
} else {
let mut token_stream = TokenStreamChain::new(offsets, token_streams);
multifield_postings.index_text(
doc_id,
field,
&mut token_stream,
term_buffer,
)
self.multifield_postings
.index_text(doc_id, field, &mut token_stream)
};
self.fieldnorms_writer.record(doc_id, field, num_tokens);
@@ -207,36 +198,44 @@ impl SegmentWriter {
FieldType::U64(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
term_buffer.set_field(field_value.field());
term_buffer.set_u64(field_value.value().u64_value());
multifield_postings.subscribe(doc_id, &term_buffer);
let term = Term::from_field_u64(
field_value.field(),
field_value.value().u64_value(),
);
self.multifield_postings.subscribe(doc_id, &term);
}
}
}
FieldType::Date(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
term_buffer.set_field(field_value.field());
term_buffer.set_i64(field_value.value().date_value().timestamp());
multifield_postings.subscribe(doc_id, &term_buffer);
let term = Term::from_field_i64(
field_value.field(),
field_value.value().date_value().timestamp(),
);
self.multifield_postings.subscribe(doc_id, &term);
}
}
}
FieldType::I64(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
term_buffer.set_field(field_value.field());
term_buffer.set_i64(field_value.value().i64_value());
multifield_postings.subscribe(doc_id, &term_buffer);
let term = Term::from_field_i64(
field_value.field(),
field_value.value().i64_value(),
);
self.multifield_postings.subscribe(doc_id, &term);
}
}
}
FieldType::F64(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
term_buffer.set_field(field_value.field());
term_buffer.set_f64(field_value.value().f64_value());
multifield_postings.subscribe(doc_id, &term_buffer);
let term = Term::from_field_f64(
field_value.field(),
field_value.value().f64_value(),
);
self.multifield_postings.subscribe(doc_id, &term);
}
}
}
@@ -281,16 +280,9 @@ fn write(
fieldnorms_writer: &FieldNormsWriter,
mut serializer: SegmentSerializer,
) -> crate::Result<()> {
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
fieldnorms_writer.serialize(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FIELDNORMS)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_map =
multifield_postings.serialize(serializer.get_postings_serializer(), fieldnorm_readers)?;
let term_ord_map = multifield_postings.serialize(serializer.get_postings_serializer())?;
fast_field_writers.serialize(serializer.get_fast_field_serializer(), &term_ord_map)?;
fieldnorms_writer.serialize(serializer.get_fieldnorms_serializer())?;
serializer.close()?;
Ok(())
}

View File

@@ -102,7 +102,10 @@
extern crate serde_json;
#[macro_use]
extern crate thiserror;
extern crate log;
#[macro_use]
extern crate failure;
#[cfg(all(test, feature = "unstable"))]
extern crate test;
@@ -145,7 +148,6 @@ pub mod schema;
pub mod space_usage;
pub mod store;
pub mod termdict;
pub use slog;
mod reader;
@@ -171,7 +173,7 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 2;
const INDEX_FORMAT_VERSION: u32 = 1;
/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -243,10 +245,11 @@ pub type DocId = u32;
/// with opstamp `n+1`.
pub type Opstamp = u64;
/// A Score that represents the relevance of the document to the query
/// An f32 that represents the relevance of the document to the query
///
/// This is modelled internally as a `f32`. The larger the number, the more relevant
/// the document to the search query.
/// This is modelled internally as a `f32`. The
/// larger the number, the more relevant the document
/// to the search
pub type Score = f32;
/// A `SegmentLocalId` identifies a segment.
@@ -279,6 +282,7 @@ pub struct DocAddress(pub SegmentLocalId, pub DocId);
#[cfg(test)]
mod tests {
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::core::SegmentReader;
use crate::docset::{DocSet, TERMINATED};
@@ -286,6 +290,7 @@ mod tests {
use crate::schema::*;
use crate::DocAddress;
use crate::Index;
use crate::IndexWriter;
use crate::Postings;
use crate::ReloadPolicy;
use rand::distributions::Bernoulli;
@@ -293,26 +298,17 @@ mod tests {
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
    /// Checks if left and right are close to one another.
/// Panics if the two values are more than 0.5% apart.
#[macro_export]
macro_rules! assert_nearly_equals {
($left:expr, $right:expr) => {{
match (&$left, &$right) {
(left_val, right_val) => {
let diff = (left_val - right_val).abs();
let add = left_val.abs() + right_val.abs();
if diff > 0.0005 * add {
panic!(
r#"assertion failed: `(left ~= right)`
left: `{:?}`,
right: `{:?}`"#,
&*left_val, &*right_val
)
}
}
}
}};
pub fn assert_nearly_equals(expected: f32, val: f32) {
assert!(
nearly_equals(val, expected),
"Got {}, expected {}.",
val,
expected
);
}
pub fn nearly_equals(a: f32, b: f32) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs()
}
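    // Illustrative only: the tolerance above is relative, about 0.05% of the
    // combined magnitude of the two values.
    #[test]
    fn test_nearly_equals_example() {
        assert!(nearly_equals(100.0, 100.04));
        assert!(!nearly_equals(100.0, 101.0));
    }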
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
@@ -350,14 +346,14 @@ mod tests {
#[test]
#[cfg(feature = "mmap")]
fn test_indexing() -> crate::Result<()> {
fn test_indexing() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap();
{
// writing the segment
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
let doc = doc!(text_field=>"af b");
index_writer.add_document(doc);
@@ -372,30 +368,29 @@ mod tests {
}
assert!(index_writer.commit().is_ok());
}
Ok(())
}
#[test]
fn test_docfreq1() -> crate::Result<()> {
fn test_docfreq1() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?;
index_writer.commit().unwrap();
}
{
index_writer.add_document(doc!(text_field=>"a"));
index_writer.add_document(doc!(text_field=>"a a"));
index_writer.commit()?;
index_writer.commit().unwrap();
}
{
index_writer.add_document(doc!(text_field=>"c"));
index_writer.commit()?;
index_writer.commit().unwrap();
}
{
let reader = index.reader()?;
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let term_a = Term::from_field_text(text_field, "a");
assert_eq!(searcher.doc_freq(&term_a), 3);
@@ -406,50 +401,67 @@ mod tests {
let term_d = Term::from_field_text(text_field, "d");
assert_eq!(searcher.doc_freq(&term_d), 0);
}
Ok(())
}
#[test]
fn test_fieldnorm_no_docs_with_field() -> crate::Result<()> {
fn test_fieldnorm_no_docs_with_field() {
let mut schema_builder = Schema::builder();
let title_field = schema_builder.add_text_field("title", TEXT);
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.commit()?;
let index_reader = index.reader()?;
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?;
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
{
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?;
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
let index_reader = index.reader().unwrap();
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
}
{
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field);
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
}
}
Ok(())
}
#[test]
fn test_fieldnorm() -> crate::Result<()> {
fn test_fieldnorm() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!());
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?;
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
Ok(())
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
{
let doc = doc!();
index_writer.add_document(doc);
}
{
let doc = doc!(text_field=>"a b");
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
}
{
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
}
}
fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
@@ -464,7 +476,7 @@ mod tests {
}
#[test]
fn test_delete_postings1() -> crate::Result<()> {
fn test_delete_postings1() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let term_abcd = Term::from_field_text(text_field, "abcd");
@@ -480,7 +492,7 @@ mod tests {
.unwrap();
{
// writing the segment
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// 0
index_writer.add_document(doc!(text_field=>"a b"));
// 1
@@ -496,10 +508,10 @@ mod tests {
index_writer.add_document(doc!(text_field=>" b c"));
// 5
index_writer.add_document(doc!(text_field=>" a"));
index_writer.commit()?;
index_writer.commit().unwrap();
}
{
reader.reload()?;
reader.reload().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(text_field);
@@ -527,15 +539,15 @@ mod tests {
}
{
// writing the segment
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// 0
index_writer.add_document(doc!(text_field=>"a b"));
// 1
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback()?;
index_writer.rollback().unwrap();
}
{
reader.reload()?;
reader.reload().unwrap();
let searcher = reader.searcher();
let seg_reader = searcher.segment_reader(0);
let inverted_index = seg_reader.inverted_index(term_abcd.field());
@@ -564,15 +576,15 @@ mod tests {
}
{
// writing the segment
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.rollback()?;
index_writer.rollback().unwrap();
index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.commit()?;
index_writer.commit().unwrap();
}
{
reader.reload()?;
reader.reload().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(term_abcd.field());
@@ -604,20 +616,19 @@ mod tests {
assert!(!advance_undeleted(&mut postings, segment_reader));
}
}
Ok(())
}
#[test]
fn test_indexed_u64() -> crate::Result<()> {
fn test_indexed_u64() {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("value", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64));
index_writer.commit()?;
let reader = index.reader()?;
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let term = Term::from_field_u64(field, 1u64);
let mut postings = searcher
@@ -627,21 +638,20 @@ mod tests {
.unwrap();
assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED);
Ok(())
}
#[test]
fn test_indexed_i64() -> crate::Result<()> {
fn test_indexed_i64() {
let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_i64_field("value", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let negative_val = -1i64;
index_writer.add_document(doc!(value_field => negative_val));
index_writer.commit()?;
let reader = index.reader()?;
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let term = Term::from_field_i64(value_field, negative_val);
let mut postings = searcher
@@ -651,21 +661,20 @@ mod tests {
.unwrap();
assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED);
Ok(())
}
#[test]
fn test_indexed_f64() -> crate::Result<()> {
fn test_indexed_f64() {
let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_f64_field("value", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let val = std::f64::consts::PI;
index_writer.add_document(doc!(value_field => val));
index_writer.commit()?;
let reader = index.reader()?;
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let term = Term::from_field_f64(value_field, val);
let mut postings = searcher
@@ -675,29 +684,26 @@ mod tests {
.unwrap();
assert_eq!(postings.doc(), 0);
assert_eq!(postings.advance(), TERMINATED);
Ok(())
}
#[test]
fn test_indexedfield_not_in_documents() -> crate::Result<()> {
fn test_indexedfield_not_in_documents() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let absent_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a"));
assert!(index_writer.commit().is_ok());
let reader = index.reader()?;
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(absent_field); //< should not panic
assert_eq!(inverted_index.terms().num_terms(), 0);
Ok(())
segment_reader.inverted_index(absent_field); //< should not panic
}
#[test]
fn test_delete_postings2() -> crate::Result<()> {
fn test_delete_postings2() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
@@ -705,40 +711,53 @@ mod tests {
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()?;
.try_into()
.unwrap();
// writing the segment
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"63"));
index_writer.add_document(doc!(text_field=>"70"));
index_writer.add_document(doc!(text_field=>"34"));
index_writer.add_document(doc!(text_field=>"1"));
index_writer.add_document(doc!(text_field=>"38"));
index_writer.add_document(doc!(text_field=>"33"));
index_writer.add_document(doc!(text_field=>"40"));
index_writer.add_document(doc!(text_field=>"17"));
index_writer.delete_term(Term::from_field_text(text_field, "38"));
index_writer.delete_term(Term::from_field_text(text_field, "34"));
index_writer.commit()?;
reader.reload()?;
assert_eq!(reader.searcher().num_docs(), 6);
Ok(())
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
let doc = doc!(text_field=>val);
index_writer.add_document(doc);
};
let remove_document = |index_writer: &mut IndexWriter, val: &'static str| {
let delterm = Term::from_field_text(text_field, val);
index_writer.delete_term(delterm);
};
add_document(&mut index_writer, "63");
add_document(&mut index_writer, "70");
add_document(&mut index_writer, "34");
add_document(&mut index_writer, "1");
add_document(&mut index_writer, "38");
add_document(&mut index_writer, "33");
add_document(&mut index_writer, "40");
add_document(&mut index_writer, "17");
remove_document(&mut index_writer, "38");
remove_document(&mut index_writer, "34");
index_writer.commit().unwrap();
reader.reload().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 6);
}
#[test]
fn test_termfreq() -> crate::Result<()> {
fn test_termfreq() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"af af af bc bc"));
index_writer.commit()?;
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let doc = doc!(text_field=>"af af af bc bc");
index_writer.add_document(doc);
index_writer.commit().unwrap();
}
{
let index_reader = index.reader()?;
let index_reader = index.reader().unwrap();
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field);
@@ -754,63 +773,63 @@ mod tests {
assert_eq!(postings.term_freq(), 3);
assert_eq!(postings.advance(), TERMINATED);
}
Ok(())
}
#[test]
fn test_searcher_1() -> crate::Result<()> {
fn test_searcher_1() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
// writing the segment
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"af af af b"));
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
searcher
.search(&query, &TEST_COLLECTOR_WITH_SCORE)
.map(|topdocs| topdocs.docs().to_vec())
};
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
vec![DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
vec![DocAddress(0, 0)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "c")])?,
vec![DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "d")])?,
vec![DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![
Term::from_field_text(text_field, "b"),
Term::from_field_text(text_field, "a"),
])?,
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
);
Ok(())
let reader = index.reader().unwrap();
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"af af af b"));
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit().unwrap();
}
{
reader.reload().unwrap();
let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
let topdocs = searcher.search(&query, &TEST_COLLECTOR_WITH_SCORE).unwrap();
topdocs.docs().to_vec()
};
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")]),
vec![DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")]),
vec![DocAddress(0, 0)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")]),
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "c")]),
vec![DocAddress(0, 1), DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "d")]),
vec![DocAddress(0, 2)]
);
assert_eq!(
get_doc_ids(vec![
Term::from_field_text(text_field, "b"),
Term::from_field_text(text_field, "a"),
]),
vec![DocAddress(0, 0), DocAddress(0, 1), DocAddress(0, 2)]
);
}
}
#[test]
fn test_searcher_2() -> crate::Result<()> {
fn test_searcher_2() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
@@ -818,17 +837,19 @@ mod tests {
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()?;
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0u64);
// writing the segment
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(text_field=>"af b"));
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit()?;
reader.reload()?;
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"af b"));
index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc!(text_field=>"a b c d"));
index_writer.commit().unwrap();
}
reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 3u64);
Ok(())
}
#[test]
@@ -850,7 +871,7 @@ mod tests {
}
#[test]
fn test_wrong_fast_field_type() -> crate::Result<()> {
fn test_wrong_fast_field_type() {
let mut schema_builder = Schema::builder();
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
@@ -860,14 +881,14 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
{
let document =
doc!(fast_field_unsigned => 4u64, fast_field_signed=>4i64, fast_field_float=>4f64);
index_writer.add_document(document);
index_writer.commit()?;
index_writer.commit().unwrap();
}
let reader = index.reader()?;
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
{
@@ -906,12 +927,11 @@ mod tests {
let fast_field_reader = fast_field_reader_opt.unwrap();
assert_eq!(fast_field_reader.get(0), 4f64)
}
Ok(())
}
// motivated by #729
#[test]
fn test_update_via_delete_insert() -> crate::Result<()> {
fn test_update_via_delete_insert() {
use crate::collector::Count;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
@@ -925,17 +945,17 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader()?;
let index_reader = index.reader().unwrap();
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id));
}
index_writer.commit()?;
index_writer.commit().unwrap();
index_reader.reload()?;
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
@@ -946,11 +966,12 @@ mod tests {
// update the 10 elements by deleting and re-adding
for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit()?;
index_reader.reload()?;
index_writer.add_document(doc!(id => doc_id));
index_writer.commit()?;
index_reader.reload()?;
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let doc = doc!(id => doc_id);
index_writer.add_document(doc);
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
// The number of document should be stable.
assert_eq!(
@@ -959,7 +980,7 @@ mod tests {
);
}
index_reader.reload()?;
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
@@ -968,18 +989,12 @@ mod tests {
.collect();
block_on(index_writer.merge(&segment_ids)).unwrap();
index_reader.reload()?;
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(searcher.search(&AllQuery, &Count)?, DOC_COUNT as usize);
Ok(())
}
#[test]
fn test_validate_checksum() -> crate::Result<()> {
let index_path = tempfile::tempdir().expect("dir");
let schema = Schema::builder().build();
let index = Index::create_in_dir(&index_path, schema)?;
assert!(index.validate_checksum()?.is_empty());
Ok(())
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
}

View File

@@ -78,7 +78,6 @@ impl Positions {
}
}
#[derive(Clone)]
pub struct PositionReader {
skip_read: OwnedRead,
position_read: OwnedRead,

View File

@@ -1,21 +1,11 @@
use crate::common::{BinarySerializable, VInt};
use crate::directory::ReadOnlySource;
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{
AlignedBuffer, BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE,
};
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
if let Some(first) = it.next() {
Some(it.fold(first, Score::max))
} else {
None
}
}
use crate::{DocId, TERMINATED};
/// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents.
@@ -24,18 +14,16 @@ fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
///
/// While it is useful for some very specific high-performance
/// use cases, you should prefer using `SegmentPostings` for most usage.
#[derive(Clone)]
pub struct BlockSegmentPostings {
pub(crate) doc_decoder: BlockDecoder,
loaded_offset: usize,
freq_decoder: BlockDecoder,
freq_reading_option: FreqReadingOption,
block_max_score_cache: Option<Score>,
doc_freq: u32,
doc_freq: usize,
data: ReadOnlySource,
pub(crate) skip_reader: SkipReader,
skip_reader: SkipReader,
}
fn decode_bitpacked_block(
@@ -59,14 +47,10 @@ fn decode_vint_block(
doc_offset: DocId,
num_vint_docs: usize,
) {
let num_consumed_bytes =
doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs, TERMINATED);
doc_decoder.clear();
let num_consumed_bytes = doc_decoder.uncompress_vint_sorted(data, doc_offset, num_vint_docs);
if let Some(freq_decoder) = freq_decoder_opt {
freq_decoder.uncompress_vint_unsorted(
&data[num_consumed_bytes..],
num_vint_docs,
TERMINATED,
);
freq_decoder.uncompress_vint_unsorted(&data[num_consumed_bytes..], num_vint_docs);
}
}
@@ -105,63 +89,20 @@ impl BlockSegmentPostings {
None => SkipReader::new(ReadOnlySource::empty(), doc_freq, record_option),
};
let doc_freq = doc_freq as usize;
let mut block_segment_postings = BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: std::usize::MAX,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option,
block_max_score_cache: None,
doc_freq,
data: postings_data,
skip_reader,
};
block_segment_postings.load_block();
block_segment_postings.advance();
block_segment_postings
}
/// Returns the block_max_score for the current block.
/// It does not require the block to be loaded. For instance, it is ok to call this method
/// after having called `.shallow_advance(..)`.
///
/// See `TermScorer::block_max_score(..)` for more information.
pub fn block_max_score(
&mut self,
fieldnorm_reader: &FieldNormReader,
bm25_weight: &BM25Weight,
) -> Score {
if let Some(score) = self.block_max_score_cache {
return score;
}
if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
// if we are on a full block, the skip reader should have the block max information
// for us
self.block_max_score_cache = Some(skip_reader_max_score);
return skip_reader_max_score;
}
// this is the last block of the segment posting list.
// If it is actually loaded, we can compute block max manually.
if self.block_is_loaded() {
let docs = self.doc_decoder.output_array().iter().cloned();
let freqs = self.freq_decoder.output_array().iter().cloned();
let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
});
let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
self.block_max_score_cache = Some(block_max_score);
return block_max_score;
}
// We do not have access to any good block max value. We return bm25_weight.max_score()
// as it is a valid upperbound.
//
// We do not cache it however, so that it gets computed once the block is loaded.
bm25_weight.max_score()
}
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.freq_reading_option
}
// Resets the block segment postings on another position
// in the postings file.
//
@@ -175,23 +116,21 @@ impl BlockSegmentPostings {
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: ReadOnlySource) {
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, postings_data);
self.data = ReadOnlySource::new(postings_data);
self.block_max_score_cache = None;
self.loaded_offset = std::usize::MAX;
self.loaded_offset = std::usize::MAX;
if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq);
} else {
self.skip_reader.reset(ReadOnlySource::empty(), doc_freq);
}
self.doc_freq = doc_freq;
self.load_block();
self.doc_freq = doc_freq as usize;
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
/// Returns the document frequency associated with this block postings.
///
/// This `doc_freq` is simply the sum of the lengths of all of the blocks,
/// and it does not take into account deleted documents.
pub fn doc_freq(&self) -> u32 {
pub fn doc_freq(&self) -> usize {
self.doc_freq
}
@@ -201,20 +140,11 @@ impl BlockSegmentPostings {
/// returned by `.docs()` is empty.
#[inline]
pub fn docs(&self) -> &[DocId] {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_array()
}
/// Returns a full block, regardless of whether the block is complete or incomplete (
/// as it happens for the last block of the posting list).
///
/// In the latter case, the block is guaranteed to be padded with the sentinel value:
/// `TERMINATED`. The array is also guaranteed to be aligned on 16 bytes = 128 bits.
///
/// This method is useful to run SSE2 linear search.
#[inline(always)]
pub(crate) fn docs_aligned(&self) -> &AlignedBuffer {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_aligned()
}
@@ -227,14 +157,12 @@ impl BlockSegmentPostings {
/// Return the array of `term freq` in the block.
#[inline]
pub fn freqs(&self) -> &[u32] {
debug_assert!(self.block_is_loaded());
self.freq_decoder.output_array()
}
/// Return the frequency at index `idx` of the block.
#[inline]
pub fn freq(&self, idx: usize) -> u32 {
debug_assert!(self.block_is_loaded());
self.freq_decoder.output(idx)
}
@@ -245,40 +173,23 @@ impl BlockSegmentPostings {
/// of any number between 1 and `NUM_DOCS_PER_BLOCK - 1`
#[inline]
pub fn block_len(&self) -> usize {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_len
}
pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Position on a block that may contain `target_doc`.
///
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last, incomplete VInt block.
pub fn seek(&mut self, target_doc: DocId) {
self.shallow_seek(target_doc);
self.skip_reader.seek(target_doc);
self.load_block();
}
pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Dangerous API! This calls seek on the skip list,
/// but does not `.load_block()` afterwards.
///
/// `.load_block()` needs to be called manually afterwards.
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last, incomplete VInt block.
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
if self.skip_reader.seek(target_doc) {
self.block_max_score_cache = None;
}
}
pub(crate) fn block_is_loaded(&self) -> bool {
self.loaded_offset == self.skip_reader.byte_offset()
}
pub(crate) fn load_block(&mut self) {
fn load_block(&mut self) {
let offset = self.skip_reader.byte_offset();
if self.loaded_offset == offset {
return;
@@ -303,14 +214,7 @@ impl BlockSegmentPostings {
tf_num_bits,
);
}
BlockInfo::VInt { num_docs } => {
let data = {
if num_docs == 0 {
&[]
} else {
&self.data.as_slice()[offset..]
}
};
BlockInfo::VInt(num_vint_docs) => {
decode_vint_block(
&mut self.doc_decoder,
if let FreqReadingOption::ReadFreq = self.freq_reading_option {
@@ -318,9 +222,9 @@ impl BlockSegmentPostings {
} else {
None
},
data,
&self.data.as_slice()[offset..],
self.skip_reader.last_doc_in_previous_block,
num_docs as usize,
num_vint_docs as usize,
);
}
}
@@ -329,20 +233,21 @@ impl BlockSegmentPostings {
/// Advance to the next block.
///
/// Returns false iff there are no remaining blocks.
pub fn advance(&mut self) {
self.skip_reader.advance();
self.block_max_score_cache = None;
pub fn advance(&mut self) -> bool {
if !self.skip_reader.advance() {
return false;
}
self.load_block();
true
}
/// Returns an empty segment postings object
pub fn empty() -> BlockSegmentPostings {
BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
loaded_offset: 0,
loaded_offset: std::usize::MAX,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option: FreqReadingOption::NoFreq,
block_max_score_cache: None,
doc_freq: 0,
data: ReadOnlySource::new(vec![]),
skip_reader: SkipReader::new(ReadOnlySource::new(vec![]), 0, IndexRecordOption::Basic),
@@ -368,10 +273,8 @@ mod tests {
#[test]
fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty();
assert_eq!(postings.doc(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.doc_freq(), 0);
assert_eq!(postings.len(), 0);
}
@@ -391,10 +294,7 @@ mod tests {
#[test]
fn test_empty_block_segment_postings() {
let mut postings = BlockSegmentPostings::empty();
assert!(postings.docs().is_empty());
assert_eq!(postings.doc_freq(), 0);
postings.advance();
assert!(postings.docs().is_empty());
assert!(!postings.advance());
assert_eq!(postings.doc_freq(), 0);
}
@@ -406,14 +306,13 @@ mod tests {
assert_eq!(block_segments.doc_freq(), 100_000);
loop {
let block = block_segments.docs();
if block.is_empty() {
break;
}
for (i, doc) in block.iter().cloned().enumerate() {
assert_eq!(offset + (i as u32), doc);
}
offset += block.len() as u32;
block_segments.advance();
if !block_segments.advance() {
break;
}
}
}
@@ -455,7 +354,7 @@ mod tests {
let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut last_doc = 0u32;
for &doc in docs {
for _ in last_doc..doc {
@@ -474,7 +373,7 @@ mod tests {
}
#[test]
fn test_block_segment_postings_seek() {
fn test_block_segment_postings_skip2() {
let mut docs = vec![0];
for i in 0..1300 {
docs.push((i * i / 100) + i);
@@ -496,7 +395,7 @@ mod tests {
let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// create two postings lists, one containing even numbers,
// the other containing odd numbers.
for i in 0..6 {
@@ -522,6 +421,7 @@ mod tests {
let term_info = inverted_index.get_term_info(&term).unwrap();
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments);
}
assert!(block_segments.advance());
assert_eq!(block_segments.docs(), &[1, 3, 5]);
}
}
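A minimal sketch (not part of the diff) of the block-iteration pattern exercised by the tests above, assuming the `advance() -> bool` convention and a cursor whose first block is already loaded; `sum_all_docs` is a hypothetical helper, not a tantivy API:

fn sum_all_docs(block_postings: &mut BlockSegmentPostings) -> u64 {
    let mut total: u64 = 0;
    loop {
        // `docs()` exposes the documents of the currently loaded block.
        for &doc in block_postings.docs() {
            total += u64::from(doc);
        }
        // `advance()` loads the next block and returns false once none remains.
        if !block_postings.advance() {
            break;
        }
    }
    total
}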


@@ -1,4 +1,5 @@
use crate::common::FixedSize;
use crate::docset::TERMINATED;
use bitpacking::{BitPacker, BitPacker4x};
pub const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
@@ -52,10 +53,8 @@ impl BlockEncoder {
/// We ensure that the OutputBuffer is aligned on 128 bits
/// in order to run SSE2 linear search on it.
#[repr(align(128))]
#[derive(Clone)]
pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
#[derive(Clone)]
pub struct BlockDecoder {
bitpacker: BitPacker4x,
output: AlignedBuffer,
@@ -108,6 +107,10 @@ impl BlockDecoder {
pub fn output(&self, idx: usize) -> u32 {
self.output.0[idx]
}
pub fn clear(&mut self) {
self.output.0.iter_mut().for_each(|el| *el = TERMINATED);
}
}
pub trait VIntEncoder {
@@ -144,14 +147,11 @@ pub trait VIntDecoder {
/// For instance, if the delta-encoded values are `1, 3, 9`, and the
/// `offset` is 5, then the output will be:
/// `5 + 1 = 6, 6 + 3 = 9, 9 + 9 = 18`
///
/// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
fn uncompress_vint_sorted(
&mut self,
compressed_data: &[u8],
offset: u32,
num_els: usize,
padding: u32,
) -> usize;
/// Uncompress an array of `u32s`, compressed using variable
@@ -159,14 +159,7 @@ pub trait VIntDecoder {
///
/// The method takes a number of int to decompress, and returns
/// the amount of bytes that were read to decompress them.
///
/// The value given in `padding` will be used to fill the remaining `128 - num_els` values.
fn uncompress_vint_unsorted(
&mut self,
compressed_data: &[u8],
num_els: usize,
padding: u32,
) -> usize;
fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize;
}
impl VIntEncoder for BlockEncoder {
@@ -185,21 +178,13 @@ impl VIntDecoder for BlockDecoder {
compressed_data: &[u8],
offset: u32,
num_els: usize,
padding: u32,
) -> usize {
self.output_len = num_els;
self.output.0.iter_mut().for_each(|el| *el = padding);
vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
}
fn uncompress_vint_unsorted(
&mut self,
compressed_data: &[u8],
num_els: usize,
padding: u32,
) -> usize {
fn uncompress_vint_unsorted(&mut self, compressed_data: &[u8], num_els: usize) -> usize {
self.output_len = num_els;
self.output.0.iter_mut().for_each(|el| *el = padding);
vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
}
}
@@ -208,7 +193,6 @@ impl VIntDecoder for BlockDecoder {
pub mod tests {
use super::*;
use crate::TERMINATED;
#[test]
fn test_encode_sorted_block() {
@@ -287,20 +271,18 @@ pub mod tests {
}
#[test]
fn test_encode_vint() {
const PADDING_VALUE: u32 = 234_234_345u32;
let expected_length = 154;
let mut encoder = BlockEncoder::new();
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
for offset in &[0u32, 1u32, 2u32] {
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
assert!(encoded_data.len() <= expected_length);
let mut decoder = BlockDecoder::default();
let consumed_num_bytes =
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
assert_eq!(consumed_num_bytes, encoded_data.len());
assert_eq!(input, decoder.output_array());
for i in input.len()..COMPRESSION_BLOCK_SIZE {
assert_eq!(decoder.output(i), PADDING_VALUE);
{
let expected_length = 154;
let mut encoder = BlockEncoder::new();
let input: Vec<u32> = (0u32..123u32).map(|i| 4 + i * 7 / 2).into_iter().collect();
for offset in &[0u32, 1u32, 2u32] {
let encoded_data = encoder.compress_vint_sorted(&input, *offset);
assert!(encoded_data.len() <= expected_length);
let mut decoder = BlockDecoder::default();
let consumed_num_bytes =
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len());
assert_eq!(consumed_num_bytes, encoded_data.len());
assert_eq!(input, decoder.output_array());
}
}
}
@@ -310,7 +292,6 @@ pub mod tests {
mod bench {
use super::*;
use crate::TERMINATED;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
@@ -341,7 +322,7 @@ mod bench {
let mut encoder = BlockEncoder::new();
let data = generate_array(COMPRESSION_BLOCK_SIZE, 0.1);
let (num_bits, compressed) = encoder.compress_block_sorted(&data, 0u32);
let mut decoder = BlockDecoder::default();
let mut decoder = BlockDecoder::new();
b.iter(|| {
decoder.uncompress_block_sorted(compressed, 0u32, num_bits);
});
@@ -376,9 +357,9 @@ mod bench {
let mut encoder = BlockEncoder::new();
let data = generate_array(NUM_INTS_BENCH_VINT, 0.001);
let compressed = encoder.compress_vint_sorted(&data, 0u32);
let mut decoder = BlockDecoder::default();
let mut decoder = BlockDecoder::new();
b.iter(|| {
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT, TERMINATED);
decoder.uncompress_vint_sorted(compressed, 0u32, NUM_INTS_BENCH_VINT);
});
}
}
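A standalone sketch (not from the diff) of the prefix-sum decoding described in the `uncompress_vint_sorted` docs above; `decode_sorted_deltas` is a hypothetical name used only for illustration:

fn decode_sorted_deltas(deltas: &[u32], offset: u32) -> Vec<u32> {
    let mut previous = offset;
    deltas
        .iter()
        .map(|&delta| {
            // Each decoded value is the previous value plus the stored delta.
            previous += delta;
            previous
        })
        .collect()
}

// Deltas [1, 3, 9] with an offset of 5 decode to [6, 9, 18],
// matching the example given in the trait documentation above.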


@@ -65,42 +65,45 @@ pub mod tests {
use std::iter;
#[test]
pub fn test_position_write() -> crate::Result<()> {
pub fn test_position_write() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment)?;
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4, None)?;
field_serializer.new_term("abc".as_bytes(), 12u32)?;
for doc_id in 0u32..120u32 {
let delta_positions = vec![1, 2, 3, 2];
field_serializer.write_doc(doc_id, 4, &delta_positions)?;
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap();
for doc_id in 0u32..120u32 {
let delta_positions = vec![1, 2, 3, 2];
field_serializer
.write_doc(doc_id, 4, &delta_positions)
.unwrap();
}
field_serializer.close_term().unwrap();
}
field_serializer.close_term()?;
posting_serializer.close()?;
let read = segment.open_read(SegmentComponent::POSITIONS)?;
posting_serializer.close().unwrap();
let read = segment.open_read(SegmentComponent::POSITIONS).unwrap();
assert!(read.len() <= 140);
Ok(())
}
#[test]
pub fn test_skip_positions() -> crate::Result<()> {
pub fn test_skip_positions() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut index_writer = index.writer_with_num_threads(1, 30_000_000).unwrap();
index_writer.add_document(doc!(title => r#"abc abc abc"#));
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
for _ in 0..1_000 {
index_writer.add_document(doc!(title => r#"abc abc abc"#));
}
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
index_writer.commit()?;
index_writer.commit().unwrap();
let searcher = index.reader()?.searcher();
let searcher = index.reader().unwrap().searcher();
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
let term = Term::from_field_text(title, "abc");
let mut positions = Vec::new();
@@ -155,7 +158,6 @@ pub mod tests {
postings.positions(&mut positions);
assert_eq!(&[0, 5], &positions[..]);
}
Ok(())
}
#[test]
@@ -176,7 +178,7 @@ pub mod tests {
.tokenizers()
.register("simple_no_truncation", SimpleTokenizer);
let reader = index.reader().unwrap();
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
{
index_writer.add_document(doc!(text_field=>exceeding_token_text));
@@ -205,7 +207,7 @@ pub mod tests {
}
#[test]
pub fn test_position_and_fieldnorm1() -> crate::Result<()> {
pub fn test_position_and_fieldnorm1() {
let mut positions = Vec::new();
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
@@ -217,38 +219,42 @@ pub mod tests {
let mut segment_writer =
SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
{
let mut doc = Document::default();
// checking that position works if the field has two values
doc.add_text(text_field, "a b a c a d a a.");
doc.add_text(text_field, "d d d d a");
let op = AddOperation {
opstamp: 0u64,
document: doc!(
text_field => "a b a c a d a a.",
text_field => "d d d d a"
),
document: doc,
};
segment_writer.add_document(op, &schema)?;
segment_writer.add_document(op, &schema).unwrap();
}
{
let mut doc = Document::default();
doc.add_text(text_field, "b a");
let op = AddOperation {
opstamp: 1u64,
document: doc!(text_field => "b a"),
document: doc,
};
segment_writer.add_document(op, &schema).unwrap();
}
for i in 2..1000 {
let mut text: String = iter::repeat("e ").take(i).collect();
let mut doc = Document::default();
let mut text = iter::repeat("e ").take(i).collect::<String>();
text.push_str(" a");
doc.add_text(text_field, &text);
let op = AddOperation {
opstamp: 2u64,
document: doc!(text_field => text),
document: doc,
};
segment_writer.add_document(op, &schema).unwrap();
}
segment_writer.finalize()?;
segment_writer.finalize().unwrap();
}
{
let segment_reader = SegmentReader::open(&segment)?;
let segment_reader = SegmentReader::open(&segment).unwrap();
{
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
for i in 2..1000 {
@@ -308,7 +314,6 @@ pub mod tests {
assert_eq!(postings_e.doc(), TERMINATED);
}
}
Ok(())
}
#[test]
@@ -319,7 +324,7 @@ pub mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "g b b d c g c"));
index_writer.add_document(doc!(text_field => "g a b b a d c g c"));
assert!(index_writer.commit().is_ok());
@@ -351,7 +356,7 @@ pub mod tests {
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for i in 0u64..num_docs as u64 {
let doc = doc!(value_field => 2u64, value_field => i % 2u64);
index_writer.add_document(doc);
@@ -422,7 +427,7 @@ pub mod tests {
// delete some of the documents
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.delete_term(term_0);
assert!(index_writer.commit().is_ok());
}
@@ -476,7 +481,7 @@ pub mod tests {
// delete everything else
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.delete_term(term_1);
assert!(index_writer.commit().is_ok());
}
@@ -519,7 +524,7 @@ pub mod tests {
let index = Index::create_in_ram(schema);
let posting_list_size = 1_000_000;
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for _ in 0..posting_list_size {
let mut doc = Document::default();
if rng.gen_bool(1f64 / 15f64) {
@@ -577,9 +582,6 @@ pub mod tests {
) {
for target in targets {
let mut postings_opt = postings_factory();
if target < postings_opt.doc() {
continue;
}
let mut postings_unopt = UnoptimizedDocSet::wrap(postings_factory());
let skip_result_opt = postings_opt.seek(target);
let skip_result_unopt = postings_unopt.seek(target);
@@ -727,7 +729,7 @@ mod bench {
let mut s = 0u32;
while segment_postings.doc() != TERMINATED {
s += (segment_postings.doc() & n) % 1024;
segment_postings.advance();
segment_postings.advance()
}
s
});


@@ -1,6 +1,5 @@
use super::stacker::{Addr, MemoryArena, TermHashMap};
use crate::fieldnorm::FieldNormReaders;
use crate::postings::recorder::{
BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder,
};
@@ -105,7 +104,6 @@ impl MultiFieldPostingsWriter {
doc: DocId,
field: Field,
token_stream: &mut dyn TokenStream,
term_buffer: &mut Term,
) -> u32 {
let postings_writer =
self.per_field_postings_writers[field.field_id() as usize].deref_mut();
@@ -115,7 +113,6 @@ impl MultiFieldPostingsWriter {
field,
token_stream,
&mut self.heap,
term_buffer,
)
}
@@ -131,7 +128,6 @@ impl MultiFieldPostingsWriter {
pub fn serialize(
&self,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect();
@@ -165,12 +161,8 @@ impl MultiFieldPostingsWriter {
}
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize];
let fieldnorm_reader = fieldnorm_readers.get_field(field);
let mut field_serializer = serializer.new_field(
field,
postings_writer.total_num_tokens(),
fieldnorm_reader,
)?;
let mut field_serializer =
serializer.new_field(field, postings_writer.total_num_tokens())?;
postings_writer.serialize(
&term_offsets[start..stop],
&mut field_serializer,
@@ -222,22 +214,21 @@ pub trait PostingsWriter {
field: Field,
token_stream: &mut dyn TokenStream,
heap: &mut MemoryArena,
term_buffer: &mut Term,
) -> u32 {
term_buffer.set_field(field);
let mut term = Term::for_field(field);
let mut sink = |token: &Token| {
// We skip all tokens with a len greater than u16.
if token.text.len() > MAX_TOKEN_LEN {
return;
if token.text.len() <= MAX_TOKEN_LEN {
term.set_text(token.text.as_str());
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
} else {
info!(
"A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \
MAX_TOKEN_LEN in the documentation for more information.",
token.text.len(),
MAX_TOKEN_LEN
);
}
term_buffer.set_text(token.text.as_str());
self.subscribe(
term_index,
doc_id,
token.position as u32,
&term_buffer,
heap,
);
};
token_stream.process(&mut sink)
}
@@ -306,8 +297,7 @@ impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec>
let mut buffer_lender = BufferLender::default();
for &(term_bytes, addr, _) in term_addrs {
let recorder: Rec = termdict_heap.read(addr);
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
serializer.new_term(&term_bytes[4..], term_doc_freq)?;
serializer.new_term(&term_bytes[4..])?;
recorder.serialize(&mut buffer_lender, serializer, heap)?;
serializer.close_term()?;
}


@@ -75,10 +75,6 @@ pub(crate) trait Recorder: Copy + 'static {
serializer: &mut FieldSerializer<'_>,
heap: &MemoryArena,
) -> io::Result<()>;
/// Returns the number of document containing this term.
///
/// Returns `None` if not available.
fn term_doc_freq(&self) -> Option<u32>;
}
/// Only records the doc ids
@@ -117,16 +113,11 @@ impl Recorder for NothingRecorder {
) -> io::Result<()> {
let buffer = buffer_lender.lend_u8();
self.stack.read_to_end(heap, buffer);
// TODO avoid reading twice.
for doc in VInt32Reader::new(&buffer[..]) {
serializer.write_doc(doc as u32, 0u32, &[][..])?;
}
Ok(())
}
fn term_doc_freq(&self) -> Option<u32> {
None
}
}
/// Recorder encoding document ids, and term frequencies
@@ -135,7 +126,6 @@ pub struct TermFrequencyRecorder {
stack: ExpUnrolledLinkedList,
current_doc: DocId,
current_tf: u32,
term_doc_freq: u32,
}
impl Recorder for TermFrequencyRecorder {
@@ -144,7 +134,6 @@ impl Recorder for TermFrequencyRecorder {
stack: ExpUnrolledLinkedList::new(),
current_doc: u32::max_value(),
current_tf: 0u32,
term_doc_freq: 0u32,
}
}
@@ -153,7 +142,6 @@ impl Recorder for TermFrequencyRecorder {
}
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
self.term_doc_freq += 1;
self.current_doc = doc;
let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
}
@@ -184,10 +172,6 @@ impl Recorder for TermFrequencyRecorder {
Ok(())
}
fn term_doc_freq(&self) -> Option<u32> {
Some(self.term_doc_freq)
}
}
/// Recorder encoding term frequencies as well as positions.
@@ -195,14 +179,12 @@ impl Recorder for TermFrequencyRecorder {
pub struct TFAndPositionRecorder {
stack: ExpUnrolledLinkedList,
current_doc: DocId,
term_doc_freq: u32,
}
impl Recorder for TFAndPositionRecorder {
fn new() -> Self {
TFAndPositionRecorder {
stack: ExpUnrolledLinkedList::new(),
current_doc: u32::max_value(),
term_doc_freq: 0u32,
}
}
@@ -212,7 +194,6 @@ impl Recorder for TFAndPositionRecorder {
fn new_doc(&mut self, doc: DocId, heap: &mut MemoryArena) {
self.current_doc = doc;
self.term_doc_freq += 1u32;
let _ = write_u32_vint(doc, &mut self.stack.writer(heap));
}
@@ -252,10 +233,6 @@ impl Recorder for TFAndPositionRecorder {
}
Ok(())
}
fn term_doc_freq(&self) -> Option<u32> {
Some(self.term_doc_freq)
}
}
#[cfg(test)]


@@ -10,10 +10,9 @@ use crate::postings::BlockSearcher;
use crate::postings::Postings;
use crate::schema::IndexRecordOption;
use crate::{DocId, TERMINATED};
use crate::DocId;
use crate::directory::ReadOnlySource;
use crate::fastfield::DeleteBitSet;
use crate::postings::BlockSegmentPostings;
/// `SegmentPostings` represents the inverted list or postings associated to
@@ -21,9 +20,8 @@ use crate::postings::BlockSegmentPostings;
///
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.
/// Positions on the other hand, are optionally entirely decoded upfront.
#[derive(Clone)]
pub struct SegmentPostings {
pub(crate) block_cursor: BlockSegmentPostings,
block_cursor: BlockSegmentPostings,
cur: usize,
position_reader: Option<PositionReader>,
block_searcher: BlockSearcher,
@@ -40,31 +38,6 @@ impl SegmentPostings {
}
}
/// Compute the number of non-deleted documents.
///
/// This method will clone and scan through the posting lists.
/// (this is a rather expensive operation).
pub fn doc_freq_given_deletes(&self, delete_bitset: &DeleteBitSet) -> u32 {
let mut docset = self.clone();
let mut doc_freq = 0;
loop {
let doc = docset.doc();
if doc == TERMINATED {
return doc_freq;
}
if delete_bitset.is_alive(doc) {
doc_freq += 1u32;
}
docset.advance();
}
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
pub fn doc_freq(&self) -> u32 {
self.block_cursor.doc_freq()
}
/// Creates a segment postings object with the given documents
/// and no frequency encoded.
///
@@ -76,9 +49,7 @@ impl SegmentPostings {
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
let mut buffer = Vec::new();
{
let mut postings_serializer =
PostingsSerializer::new(&mut buffer, 0.0, false, false, None);
postings_serializer.new_term(docs.len() as u32);
let mut postings_serializer = PostingsSerializer::new(&mut buffer, false, false);
for &doc in docs {
postings_serializer.write_doc(doc, 1u32);
}
@@ -95,51 +66,6 @@ impl SegmentPostings {
SegmentPostings::from_block_postings(block_segment_postings, None)
}
/// Helper functions to create `SegmentPostings` for tests.
#[cfg(test)]
pub fn create_from_docs_and_tfs(
doc_and_tfs: &[(u32, u32)],
fieldnorms: Option<&[u32]>,
) -> SegmentPostings {
use crate::fieldnorm::FieldNormReader;
use crate::Score;
let mut buffer: Vec<u8> = Vec::new();
let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
let average_field_norm = fieldnorms
.map(|fieldnorms| {
if fieldnorms.len() == 0 {
return 0.0;
}
let total_num_tokens: u64 = fieldnorms
.iter()
.map(|&fieldnorm| fieldnorm as u64)
.sum::<u64>();
total_num_tokens as Score / fieldnorms.len() as Score
})
.unwrap_or(0.0);
let mut postings_serializer = PostingsSerializer::new(
&mut buffer,
average_field_norm,
true,
false,
fieldnorm_reader,
);
postings_serializer.new_term(doc_and_tfs.len() as u32);
for &(doc, tf) in doc_and_tfs {
postings_serializer.write_doc(doc, tf);
}
postings_serializer
.close_term(doc_and_tfs.len() as u32)
.unwrap();
let block_segment_postings = BlockSegmentPostings::from_data(
doc_and_tfs.len() as u32,
ReadOnlySource::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
);
SegmentPostings::from_block_postings(block_segment_postings, None)
}
/// Reads a Segment postings from an &[u8]
///
/// * `len` - number of document in the posting lists.
@@ -164,7 +90,6 @@ impl DocSet for SegmentPostings {
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> DocId {
debug_assert!(self.block_cursor.block_is_loaded());
if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
self.cur = 0;
self.block_cursor.advance();
@@ -175,15 +100,14 @@ impl DocSet for SegmentPostings {
}
fn seek(&mut self, target: DocId) -> DocId {
debug_assert!(self.doc() <= target);
if self.doc() >= target {
return self.doc();
if self.doc() == target {
return target;
}
self.block_cursor.seek(target);
// At this point we are on the block, that might contain our document.
let output = self.block_cursor.docs_aligned();
self.cur = self.block_searcher.search_in_block(&output, target);
// The last block is not full and padded with the value TERMINATED,
@@ -199,7 +123,6 @@ impl DocSet for SegmentPostings {
// After the search, the cursor should point to the first value of TERMINATED.
let doc = output.0[self.cur];
debug_assert!(doc >= target);
debug_assert_eq!(doc, self.doc());
doc
}
@@ -216,7 +139,7 @@ impl DocSet for SegmentPostings {
impl HasLen for SegmentPostings {
fn len(&self) -> usize {
self.block_cursor.doc_freq() as usize
self.block_cursor.doc_freq()
}
}
@@ -271,7 +194,6 @@ mod tests {
use crate::common::HasLen;
use crate::docset::{DocSet, TERMINATED};
use crate::fastfield::DeleteBitSet;
use crate::postings::postings::Postings;
#[test]
@@ -294,14 +216,4 @@ mod tests {
let postings = SegmentPostings::empty();
assert_eq!(postings.term_freq(), 1);
}
#[test]
fn test_doc_freq() {
let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
assert_eq!(docs.doc_freq(), 3);
let delete_bitset = DeleteBitSet::for_test(&[2], 12);
assert_eq!(docs.doc_freq_given_deletes(&delete_bitset), 2);
let all_deleted = DeleteBitSet::for_test(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
}
}


@@ -3,16 +3,13 @@ use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment;
use crate::directory::WritePtr;
use crate::fieldnorm::FieldNormReader;
use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer;
use crate::query::BM25Weight;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::{DocId, Score};
use std::cmp::Ordering;
use crate::DocId;
use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing
@@ -92,22 +89,20 @@ impl InvertedIndexSerializer {
&mut self,
field: Field,
total_num_tokens: u64,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field);
let postings_write = self.postings_write.for_field(field);
total_num_tokens.serialize(postings_write)?;
let positions_write = self.positions_write.for_field(field);
let positionsidx_write = self.positionsidx_write.for_field(field);
let field_type: FieldType = (*field_entry.field_type()).clone();
FieldSerializer::create(
&field_type,
total_num_tokens,
term_dictionary_write,
postings_write,
positions_write,
positionsidx_write,
fieldnorm_reader,
)
}
@@ -135,14 +130,11 @@ pub struct FieldSerializer<'a> {
impl<'a> FieldSerializer<'a> {
fn create(
field_type: &FieldType,
total_num_tokens: u64,
term_dictionary_write: &'a mut CountingWriter<WritePtr>,
postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>,
positionsidx_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'a>> {
total_num_tokens.serialize(postings_write)?;
let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
@@ -155,17 +147,8 @@ impl<'a> FieldSerializer<'a> {
_ => (false, false),
};
let term_dictionary_builder = TermDictionaryBuilder::create(term_dictionary_write)?;
let average_fieldnorm = fieldnorm_reader
.as_ref()
.map(|ff_reader| (total_num_tokens as Score / ff_reader.num_docs() as Score))
.unwrap_or(0.0);
let postings_serializer = PostingsSerializer::new(
postings_write,
average_fieldnorm,
term_freq_enabled,
position_enabled,
fieldnorm_reader,
);
let postings_serializer =
PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
let positions_serializer_opt = if position_enabled {
Some(PositionSerializer::new(positions_write, positionsidx_write))
} else {
@@ -198,20 +181,18 @@ impl<'a> FieldSerializer<'a> {
/// Starts the postings for a new term.
/// * term - the term. It needs to come after the previous term according
/// to the lexicographical order.
/// * term_doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &[u8], term_doc_freq: u32) -> io::Result<TermOrdinal> {
/// * doc_freq - the number of documents containing the term.
pub fn new_term(&mut self, term: &[u8]) -> io::Result<TermOrdinal> {
assert!(
!self.term_open,
"Called new_term, while the previous term was not closed."
);
self.term_open = true;
self.postings_serializer.clear();
self.current_term_info = self.current_term_info();
self.term_dictionary_builder.insert_key(term)?;
let term_ordinal = self.num_terms;
self.num_terms += 1;
self.postings_serializer.new_term(term_doc_freq);
Ok(term_ordinal)
}
@@ -325,27 +306,14 @@ pub struct PostingsSerializer<W: Write> {
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
bm25_weight: Option<BM25Weight>,
num_docs: u32, // Number of docs in the segment
avg_fieldnorm: Score, // Average number of terms in the field for that segment.
// this value is used to compute the block wand information.
}
impl<W: Write> PostingsSerializer<W> {
pub fn new(
write: W,
avg_fieldnorm: Score,
termfreq_enabled: bool,
termfreq_sum_enabled: bool,
fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer<W> {
let num_docs = fieldnorm_reader
.as_ref()
.map(|fieldnorm_reader| fieldnorm_reader.num_docs())
.unwrap_or(0u32);
PostingsSerializer {
output_write: CountingWriter::wrap(write),
@@ -358,23 +326,6 @@ impl<W: Write> PostingsSerializer<W> {
last_doc_id_encoded: 0u32,
termfreq_enabled,
termfreq_sum_enabled,
fieldnorm_reader,
bm25_weight: None,
num_docs,
avg_fieldnorm,
}
}
pub fn new_term(&mut self, term_doc_freq: u32) {
if self.termfreq_enabled && self.num_docs > 0 {
let bm25_weight = BM25Weight::for_one_term(
term_doc_freq as u64,
self.num_docs as u64,
self.avg_fieldnorm,
);
self.bm25_weight = Some(bm25_weight);
}
}
@@ -391,6 +342,7 @@ impl<W: Write> PostingsSerializer<W> {
self.postings_write.extend(block_encoded);
}
if self.termfreq_enabled {
// encode the term_freqs
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_unsorted(&self.block.term_freqs());
@@ -400,31 +352,6 @@ impl<W: Write> PostingsSerializer<W> {
let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq);
}
let mut blockwand_params = (0u8, 0u32);
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids().iter().cloned();
let term_freqs = self.block.term_freqs().iter().cloned();
let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
blockwand_params = fieldnorms
.zip(term_freqs)
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
)
.unwrap();
}
}
let (fieldnorm_id, term_freq) = blockwand_params;
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
}
self.block.clear();
}
@@ -473,7 +400,6 @@ impl<W: Write> PostingsSerializer<W> {
}
self.skip_write.clear();
self.postings_write.clear();
self.bm25_weight = None;
Ok(())
}


@@ -1,9 +1,8 @@
use crate::common::{read_u32_vint_no_advance, serialize_vint_u32, BinarySerializable, VInt};
use crate::common::BinarySerializable;
use crate::directory::ReadOnlySource;
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
use crate::query::BM25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
use crate::{DocId, TERMINATED};
use owned_read::OwnedRead;
pub struct SkipSerializer {
@@ -41,13 +40,6 @@ impl SkipSerializer {
.expect("Should never fail");
}
pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
self.buffer.push(fieldnorm_id);
let mut buf = [0u8; 8];
let bytes = serialize_vint_u32(term_freq, &mut buf);
self.buffer.extend_from_slice(bytes);
}
pub fn data(&self) -> &[u8] {
&self.buffer[..]
}
@@ -58,7 +50,6 @@ impl SkipSerializer {
}
}
#[derive(Clone)]
pub(crate) struct SkipReader {
last_doc_in_block: DocId,
pub(crate) last_doc_in_previous_block: DocId,
@@ -78,74 +69,41 @@ pub(crate) enum BlockInfo {
doc_num_bits: u8,
tf_num_bits: u8,
tf_sum: u32,
block_wand_fieldnorm_id: u8,
block_wand_term_freq: u32,
},
VInt {
num_docs: u32,
},
VInt(u32),
}
impl Default for BlockInfo {
fn default() -> Self {
BlockInfo::VInt { num_docs: 0u32 }
BlockInfo::VInt(0)
}
}
impl SkipReader {
pub fn new(data: ReadOnlySource, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
let mut skip_reader = SkipReader {
last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0
} else {
TERMINATED
},
SkipReader {
last_doc_in_block: 0u32,
last_doc_in_previous_block: 0u32,
owned_read: OwnedRead::new(data),
skip_info,
block_info: BlockInfo::VInt { num_docs: doc_freq },
block_info: BlockInfo::default(),
byte_offset: 0,
remaining_docs: doc_freq,
position_offset: 0u64,
};
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
skip_reader.read_block_info();
}
skip_reader
}
pub fn reset(&mut self, data: ReadOnlySource, doc_freq: u32) {
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0
} else {
TERMINATED
};
self.last_doc_in_block = 0u32;
self.last_doc_in_previous_block = 0u32;
self.owned_read = OwnedRead::new(data);
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.block_info = BlockInfo::default();
self.byte_offset = 0;
self.remaining_docs = doc_freq;
self.position_offset = 0u64;
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
self.read_block_info();
}
}
// Returns the block max score for this block if available.
//
// The block max score is available for all full bitpacked blocks,
// but not available for the last VInt-encoded incomplete block.
pub fn block_max_score(&self, bm25_weight: &BM25Weight) -> Option<Score> {
match self.block_info {
BlockInfo::BitPacked {
block_wand_fieldnorm_id,
block_wand_term_freq,
..
} => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
BlockInfo::VInt { .. } => None,
}
}
#[cfg(test)]
#[inline(always)]
pub(crate) fn last_doc_in_block(&self) -> DocId {
self.last_doc_in_block
}
@@ -169,38 +127,25 @@ impl SkipReader {
doc_num_bits,
tf_num_bits: 0,
tf_sum: 0,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0,
};
}
IndexRecordOption::WithFreqs => {
let tf_num_bits = self.owned_read.get(1);
let block_wand_fieldnorm_id = self.owned_read.get(2);
let data = &self.owned_read.as_ref()[3..];
let (block_wand_term_freq, num_bytes) = read_u32_vint_no_advance(data);
self.owned_read.advance(3 + num_bytes);
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum: 0,
block_wand_fieldnorm_id,
block_wand_term_freq,
};
self.owned_read.advance(2);
}
IndexRecordOption::WithFreqsAndPositions => {
let tf_num_bits = self.owned_read.get(1);
self.owned_read.advance(2);
let tf_sum = u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
let block_wand_fieldnorm_id = self.owned_read.get(0);
self.owned_read.advance(1);
let block_wand_term_freq =
VInt::deserialize_u64(&mut self.owned_read).unwrap() as u32;
self.block_info = BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum,
block_wand_fieldnorm_id,
block_wand_term_freq,
};
}
}
@@ -214,44 +159,35 @@ impl SkipReader {
///
/// If the target is larger than all documents, the skip_reader
/// then advances to the last VInt block.
pub fn seek(&mut self, target: DocId) -> bool {
if self.last_doc_in_block() >= target {
return false;
}
loop {
pub fn seek(&mut self, target: DocId) {
while self.last_doc_in_block < target {
self.advance();
if self.last_doc_in_block() >= target {
return true;
}
}
}
pub fn advance(&mut self) {
pub fn advance(&mut self) -> bool {
match self.block_info {
BlockInfo::BitPacked {
doc_num_bits,
tf_num_bits,
tf_sum,
..
} => {
self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
self.position_offset += tf_sum as u64;
}
BlockInfo::VInt { num_docs } => {
debug_assert_eq!(num_docs, self.remaining_docs);
self.remaining_docs = 0;
self.byte_offset = std::usize::MAX;
BlockInfo::VInt(num_vint_docs) => {
self.remaining_docs -= num_vint_docs;
}
}
self.last_doc_in_previous_block = self.last_doc_in_block;
if self.remaining_docs >= COMPRESSION_BLOCK_SIZE as u32 {
self.read_block_info();
true
} else {
self.last_doc_in_block = TERMINATED;
self.block_info = BlockInfo::VInt {
num_docs: self.remaining_docs,
};
self.block_info = BlockInfo::VInt(self.remaining_docs);
self.remaining_docs > 0
}
}
}
@@ -271,10 +207,8 @@ mod tests {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_term_freq(3u8);
skip_serializer.write_blockwand_max(13u8, 3u32);
skip_serializer.write_doc(5u32, 5u8);
skip_serializer.write_term_freq(2u8);
skip_serializer.write_blockwand_max(8u8, 2u32);
skip_serializer.data().to_owned()
};
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
@@ -283,35 +217,29 @@ mod tests {
doc_freq,
IndexRecordOption::WithFreqs,
);
assert!(skip_reader.advance());
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info,
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 3u8,
tf_sum: 0,
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
tf_sum: 0
}
);
skip_reader.advance();
assert!(skip_reader.advance());
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
tf_num_bits: 2u8,
tf_sum: 0,
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
tf_sum: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
assert!(skip_reader.advance());
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
assert!(!skip_reader.advance());
}
#[test]
@@ -328,35 +256,29 @@ mod tests {
doc_freq,
IndexRecordOption::Basic,
);
assert!(skip_reader.advance());
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
tf_sum: 0u32
}
);
skip_reader.advance();
assert!(skip_reader.advance());
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
tf_sum: 0u32
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
assert!(skip_reader.advance());
assert_eq!(skip_reader.block_info(), BlockInfo::VInt(3u32));
assert!(!skip_reader.advance());
}
#[test]
@@ -372,18 +294,16 @@ mod tests {
doc_freq,
IndexRecordOption::Basic,
);
assert!(skip_reader.advance());
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
tf_sum: 0u32
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
assert!(!skip_reader.advance());
}
}
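For orientation, a small sketch (not part of the diff) of how a posting list's `doc_freq` splits into full bitpacked blocks plus one trailing VInt block, which is the layout the `SkipReader` above walks; `block_layout` is a hypothetical helper and assumes `COMPRESSION_BLOCK_SIZE` is 128 (`BitPacker4x::BLOCK_LEN`):

fn block_layout(doc_freq: u32, block_size: u32) -> (u32, u32) {
    // Full bitpacked blocks, followed by one VInt block holding the remainder.
    let num_full_blocks = doc_freq / block_size;
    let trailing_vint_docs = doc_freq % block_size;
    (num_full_blocks, trailing_vint_docs)
}

// A doc_freq of 3 + 2 * 128 = 259, as used in the tests above, yields two full
// bitpacked blocks and a final VInt block of 3 documents: block_layout(259, 128) == (2, 3).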


@@ -9,7 +9,7 @@ use crate::Score;
/// Query that matches all of the documents.
///
/// All of the documents get the score 1.0.
/// All of the documents get the score 1f32.
#[derive(Clone, Debug)]
pub struct AllQuery;
@@ -23,7 +23,7 @@ impl Query for AllQuery {
pub struct AllWeight;
impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer {
doc: 0u32,
max_doc: reader.max_doc(),
@@ -35,7 +35,7 @@ impl Weight for AllWeight {
if doc >= reader.max_doc() {
return Err(does_not_match(doc));
}
Ok(Explanation::new("AllQuery", 1.0))
Ok(Explanation::new("AllQuery", 1f32))
}
}
@@ -66,7 +66,7 @@ impl DocSet for AllScorer {
impl Scorer for AllScorer {
fn score(&mut self) -> Score {
1.0
1f32
}
}
@@ -83,7 +83,7 @@ mod tests {
let field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(field=>"aaa"));
index_writer.add_document(doc!(field=>"bbb"));
index_writer.commit().unwrap();
@@ -100,7 +100,7 @@ mod tests {
let weight = AllQuery.weight(&searcher, false).unwrap();
{
let reader = searcher.segment_reader(0);
let mut scorer = weight.scorer(reader, 1.0).unwrap();
let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), 1u32);
assert_eq!(scorer.doc(), 1u32);
@@ -108,7 +108,7 @@ mod tests {
}
{
let reader = searcher.segment_reader(1);
let mut scorer = weight.scorer(reader, 1.0).unwrap();
let mut scorer = weight.scorer(reader, 1.0f32).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), TERMINATED);
}
@@ -122,14 +122,14 @@ mod tests {
let weight = AllQuery.weight(&searcher, false).unwrap();
let reader = searcher.segment_reader(0);
{
let mut scorer = weight.scorer(reader, 2.0).unwrap();
let mut scorer = weight.scorer(reader, 2.0f32).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 2.0);
assert_eq!(scorer.score(), 2.0f32);
}
{
let mut scorer = weight.scorer(reader, 1.5).unwrap();
let mut scorer = weight.scorer(reader, 1.5f32).unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.5);
assert_eq!(scorer.score(), 1.5f32);
}
}
}
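A hedged usage sketch (not part of the diff) showing `AllQuery` from the caller side, mirroring the `searcher.search(&AllQuery, &Count)` calls that appear in the tests earlier in this changeset; the field name and memory budget are arbitrary:

use tantivy::collector::Count;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn count_all_docs() -> tantivy::Result<usize> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(text => "aaa"));
    writer.add_document(doc!(text => "bbb"));
    writer.commit()?;
    let searcher = index.reader()?.searcher();
    // AllQuery matches every document and gives each one a score of 1.0.
    searcher.search(&AllQuery, &Count)
}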


@@ -5,8 +5,9 @@ use crate::query::{BitSetDocSet, Explanation};
use crate::query::{Scorer, Weight};
use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::DocId;
use crate::Result;
use crate::TantivyError;
use crate::{DocId, Score};
use std::sync::Arc;
use tantivy_fst::Automaton;
@@ -39,9 +40,10 @@ impl<A> Weight for AutomatonWeight<A>
where
A: Automaton + Send + Sync + 'static,
{
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field);
let term_dict = inverted_index.terms();
let mut term_stream = self.automaton_stream(term_dict);
@@ -50,14 +52,12 @@ where
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
for &doc in docs {
for &doc in block_segment_postings.docs() {
doc_bitset.insert(doc);
}
block_segment_postings.advance();
if !block_segment_postings.advance() {
break;
}
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
@@ -65,10 +65,10 @@ where
Ok(Box::new(const_scorer))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0))
Ok(Explanation::new("AutomatonScorer", 1.0f32))
} else {
Err(TantivyError::InvalidArgument(
"Document does not exist".to_string(),
@@ -90,7 +90,7 @@ mod tests {
let mut schema = Schema::builder();
let title = schema.add_text_field("title", STRING);
let index = Index::create_in_ram(schema.build());
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(title=>"abc"));
index_writer.add_document(doc!(title=>"bcd"));
index_writer.add_document(doc!(title=>"abcd"));
@@ -143,13 +143,13 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.0)
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.0);
assert_eq!(scorer.score(), 1.0f32);
assert_eq!(scorer.advance(), 2u32);
assert_eq!(scorer.doc(), 2u32);
assert_eq!(scorer.score(), 1.0);
assert_eq!(scorer.score(), 1.0f32);
assert_eq!(scorer.advance(), TERMINATED);
}
@@ -161,9 +161,9 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let mut scorer = automaton_weight
.scorer(searcher.segment_reader(0u32), 1.32)
.scorer(searcher.segment_reader(0u32), 1.32f32)
.unwrap();
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.32);
assert_eq!(scorer.score(), 1.32f32);
}
}


@@ -61,23 +61,21 @@ impl DocSet for BitSetDocSet {
}
fn seek(&mut self, target: DocId) -> DocId {
if target >= self.docs.max_value() {
self.doc = TERMINATED;
return TERMINATED;
}
let target_bucket = target / 64u32;
// Mask for all of the bits greater or equal
// to our target document.
if target_bucket > self.cursor_bucket {
self.go_to_bucket(target_bucket);
let greater_filter: TinySet = TinySet::range_greater_or_equal(target);
self.cursor_tinybitset = self.cursor_tinybitset.intersect(greater_filter);
self.advance()
} else {
let mut doc = self.doc();
while doc < target {
doc = self.advance();
}
doc
self.advance();
}
let mut doc = self.doc();
while doc < target {
doc = self.advance();
}
doc
}
/// Returns the current document
@@ -116,13 +114,6 @@ mod tests {
assert_eq!(empty.advance(), TERMINATED)
}
#[test]
fn test_seek_terminated() {
let bitset = BitSet::with_max_value(1000);
let mut empty = BitSetDocSet::from(bitset);
assert_eq!(empty.seek(TERMINATED), TERMINATED)
}
fn test_go_through_sequential(docs: &[DocId]) {
let mut docset = create_docbitset(docs, 1_000u32);
for &doc in docs {


@@ -3,24 +3,21 @@ use crate::query::Explanation;
use crate::Score;
use crate::Searcher;
use crate::Term;
use serde::Deserialize;
use serde::Serialize;
const K1: Score = 1.2;
const B: Score = 0.75;
const K1: f32 = 1.2;
const B: f32 = 0.75;
fn idf(doc_freq: u64, doc_count: u64) -> Score {
assert!(doc_count >= doc_freq, "{} >= {}", doc_count, doc_freq);
let x = ((doc_count - doc_freq) as Score + 0.5) / (doc_freq as Score + 0.5);
(1.0 + x).ln()
fn idf(doc_freq: u64, doc_count: u64) -> f32 {
let x = ((doc_count - doc_freq) as f32 + 0.5) / (doc_freq as f32 + 0.5);
(1f32 + x).ln()
}
fn cached_tf_component(fieldnorm: u32, average_fieldnorm: Score) -> Score {
K1 * (1.0 - B + B * fieldnorm as Score / average_fieldnorm)
fn cached_tf_component(fieldnorm: u32, average_fieldnorm: f32) -> f32 {
K1 * (1f32 - B + B * fieldnorm as f32 / average_fieldnorm)
}
fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
let mut cache: [Score; 256] = [0.0; 256];
fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
let mut cache = [0f32; 256];
for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() {
let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8);
*cache_mut = cached_tf_component(fieldnorm, average_fieldnorm);
@@ -28,22 +25,15 @@ fn compute_tf_cache(average_fieldnorm: Score) -> [Score; 256] {
cache
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct BM25Params {
pub idf: Score,
pub avg_fieldnorm: Score,
}
#[derive(Clone)]
pub struct BM25Weight {
idf_explain: Explanation,
weight: Score,
cache: [Score; 256],
average_fieldnorm: Score,
weight: f32,
cache: [f32; 256],
average_fieldnorm: f32,
}
impl BM25Weight {
pub fn boost_by(&self, boost: Score) -> BM25Weight {
pub fn boost_by(&self, boost: f32) -> BM25Weight {
BM25Weight {
idf_explain: self.idf_explain.clone(),
weight: self.weight * boost,
@@ -70,11 +60,19 @@ impl BM25Weight {
total_num_tokens += inverted_index.total_num_tokens();
total_num_docs += u64::from(segment_reader.max_doc());
}
let average_fieldnorm = total_num_tokens as Score / total_num_docs as Score;
let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;
let mut idf_explain: Explanation;
if terms.len() == 1 {
let term_doc_freq = searcher.doc_freq(&terms[0]);
BM25Weight::for_one_term(term_doc_freq, total_num_docs, average_fieldnorm)
let idf = idf(term_doc_freq, total_num_docs);
idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as f32,
);
idf_explain.add_const("N, total number of docs", total_num_docs as f32);
} else {
let idf = terms
.iter()
@@ -82,30 +80,14 @@ impl BM25Weight {
let term_doc_freq = searcher.doc_freq(term);
idf(term_doc_freq, total_num_docs)
})
.sum::<Score>();
let idf_explain = Explanation::new("idf", idf);
BM25Weight::new(idf_explain, average_fieldnorm)
.sum::<f32>();
idf_explain = Explanation::new("idf", idf);
}
BM25Weight::new(idf_explain, average_fieldnorm)
}
pub fn for_one_term(
term_doc_freq: u64,
total_num_docs: u64,
avg_fieldnorm: Score,
) -> BM25Weight {
let idf = idf(term_doc_freq, total_num_docs);
let mut idf_explain =
Explanation::new("idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))", idf);
idf_explain.add_const(
"n, number of docs containing this term",
term_doc_freq as Score,
);
idf_explain.add_const("N, total number of docs", total_num_docs as Score);
BM25Weight::new(idf_explain, avg_fieldnorm)
}
fn new(idf_explain: Explanation, average_fieldnorm: Score) -> BM25Weight {
let weight = idf_explain.value() * (1.0 + K1);
fn new(idf_explain: Explanation, average_fieldnorm: f32) -> BM25Weight {
let weight = idf_explain.value() * (1f32 + K1);
BM25Weight {
idf_explain,
weight,
@@ -116,27 +98,19 @@ impl BM25Weight {
#[inline(always)]
pub fn score(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
self.weight * self.tf_factor(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> Score {
self.score(255u8, 2_013_265_944)
}
#[inline(always)]
pub(crate) fn tf_factor(&self, fieldnorm_id: u8, term_freq: u32) -> Score {
let term_freq = term_freq as Score;
let norm = self.cache[fieldnorm_id as usize];
term_freq / (term_freq + norm)
let term_freq = term_freq as f32;
self.weight * term_freq / (term_freq + norm)
}
pub fn explain(&self, fieldnorm_id: u8, term_freq: u32) -> Explanation {
// The explain format is directly copied from Lucene's.
// (So, Kudos to Lucene)
let score = self.score(fieldnorm_id, term_freq);
let norm = self.cache[fieldnorm_id as usize];
let term_freq = term_freq as Score;
let term_freq = term_freq as f32;
let right_factor = term_freq / (term_freq + norm);
let mut tf_explanation = Explanation::new(
@@ -149,12 +123,12 @@ impl BM25Weight {
tf_explanation.add_const("b, length normalization parameter", B);
tf_explanation.add_const(
"dl, length of field",
FieldNormReader::id_to_fieldnorm(fieldnorm_id) as Score,
FieldNormReader::id_to_fieldnorm(fieldnorm_id) as f32,
);
tf_explanation.add_const("avgdl, average length of field", self.average_fieldnorm);
let mut explanation = Explanation::new("TermQuery, product of...", score);
explanation.add_detail(Explanation::new("(K1+1)", K1 + 1.0));
explanation.add_detail(Explanation::new("(K1+1)", K1 + 1f32));
explanation.add_detail(self.idf_explain.clone());
explanation.add_detail(tf_explanation);
explanation
@@ -165,11 +139,10 @@ impl BM25Weight {
mod tests {
use super::idf;
use crate::{assert_nearly_equals, Score};
use crate::tests::assert_nearly_equals;
#[test]
fn test_idf() {
let score: Score = 2.0;
assert_nearly_equals!(idf(1, 2), score.ln());
assert_nearly_equals(idf(1, 2), 0.6931472);
}
}

View File

@@ -1,434 +0,0 @@
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
use std::ops::Deref;
use std::ops::DerefMut;
/// Takes term_scorers sorted by their current doc() and a threshold, and returns
/// `(before_pivot_len, pivot_len, pivot_doc)` defined as follows:
/// - `pivot_doc` the lowest document that has a chance of exceeding (>) the threshold score.
/// - `before_pivot_len` the number of term_scorers such that term_scorer.doc() < pivot_doc.
/// - `pivot_len` the number of term_scorers such that term_scorer.doc() <= pivot_doc.
///
/// We always have `before_pivot_len` < `pivot_len`.
///
/// `None` is returned if we establish that no document can exceed the threshold.
fn find_pivot_doc(
term_scorers: &[TermScorerWithMaxScore],
threshold: Score,
) -> Option<(usize, usize, DocId)> {
let mut max_score = 0.0;
let mut before_pivot_len = 0;
let mut pivot_doc = TERMINATED;
while before_pivot_len < term_scorers.len() {
let term_scorer = &term_scorers[before_pivot_len];
max_score += term_scorer.max_score;
if max_score > threshold {
pivot_doc = term_scorer.doc();
break;
}
before_pivot_len += 1;
}
if pivot_doc == TERMINATED {
return None;
}
// Right now `before_pivot_len` is an ordinal; we want a length.
let mut pivot_len = before_pivot_len + 1;
// Some other term_scorer may be positioned on the same document.
pivot_len += term_scorers[pivot_len..]
.iter()
.take_while(|term_scorer| term_scorer.doc() == pivot_doc)
.count();
Some((before_pivot_len, pivot_len, pivot_doc))
}
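As a hedged illustration of the pivot selection above, a tiny standalone sketch over hypothetical (doc, max_score) pairs rather than real TermScorers:
// Scorers are modeled as (current_doc, max_score) pairs, already sorted by current_doc.
fn pivot_doc(scorers: &[(u32, f32)], threshold: f32) -> Option<u32> {
    let mut upper_bound = 0.0f32;
    for &(doc, max_score) in scorers {
        upper_bound += max_score;
        if upper_bound > threshold {
            // No document before `doc` can beat the threshold, even if every
            // scorer matched it with its maximum score.
            return Some(doc);
        }
    }
    None
}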
// Before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer(
scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_len: usize,
) {
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
let mut scorer_to_seek = pivot_len - 1;
let mut doc_to_seek_after = scorers[scorer_to_seek].doc();
for scorer_ord in (0..pivot_len - 1).rev() {
let scorer = &scorers[scorer_ord];
if scorer.last_doc_in_block() <= doc_to_seek_after {
doc_to_seek_after = scorer.last_doc_in_block();
scorer_to_seek = scorer_ord;
}
}
for scorer in &scorers[pivot_len..] {
if scorer.doc() <= doc_to_seek_after {
doc_to_seek_after = scorer.doc();
}
}
scorers[scorer_to_seek].seek(doc_to_seek_after + 1);
restore_ordering(scorers, scorer_to_seek);
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
}
// Given a list of term_scorers and an ordinal `ord`, and assuming that `term_scorers` is sorted
// except for `term_scorers[ord]`, which may have advanced past its rank,
// bubble up `term_scorers[ord]` in order to restore the ordering.
fn restore_ordering(term_scorers: &mut Vec<TermScorerWithMaxScore>, ord: usize) {
let doc = term_scorers[ord].doc();
for i in ord + 1..term_scorers.len() {
if term_scorers[i].doc() >= doc {
break;
}
term_scorers.swap(i, i - 1);
}
debug_assert!(is_sorted(term_scorers.iter().map(|scorer| scorer.doc())));
}
// Attempts to advance all term_scorers in `term_scorers[0..before_pivot_len]` to the pivot.
// If this works, returns true.
// If this fails (i.e. one of the term_scorers does not contain `pivot_doc` and its seek goes past the
// pivot), reorders the term_scorers to ensure the list is still sorted and returns `false`.
// If a term_scorer reaches TERMINATED in the process, it is removed from the list and `false` is returned.
fn align_scorers(
term_scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_doc: DocId,
before_pivot_len: usize,
) -> bool {
debug_assert_ne!(pivot_doc, TERMINATED);
for i in (0..before_pivot_len).rev() {
let new_doc = term_scorers[i].seek(pivot_doc);
if new_doc != pivot_doc {
if new_doc == TERMINATED {
term_scorers.swap_remove(i);
}
// We went past the pivot.
// We just go through the outer loop mechanic (Note that pivot is
// still a possible candidate).
//
// Termination is still guaranteed since we can only consider the same
// pivot at most term_scorers.len() - 1 times.
restore_ordering(term_scorers, i);
return false;
}
}
true
}
// Assumes term_scorers[..pivot_len] are all positioned on the same doc (pivot_doc).
// Advances term_scorers[..pivot_len], removes the scorers that reached TERMINATED,
// and restores the ordering of term_scorers.
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
for term_scorer in &mut term_scorers[..pivot_len] {
term_scorer.advance();
}
// TODO use drain_filter when available.
let mut i = 0;
while i != term_scorers.len() {
if term_scorers[i].doc() == TERMINATED {
term_scorers.swap_remove(i);
} else {
i += 1;
}
}
term_scorers.sort_by_key(|scorer| scorer.doc());
}
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: Score,
callback: &mut dyn FnMut(u32, Score) -> Score,
) {
let mut scorers: Vec<TermScorerWithMaxScore> = scorers
.iter_mut()
.map(TermScorerWithMaxScore::from)
.collect();
scorers.sort_by_key(|scorer| scorer.doc());
// At this point we need to ensure that the scorers are sorted!
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
while let Some((before_pivot_len, pivot_len, pivot_doc)) =
find_pivot_doc(&scorers[..], threshold)
{
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
debug_assert_ne!(pivot_doc, TERMINATED);
debug_assert!(before_pivot_len < pivot_len);
let block_max_score_upperbound: Score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| {
scorer.shallow_seek(pivot_doc);
scorer.block_max_score()
})
.sum();
// Beware: after a shallow advance, the skip readers can be ahead of
// the segment posting lists.
//
// `block_segment_postings.load_block()` needs to be called separately.
if block_max_score_upperbound <= threshold {
// The block max condition was not reached.
// We could get away with simply advancing the scorers to pivot_doc + 1, but that would
// be inefficient. The optimization requires a proper explanation and is
// isolated in a different function.
block_max_was_too_low_advance_one_scorer(&mut scorers, pivot_len);
continue;
}
// Block max condition is observed.
//
// Let's try and advance all scorers before the pivot to the pivot.
if !align_scorers(&mut scorers, pivot_doc, before_pivot_len) {
// At least one of the scorers does not contain the pivot.
//
// Let's stop scoring this pivot and go through the pivot selection again.
// Note that the current pivot is not necessarily a bad candidate and it
// may be picked again.
continue;
}
// At this point, all scorers are positioned on the doc.
let score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| scorer.score())
.sum();
if score > threshold {
threshold = callback(pivot_doc, score);
}
// Let's advance all of the scorers that are currently positioned on the pivot.
advance_all_scorers_on_pivot(&mut scorers, pivot_len);
}
}
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: Score,
}
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore { scorer, max_score }
}
}
impl<'a> Deref for TermScorerWithMaxScore<'a> {
type Target = TermScorer;
fn deref(&self) -> &Self::Target {
self.scorer
}
}
impl<'a> DerefMut for TermScorerWithMaxScore<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
}
}
fn is_sorted<I: Iterator<Item = DocId>>(mut it: I) -> bool {
if let Some(first) = it.next() {
let mut prev = first;
for doc in it {
if doc < prev {
return false;
}
prev = doc;
}
}
true
}
#[cfg(test)]
mod tests {
use crate::query::score_combiner::SumCombiner;
use crate::query::term_query::TermScorer;
use crate::query::Union;
use crate::query::{BM25Weight, Scorer};
use crate::{DocId, DocSet, Score, TERMINATED};
use proptest::prelude::*;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::iter;
struct Float(Score);
impl Eq for Float {}
impl PartialEq for Float {
fn eq(&self, other: &Self) -> bool {
self.cmp(&other) == Ordering::Equal
}
}
impl PartialOrd for Float {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Float {
fn cmp(&self, other: &Self) -> Ordering {
other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
}
}
fn nearly_equals(left: Score, right: Score) -> bool {
(left - right).abs() < 0.000001 * (left + right).abs()
}
fn compute_checkpoints_for_each_pruning(
term_scorers: Vec<TermScorer>,
n: usize,
) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut limit: Score = 0.0;
super::block_wand(term_scorers, Score::MIN, &mut |doc, score| {
heap.push(Float(score));
if heap.len() > n {
heap.pop().unwrap();
}
if heap.len() == n {
limit = heap.peek().unwrap().0;
}
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
return limit;
});
checkpoints
}
fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);
let mut limit = Score::MIN;
loop {
if scorer.doc() == TERMINATED {
break;
}
let doc = scorer.doc();
let score = scorer.score();
if score > limit {
heap.push(Float(score));
if heap.len() > n {
heap.pop().unwrap();
}
if heap.len() == n {
limit = heap.peek().unwrap().0;
}
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
}
scorer.advance();
}
checkpoints
}
const MAX_TERM_FREQ: u32 = 100u32;
fn posting_list(max_doc: u32) -> BoxedStrategy<Vec<(DocId, u32)>> {
(1..max_doc + 1)
.prop_flat_map(move |doc_freq| {
(
proptest::bits::bitset::sampled(doc_freq as usize, 0..max_doc as usize),
proptest::collection::vec(1u32..MAX_TERM_FREQ, doc_freq as usize),
)
})
.prop_map(|(docset, term_freqs)| {
docset
.iter()
.map(|doc| doc as u32)
.zip(term_freqs.iter().cloned())
.collect::<Vec<_>>()
})
.boxed()
}
fn gen_term_scorers(num_scorers: usize) -> BoxedStrategy<(Vec<Vec<(DocId, u32)>>, Vec<u32>)> {
(1u32..100u32)
.prop_flat_map(move |max_doc: u32| {
(
proptest::collection::vec(posting_list(max_doc), num_scorers),
proptest::collection::vec(2u32..10u32 * MAX_TERM_FREQ, max_doc as usize),
)
})
.boxed()
}
fn test_block_wand_aux(posting_lists: &[Vec<(DocId, u32)>], fieldnorms: &[u32]) {
// We virtually repeat all docs 64 times in order to emulate blocks of 2 documents
// and surface bugs more easily.
const REPEAT: usize = 64;
let fieldnorms_expanded = fieldnorms
.iter()
.cloned()
.flat_map(|fieldnorm| iter::repeat(fieldnorm).take(REPEAT))
.collect::<Vec<u32>>();
let postings_lists_expanded: Vec<Vec<(DocId, u32)>> = posting_lists
.iter()
.map(|posting_list| {
posting_list
.into_iter()
.cloned()
.flat_map(|(doc, term_freq)| {
(0 as u32..REPEAT as u32).map(move |offset| {
(
doc * (REPEAT as u32) + offset,
if offset == 0 { term_freq } else { 1 },
)
})
})
.collect::<Vec<(DocId, u32)>>()
})
.collect::<Vec<_>>();
let total_fieldnorms: u64 = fieldnorms_expanded
.iter()
.cloned()
.map(|fieldnorm| fieldnorm as u64)
.sum();
let average_fieldnorm = (total_fieldnorms as Score) / (fieldnorms_expanded.len() as Score);
let max_doc = fieldnorms_expanded.len();
let term_scorers: Vec<TermScorer> = postings_lists_expanded
.iter()
.map(|postings| {
let bm25_weight = BM25Weight::for_one_term(
postings.len() as u64,
max_doc as u64,
average_fieldnorm,
);
TermScorer::create_for_test(postings, &fieldnorms_expanded[..], bm25_weight)
})
.collect();
for top_k in 1..4 {
let checkpoints_for_each_pruning =
compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
let checkpoints_manual = compute_checkpoints_manual(term_scorers.clone(), top_k);
assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
.iter()
.zip(checkpoints_manual.iter())
{
assert_eq!(left_doc, right_doc);
assert!(nearly_equals(left_score, right_score));
}
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_block_wand_two_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(2)) {
test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_block_wand_three_term_scorers((posting_lists, fieldnorms) in gen_term_scorers(3)) {
test_block_wand_aux(&posting_lists[..], &fieldnorms[..]);
}
}
}

View File

@@ -1,5 +1,4 @@
use crate::core::SegmentReader;
use crate::postings::FreqReadingOption;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
use crate::query::term_query::TermScorer;
@@ -15,12 +14,12 @@ use crate::query::{intersect_scorers, Explanation};
use crate::{DocId, Score};
use std::collections::HashMap;
enum SpecializedScorer {
TermUnion(Vec<TermScorer>),
enum SpecializedScorer<TScoreCombiner: ScoreCombiner> {
TermUnion(Union<TermScorer, TScoreCombiner>),
Other(Box<dyn Scorer>),
}
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer<TScoreCombiner>
where
TScoreCombiner: ScoreCombiner,
{
@@ -36,29 +35,20 @@ where
.into_iter()
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
.collect();
if scorers
.iter()
.all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq)
{
// Block wand is only available if we read frequencies.
return SpecializedScorer::TermUnion(scorers);
} else {
return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
scorers,
)));
}
return SpecializedScorer::TermUnion(Union::<TermScorer, TScoreCombiner>::from(
scorers,
));
}
}
SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
}
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> {
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers);
Box::new(union_scorer)
impl<TScoreCombiner: ScoreCombiner> Into<Box<dyn Scorer>> for SpecializedScorer<TScoreCombiner> {
fn into(self) -> Box<dyn Scorer> {
match self {
Self::TermUnion(union) => Box::new(union),
Self::Other(scorer) => scorer,
}
SpecializedScorer::Other(scorer) => scorer,
}
}
@@ -78,7 +68,7 @@ impl BooleanWeight {
fn per_occur_scorers(
&self,
reader: &SegmentReader,
boost: Score,
boost: f32,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for &(ref occur, ref subweight) in &self.weights {
@@ -94,48 +84,47 @@ impl BooleanWeight {
fn complex_scorer<TScoreCombiner: ScoreCombiner>(
&self,
reader: &SegmentReader,
boost: Score,
) -> crate::Result<SpecializedScorer> {
boost: f32,
) -> crate::Result<SpecializedScorer<TScoreCombiner>> {
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers
let should_scorer_opt: Option<SpecializedScorer<TScoreCombiner>> = per_occur_scorers
.remove(&Occur::Should)
.map(scorer_union::<TScoreCombiner>);
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::MustNot)
.map(scorer_union::<DoNothingCombiner>)
.map(into_box_scorer::<DoNothingCombiner>);
.map(scorer_union::<TScoreCombiner>)
.map(Into::into);
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
.remove(&Occur::Must)
.map(intersect_scorers);
let positive_scorer: SpecializedScorer = match (should_scorer_opt, must_scorer_opt) {
(Some(should_scorer), Some(must_scorer)) => {
if self.scoring_enabled {
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
Box<dyn Scorer>,
Box<dyn Scorer>,
TScoreCombiner,
>::new(
must_scorer,
into_box_scorer::<TScoreCombiner>(should_scorer),
)))
} else {
SpecializedScorer::Other(must_scorer)
let positive_scorer: SpecializedScorer<TScoreCombiner> =
match (should_scorer_opt, must_scorer_opt) {
(Some(should_scorer), Some(must_scorer)) => {
if self.scoring_enabled {
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
Box<dyn Scorer>,
Box<dyn Scorer>,
TScoreCombiner,
>::new(
must_scorer, should_scorer.into()
)))
} else {
SpecializedScorer::Other(must_scorer)
}
}
}
(None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
(Some(should_scorer), None) => should_scorer,
(None, None) => {
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
};
(None, Some(must_scorer)) => SpecializedScorer::Other(must_scorer),
(Some(should_scorer), None) => should_scorer,
(None, None) => {
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
};
if let Some(exclude_scorer) = exclude_scorer_opt {
let positive_scorer_boxed: Box<dyn Scorer> =
into_box_scorer::<TScoreCombiner>(positive_scorer);
let positive_scorer_boxed: Box<dyn Scorer> = positive_scorer.into();
Ok(SpecializedScorer::Other(Box::new(Exclude::new(
positive_scorer_boxed,
exclude_scorer,
@@ -147,7 +136,7 @@ impl BooleanWeight {
}
impl Weight for BooleanWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 {
@@ -159,22 +148,20 @@ impl Weight for BooleanWeight {
}
} else if self.scoring_enabled {
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
.map(|specialized_scorer| {
into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
})
.map(Into::into)
} else {
self.complex_scorer::<DoNothingCombiner>(reader, boost)
.map(into_box_scorer::<DoNothingCombiner>)
.map(Into::into)
}
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
if !self.scoring_enabled {
return Ok(Explanation::new("BooleanQuery with no scoring", 1.0));
return Ok(Explanation::new("BooleanQuery with no scoring", 1f32));
}
let mut explanation = Explanation::new("BooleanClause. Sum of ...", scorer.score());
@@ -193,11 +180,9 @@ impl Weight for BooleanWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer =
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
SpecializedScorer::TermUnion(mut union_scorer) => {
for_each_scorer(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
@@ -219,14 +204,14 @@ impl Weight for BooleanWeight {
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
threshold: f32,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0f32)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);
SpecializedScorer::TermUnion(mut union_scorer) => {
for_each_pruning_scorer(&mut union_scorer, threshold, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);

View File

@@ -1,17 +1,13 @@
mod block_wand;
mod boolean_query;
mod boolean_weight;
pub(crate) use self::block_wand::block_wand;
pub use self::boolean_query::BooleanQuery;
#[cfg(test)]
mod tests {
use super::*;
use crate::assert_nearly_equals;
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
use crate::collector::TopDocs;
use crate::query::score_combiner::SumWithCoordsCombiner;
use crate::query::term_query::TermScorer;
use crate::query::Intersection;
@@ -22,8 +18,9 @@ mod tests {
use crate::query::Scorer;
use crate::query::TermQuery;
use crate::schema::*;
use crate::tests::assert_nearly_equals;
use crate::Index;
use crate::{DocAddress, DocId, Score};
use crate::{DocAddress, DocId};
fn aux_test_helper() -> (Index, Field) {
let mut schema_builder = Schema::builder();
@@ -32,7 +29,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "a c"));
@@ -61,7 +58,9 @@ mod tests {
let query = query_parser.parse_query("+a").unwrap();
let searcher = index.reader().unwrap().searcher();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<TermScorer>());
}
@@ -73,13 +72,17 @@ mod tests {
{
let query = query_parser.parse_query("+a +b +c").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<Intersection<TermScorer>>());
}
{
let query = query_parser.parse_query("+a +(b c)").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
}
}
@@ -92,7 +95,9 @@ mod tests {
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<RequiredOptionalScorer<
Box<dyn Scorer>,
Box<dyn Scorer>,
@@ -102,7 +107,9 @@ mod tests {
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, false).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0).unwrap();
let scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert!(scorer.is::<TermScorer>());
}
}
@@ -133,6 +140,7 @@ mod tests {
.map(|doc| doc.1)
.collect::<Vec<DocId>>()
};
{
let boolean_query = BooleanQuery::from(vec![(Occur::Must, make_term_query("a"))]);
assert_eq!(matching_docs(&boolean_query), vec![0, 1, 3]);
@@ -169,54 +177,6 @@ mod tests {
}
}
#[test]
pub fn test_boolean_query_two_excluded() {
let (index, text_field) = aux_test_helper();
let make_term_query = |text: &str| {
let term_query = TermQuery::new(
Term::from_field_text(text_field, text),
IndexRecordOption::Basic,
);
let query: Box<dyn Query> = Box::new(term_query);
query
};
let reader = index.reader().unwrap();
let matching_topdocs = |query: &dyn Query| {
reader
.searcher()
.search(query, &TopDocs::with_limit(3))
.unwrap()
};
let score_doc_4: Score; // score of doc 4 should not be influenced by exclusion
{
let boolean_query_no_excluded =
BooleanQuery::from(vec![(Occur::Must, make_term_query("d"))]);
let topdocs_no_excluded = matching_topdocs(&boolean_query_no_excluded);
assert_eq!(topdocs_no_excluded.len(), 2);
let (top_score, top_doc) = topdocs_no_excluded[0];
assert_eq!(top_doc, DocAddress(0, 4));
assert_eq!(topdocs_no_excluded[1].1, DocAddress(0, 3)); // ignore score of doc 3.
score_doc_4 = top_score;
}
{
let boolean_query_two_excluded = BooleanQuery::from(vec![
(Occur::Must, make_term_query("d")),
(Occur::MustNot, make_term_query("a")),
(Occur::MustNot, make_term_query("b")),
]);
let topdocs_excluded = matching_topdocs(&boolean_query_two_excluded);
assert_eq!(topdocs_excluded.len(), 1);
let (top_score, top_doc) = topdocs_excluded[0];
assert_eq!(top_doc, DocAddress(0, 4));
assert_eq!(top_score, score_doc_4);
}
}
#[test]
pub fn test_boolean_query_with_weight() {
let mut schema_builder = Schema::builder();
@@ -224,7 +184,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a b c"));
index_writer.add_document(doc!(text_field => "a c"));
index_writer.add_document(doc!(text_field => "b c"));
@@ -245,17 +205,17 @@ mod tests {
let boolean_weight = boolean_query.weight(&searcher, true).unwrap();
{
let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 1.0)
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 0.84163445);
assert_nearly_equals(boolean_scorer.score(), 0.84163445f32);
}
{
let mut boolean_scorer = boolean_weight
.scorer(searcher.segment_reader(0u32), 2.0)
.scorer(searcher.segment_reader(0u32), 2.0f32)
.unwrap();
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 1.6832689);
assert_nearly_equals(boolean_scorer.score(), 1.6832689f32);
}
}
@@ -285,9 +245,170 @@ mod tests {
(Occur::Must, make_term_query("a")),
(Occur::Must, make_term_query("b")),
]);
let scores = score_docs(&boolean_query);
assert_nearly_equals!(scores[0], 0.977973);
assert_nearly_equals!(scores[1], 0.84699446);
assert_eq!(score_docs(&boolean_query), vec![0.977973, 0.84699446]);
}
}
// motivated by #554
#[test]
fn test_bm25_several_fields() {
let mut schema_builder = Schema::builder();
let title = schema_builder.add_text_field("title", TEXT);
let text = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
// tf = 1 0
title => "Законы притяжения Оксана Кулакова",
// tf = 1 0
text => "Законы притяжения Оксана Кулакова] \n\nТема: Сексуальное искусство, Женственность\nТип товара: Запись вебинара (аудио)\nПродолжительность: 1,5 часа\n\nСсылка на вебинар:\n ",
));
index_writer.add_document(doc!(
// tf = 1 0
title => "Любимые русские пироги (Оксана Путан)",
// tf = 2 0
text => "http://i95.fastpic.ru/big/2017/0628/9a/615b9c8504d94a3893d7f496ac53539a.jpg \n\nОт издателя\nОксана Путан профессиональный повар, автор кулинарных книг и известный кулинарный блогер. Ее рецепты отличаются практичностью, доступностью и пользуются огромной популярностью в русскоязычном интернете. Это третья книга автора о самом вкусном и ароматном настоящих русских пирогах и выпечке!\nДаже новички на кухне легко готовят по ее рецептам. Оксана описывает процесс приготовления настолько подробно и понятно, что вам остается только наслаждаться готовкой и не тратить время на лишние усилия. Готовьте легко и просто!\n\nhttps://www.ozon.ru/context/detail/id/139872462/"
));
index_writer.add_document(doc!(
// tf = 1 1
title => "PDF Мастер Класс \"Морячок\" (Оксана Лифенко)",
// tf = 0 0
text => "https://i.ibb.co/pzvHrDN/I3d U T6 Gg TM.jpg\nhttps://i.ibb.co/NFrb6v6/N0ls Z9nwjb U.jpg\nВ описание входит штаны, кофта, берет, матросский воротник. Описание продается в формате PDF, состоит из 12 страниц формата А4 и может быть напечатано на любом принтере.\nОписание предназначено для кукол BJD RealPuki от FairyLand, но может подойти и другим подобным куклам. Также вы можете вязать этот наряд из обычной пряжи, и он подойдет для куколок побольше.\nhttps://vk.com/market 95724412?w=product 95724412_2212"
));
for _ in 0..1_000 {
index_writer.add_document(doc!(
title => "a b d e f g",
text => "maitre corbeau sur un arbre perche tenait dans son bec un fromage Maitre rnard par lodeur alleche lui tint a peu pres ce langage."
));
}
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap();
scorer.advance();
let explanation = query.explain(&searcher, DocAddress(0u32, 0u32)).unwrap();
assert_eq!(
explanation.to_pretty_json(),
r#"{
"value": 12.997711,
"description": "BooleanClause. Sum of ...",
"details": [
{
"value": 12.997711,
"description": "BooleanClause. Sum of ...",
"details": [
{
"value": 6.551476,
"description": "TermQuery, product of...",
"details": [
{
"value": 2.2,
"description": "(K1+1)"
},
{
"value": 5.658984,
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
"details": [
{
"value": 3.0,
"description": "n, number of docs containing this term"
},
{
"value": 1003.0,
"description": "N, total number of docs"
}
]
},
{
"value": 0.5262329,
"description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
"details": [
{
"value": 1.0,
"description": "freq, occurrences of term within document"
},
{
"value": 1.2,
"description": "k1, term saturation parameter"
},
{
"value": 0.75,
"description": "b, length normalization parameter"
},
{
"value": 4.0,
"description": "dl, length of field"
},
{
"value": 5.997009,
"description": "avgdl, average length of field"
}
]
}
]
},
{
"value": 6.446235,
"description": "TermQuery, product of...",
"details": [
{
"value": 2.2,
"description": "(K1+1)"
},
{
"value": 5.9954567,
"description": "idf, computed as log(1 + (N - n + 0.5) / (n + 0.5))",
"details": [
{
"value": 2.0,
"description": "n, number of docs containing this term"
},
{
"value": 1003.0,
"description": "N, total number of docs"
}
]
},
{
"value": 0.4887212,
"description": "freq / (freq + k1 * (1 - b + b * dl / avgdl))",
"details": [
{
"value": 1.0,
"description": "freq, occurrences of term within document"
},
{
"value": 1.2,
"description": "k1, term saturation parameter"
},
{
"value": 0.75,
"description": "b, length normalization parameter"
},
{
"value": 20.0,
"description": "dl, length of field"
},
{
"value": 24.123629,
"description": "avgdl, average length of field"
}
]
}
]
}
]
}
]
}"#
);
}
}

View File

@@ -1,7 +1,7 @@
use crate::fastfield::DeleteBitSet;
use crate::query::explanation::does_not_match;
use crate::query::{Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, Term};
use crate::{DocId, DocSet, Searcher, SegmentReader, Term};
use std::collections::BTreeSet;
use std::fmt;
@@ -12,12 +12,12 @@ use std::fmt;
/// factor.
pub struct BoostQuery {
query: Box<dyn Query>,
boost: Score,
boost: f32,
}
impl BoostQuery {
/// Builds a boost query.
pub fn new(query: Box<dyn Query>, boost: Score) -> BoostQuery {
pub fn new(query: Box<dyn Query>, boost: f32) -> BoostQuery {
BoostQuery { query, boost }
}
}
@@ -55,22 +55,22 @@ impl Query for BoostQuery {
pub(crate) struct BoostWeight {
weight: Box<dyn Weight>,
boost: Score,
boost: f32,
}
impl BoostWeight {
pub fn new(weight: Box<dyn Weight>, boost: Score) -> Self {
pub fn new(weight: Box<dyn Weight>, boost: f32) -> Self {
BoostWeight { weight, boost }
}
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -88,11 +88,11 @@ impl Weight for BoostWeight {
pub(crate) struct BoostScorer<S: Scorer> {
underlying: S,
boost: Score,
boost: f32,
}
impl<S: Scorer> BoostScorer<S> {
pub fn new(underlying: S, boost: Score) -> BoostScorer<S> {
pub fn new(underlying: S, boost: f32) -> BoostScorer<S> {
BoostScorer { underlying, boost }
}
}
@@ -128,7 +128,7 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
}
impl<S: Scorer> Scorer for BoostScorer<S> {
fn score(&mut self) -> Score {
fn score(&mut self) -> f32 {
self.underlying.score() * self.boost
}
}
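A hedged usage sketch for the boost wrapper defined above; `inner_query` stands for any already-built query, and the surrounding module's imports are assumed:
// Boosts compose multiplicatively: the outer 3.0 and inner 2.0 yield a 6.0 factor,
// because BoostWeight::scorer forwards `boost * self.boost` to the wrapped weight.
fn boost_six_times(inner_query: Box<dyn Query>) -> BoostQuery {
    BoostQuery::new(Box::new(BoostQuery::new(inner_query, 2.0)), 3.0)
}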
@@ -144,7 +144,7 @@ mod tests {
fn test_boost_query_explain() {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(Document::new());
assert!(index_writer.commit().is_ok());
let reader = index.reader().unwrap();

View File

@@ -34,7 +34,7 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, _reader: &SegmentReader, _boost: f32) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}
@@ -64,7 +64,7 @@ impl DocSet for EmptyScorer {
impl Scorer for EmptyScorer {
fn score(&mut self) -> Score {
0.0
0f32
}
}

View File

@@ -3,11 +3,6 @@ use crate::query::Scorer;
use crate::DocId;
use crate::Score;
#[inline(always)]
fn is_within<TDocSetExclude: DocSet>(docset: &mut TDocSetExclude, doc: DocId) -> bool {
docset.doc() <= doc && docset.seek(doc) == doc
}
/// Filters a given `DocSet` by removing the docs from a given `DocSet`.
///
/// The excluding docset has no impact on scoring.
@@ -28,7 +23,8 @@ where
) -> Exclude<TDocSet, TDocSetExclude> {
while underlying_docset.doc() != TERMINATED {
let target = underlying_docset.doc();
if !is_within(&mut excluding_docset, target) {
if excluding_docset.seek(target) != target {
// this document is not excluded.
break;
}
underlying_docset.advance();
@@ -40,30 +36,42 @@ where
}
}
impl<TDocSet, TDocSetExclude> Exclude<TDocSet, TDocSetExclude>
where
TDocSet: DocSet,
TDocSetExclude: DocSet,
{
/// Returns true iff the doc is not excluded.
///
/// The method has to be called with non-strictly
/// increasing `doc`s.
fn accept(&mut self) -> bool {
let doc = self.underlying_docset.doc();
self.excluding_docset.seek(doc) != doc
}
}
impl<TDocSet, TDocSetExclude> DocSet for Exclude<TDocSet, TDocSetExclude>
where
TDocSet: DocSet,
TDocSetExclude: DocSet,
{
fn advance(&mut self) -> DocId {
loop {
let candidate = self.underlying_docset.advance();
if candidate == TERMINATED {
return TERMINATED;
}
if !is_within(&mut self.excluding_docset, candidate) {
return candidate;
while self.underlying_docset.advance() != TERMINATED {
if self.accept() {
return self.doc();
}
}
TERMINATED
}
fn seek(&mut self, target: DocId) -> DocId {
let candidate = self.underlying_docset.seek(target);
if candidate == TERMINATED {
let underlying_seek_result = self.underlying_docset.seek(target);
if underlying_seek_result == TERMINATED {
return TERMINATED;
}
if !is_within(&mut self.excluding_docset, candidate) {
return candidate;
if self.accept() {
return underlying_seek_result;
}
self.advance()
}
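As a hedged illustration of the exclusion semantics above, a standalone sketch over plain sorted doc id slices (a linear stand-in for the seek-based DocSet logic, not tantivy's API):
// Keeps the docs of `underlying` that do not appear in `excluded`; both slices are sorted.
fn exclude_docs(underlying: &[u32], excluded: &[u32]) -> Vec<u32> {
    let mut result = Vec::new();
    let mut excl = excluded.iter().peekable();
    for &doc in underlying {
        // Advance the excluding cursor up to `doc`, mimicking `excluding_docset.seek(doc)`.
        while excl.peek().map_or(false, |&&e| e < doc) {
            excl.next();
        }
        if excl.peek().map(|&&e| e) != Some(doc) {
            result.push(doc);
        }
    }
    result
}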
@@ -121,7 +129,7 @@ mod tests {
VecDocSet::from(vec![1, 2, 3, 10, 16, 24]),
))
},
vec![5, 8, 10, 15, 24],
vec![1, 2, 5, 8, 10, 15, 24],
);
}

View File

@@ -1,6 +1,5 @@
use crate::{DocId, Score, TantivyError};
use crate::{DocId, TantivyError};
use serde::Serialize;
use std::fmt;
pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
TantivyError::InvalidArgument(format!("Document #({}) does not match", doc))
@@ -13,21 +12,15 @@ pub(crate) fn does_not_match(doc: DocId) -> TantivyError {
/// representation of this tree when debugging a given score.
#[derive(Clone, Serialize)]
pub struct Explanation {
value: Score,
value: f32,
description: String,
#[serde(skip_serializing_if = "Vec::is_empty")]
details: Vec<Explanation>,
}
impl fmt::Debug for Explanation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Explanation({})", self.to_pretty_json())
}
}
impl Explanation {
/// Creates a new explanation object.
pub fn new<T: ToString>(description: T, value: Score) -> Explanation {
pub fn new<T: ToString>(description: T, value: f32) -> Explanation {
Explanation {
value,
description: description.to_string(),
@@ -36,7 +29,7 @@ impl Explanation {
}
/// Returns the value associated to the current node.
pub fn value(&self) -> Score {
pub fn value(&self) -> f32 {
self.value
}
@@ -48,7 +41,7 @@ impl Explanation {
}
/// Shortcut for `self.details.push(Explanation::new(name, value));`
pub fn add_const<T: ToString>(&mut self, name: T, value: Score) {
pub fn add_const<T: ToString>(&mut self, name: T, value: f32) {
self.details.push(Explanation::new(name, value));
}
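A hedged usage sketch of the Explanation tree builder above; the values are hypothetical, chosen so the details roughly multiply into the parent node:
fn explain_example() -> Explanation {
    let mut explanation = Explanation::new("TermQuery, product of...", 5.94);
    explanation.add_const("(K1+1)", 2.2);
    explanation.add_detail(Explanation::new("idf", 2.7));
    // `to_pretty_json()` renders the whole tree, as seen in the BM25 explain tests.
    println!("{}", explanation.to_pretty_json());
    explanation
}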

View File

@@ -163,10 +163,10 @@ impl Query for FuzzyTermQuery {
#[cfg(test)]
mod test {
use super::FuzzyTermQuery;
use crate::assert_nearly_equals;
use crate::collector::TopDocs;
use crate::schema::Schema;
use crate::schema::TEXT;
use crate::tests::assert_nearly_equals;
use crate::Index;
use crate::Term;
@@ -177,7 +177,7 @@ mod test {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(
country_field => "japan",
));
@@ -199,7 +199,7 @@ mod test {
.unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals!(1.0, score);
assert_nearly_equals(1f32, score);
}
// fails because non-prefix Levenshtein distance is more than 1 (add 'a' and 'n')
@@ -223,7 +223,7 @@ mod test {
.unwrap();
assert_eq!(top_docs.len(), 1, "Expected only 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals!(1.0, score);
assert_nearly_equals(1f32, score);
}
}
}

View File

@@ -53,8 +53,7 @@ pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>>
}
fn go_to_first_doc<TDocSet: DocSet>(docsets: &mut [TDocSet]) -> DocId {
assert!(!docsets.is_empty());
let mut candidate = docsets.iter().map(TDocSet::doc).max().unwrap();
let mut candidate = 0;
'outer: loop {
for docset in docsets.iter_mut() {
let seek_doc = docset.seek(candidate);
@@ -119,9 +118,7 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
continue 'outer;
}
}
debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
}
}
@@ -132,10 +129,7 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
for docset in &mut self.others {
docsets.push(docset);
}
let doc = go_to_first_doc(&mut docsets[..]);
debug_assert!(docsets.iter().all(|docset| docset.doc() == doc));
debug_assert!(doc >= target);
doc
go_to_first_doc(&mut docsets[..])
}
fn doc(&self) -> DocId {

View File

@@ -26,7 +26,6 @@ mod weight;
mod vec_docset;
pub(crate) mod score_combiner;
pub(crate) use self::bm25::BM25Weight;
pub use self::intersection::Intersection;
pub use self::union::Union;

View File

@@ -10,13 +10,12 @@ pub use self::phrase_weight::PhraseWeight;
pub mod tests {
use super::*;
use crate::assert_nearly_equals;
use crate::collector::tests::{TEST_COLLECTOR_WITHOUT_SCORE, TEST_COLLECTOR_WITH_SCORE};
use crate::core::Index;
use crate::query::Weight;
use crate::schema::{Schema, Term, TEXT};
use crate::tests::assert_nearly_equals;
use crate::DocAddress;
use crate::DocId;
use crate::{DocAddress, TERMINATED};
pub fn create_index(texts: &[&'static str]) -> Index {
let mut schema_builder = Schema::builder();
@@ -24,7 +23,7 @@ pub mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for &text in texts {
let doc = doc!(text_field=>text);
index_writer.add_document(doc);
@@ -68,23 +67,6 @@ pub mod tests {
assert!(test_query(vec!["g", "a"]).is_empty());
}
#[test]
pub fn test_phrase_query_simple() -> crate::Result<()> {
let index = create_index(&["a b b d c g c", "a b a b c"]);
let text_field = index.schema().get_field("text").unwrap();
let searcher = index.reader()?.searcher();
let terms: Vec<Term> = vec!["a", "b", "c"]
.iter()
.map(|text| Term::from_field_text(text_field, text))
.collect();
let phrase_query = PhraseQuery::new(terms);
let phrase_weight = phrase_query.phrase_weight(&searcher, false)?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0)?;
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
}
#[test]
pub fn test_phrase_query_no_score() {
let index = create_index(&[
@@ -135,7 +117,7 @@ pub mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c"));
assert!(index_writer.commit().is_ok());
}
@@ -175,8 +157,8 @@ pub mod tests {
.to_vec()
};
let scores = test_query(vec!["a", "b"]);
assert_nearly_equals!(scores[0], 0.40618482);
assert_nearly_equals!(scores[1], 0.46844664);
assert_nearly_equals(scores[0], 0.40618482);
assert_nearly_equals(scores[1], 0.46844664);
}
#[test] // motivated by #234
@@ -186,7 +168,7 @@ pub mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.add_document(doc!(text_field=>"b a"));
@@ -217,7 +199,7 @@ pub mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b c d e f g h"));
assert!(index_writer.commit().is_ok());
}

View File

@@ -3,7 +3,7 @@ use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer};
use crate::{DocId, Score};
use crate::DocId;
use std::cmp::Ordering;
struct PostingsWithOffset<TPostings> {
@@ -239,7 +239,6 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
}
fn seek(&mut self, target: DocId) -> DocId {
debug_assert!(target >= self.doc());
let doc = self.intersection_docset.seek(target);
if doc == TERMINATED || self.phrase_match() {
return doc;
@@ -257,7 +256,7 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
}
impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {
fn score(&mut self) -> Score {
fn score(&mut self) -> f32 {
let doc = self.doc();
let fieldnorm_id = self.fieldnorm_reader.fieldnorm_id(doc);
self.similarity_weight
@@ -267,6 +266,7 @@ impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {
#[cfg(test)]
mod tests {
use super::{intersection, intersection_count};
fn test_intersection_sym(left: &[u32], right: &[u32], expected: &[u32]) {

View File

@@ -9,7 +9,7 @@ use crate::query::Weight;
use crate::query::{EmptyScorer, Explanation};
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Score;
use crate::Result;
use crate::{DocId, DocSet};
pub struct PhraseWeight {
@@ -32,7 +32,7 @@ impl PhraseWeight {
}
}
fn fieldnorm_reader(&self, reader: &SegmentReader) -> crate::Result<FieldNormReader> {
fn fieldnorm_reader(&self, reader: &SegmentReader) -> FieldNormReader {
let field = self.phrase_terms[0].1.field();
reader.get_fieldnorms_reader(field)
}
@@ -40,10 +40,10 @@ impl PhraseWeight {
fn phrase_scorer(
&self,
reader: &SegmentReader,
boost: Score,
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
boost: f32,
) -> Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight = self.similarity_weight.boost_by(boost);
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let fieldnorm_reader = self.fieldnorm_reader(reader);
if reader.has_deletes() {
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
@@ -85,7 +85,7 @@ impl PhraseWeight {
}
impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
@@ -93,8 +93,8 @@ impl Weight for PhraseWeight {
}
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0f32)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
@@ -102,7 +102,7 @@ impl Weight for PhraseWeight {
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let fieldnorm_reader = self.fieldnorm_reader(reader);
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
let phrase_count = scorer.phrase_count();
let mut explanation = Explanation::new("Phrase Scorer", scorer.score());
@@ -130,7 +130,7 @@ mod tests {
]);
let phrase_weight = phrase_query.phrase_weight(&searcher, true).unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)
.phrase_scorer(searcher.segment_reader(0u32), 1.0f32)
.unwrap()
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);

View File

@@ -40,7 +40,7 @@ use std::fmt;
///
/// When implementing a new type of `Query`, it is normal to implement a
/// dedicated `Query`, `Weight` and `Scorer`.
pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug {
/// Create the weight associated to a query.
///
/// If scoring is not required, setting `scoring_enabled` to `false`

View File

@@ -2,7 +2,6 @@ use crate::query::Occur;
use crate::schema::Field;
use crate::schema::Term;
use crate::schema::Type;
use crate::Score;
use std::fmt;
use std::ops::Bound;
@@ -22,12 +21,12 @@ pub enum LogicalLiteral {
pub enum LogicalAST {
Clause(Vec<(Occur, LogicalAST)>),
Leaf(Box<LogicalLiteral>),
Boost(Box<LogicalAST>, Score),
Boost(Box<LogicalAST>, f32),
}
impl LogicalAST {
pub fn boost(self, boost: Score) -> LogicalAST {
if (boost - 1.0).abs() < Score::EPSILON {
pub fn boost(self, boost: f32) -> LogicalAST {
if (boost - 1.0f32).abs() < std::f32::EPSILON {
self
} else {
LogicalAST::Boost(Box::new(self), boost)

View File

@@ -12,7 +12,6 @@ use crate::schema::{Facet, IndexRecordOption};
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use crate::Score;
use std::borrow::Cow;
use std::collections::HashMap;
use std::num::{ParseFloatError, ParseIntError};
@@ -21,48 +20,51 @@ use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
/// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Error)]
#[derive(Debug, PartialEq, Eq, Fail)]
pub enum QueryParserError {
/// Error in the query syntax
#[error("Syntax Error")]
#[fail(display = "Syntax Error")]
SyntaxError,
/// `FieldDoesNotExist(field_name: String)`
/// The query references a field that is not in the schema
#[error("File does not exists: '{0:?}'")]
#[fail(display = "File does not exists: '{:?}'", _0)]
FieldDoesNotExist(String),
/// The query contains a term for a `u64` or `i64`-field, but the value
/// is neither.
#[error("Expected a valid integer: '{0:?}'")]
#[fail(display = "Expected a valid integer: '{:?}'", _0)]
ExpectedInt(ParseIntError),
/// The query contains a term for a `f64`-field, but the value
/// is not a f64.
#[error("Invalid query: Only excluding terms given")]
#[fail(display = "Invalid query: Only excluding terms given")]
ExpectedFloat(ParseFloatError),
/// Queries that are only "excluding" (e.g. `-title:pop`) are forbidden.
#[error("Invalid query: Only excluding terms given")]
#[fail(display = "Invalid query: Only excluding terms given")]
AllButQueryForbidden,
/// If no default field is declared, running a query without any
/// field specified is forbidden.
#[error("No default field declared and no field specified in query")]
#[fail(display = "No default field declared and no field specified in query")]
NoDefaultFieldDeclared,
/// The field searched for is not declared
/// as indexed in the schema.
#[error("The field '{0:?}' is not declared as indexed")]
#[fail(display = "The field '{:?}' is not declared as indexed", _0)]
FieldNotIndexed(String),
/// A phrase query was requested for a field that does not
/// have any positions indexed.
#[error("The field '{0:?}' does not have positions indexed")]
#[fail(display = "The field '{:?}' does not have positions indexed", _0)]
FieldDoesNotHavePositionsIndexed(String),
/// The tokenizer for the given field is unknown
/// The two argument strings are the name of the field, the name of the tokenizer
#[error("The tokenizer '{0:?}' for the field '{1:?}' is unknown")]
#[fail(
display = "The tokenizer '{:?}' for the field '{:?}' is unknown",
_0, _1
)]
UnknownTokenizer(String, String),
/// The query contains a range query with a phrase as one of the bounds.
/// Only terms can be used as bounds.
#[error("A range query cannot have a phrase as one of the bounds")]
#[fail(display = "A range query cannot have a phrase as one of the bounds")]
RangeMustNotHavePhrase,
/// The format for the date field is not RFC 3339 compliant.
#[error("The date field has an invalid format")]
#[fail(display = "The date field has an invalid format")]
DateFormatError(chrono::ParseError),
}
@@ -111,7 +113,7 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
/// The language covered by the current parser is extremely simple.
///
/// * simple terms: "e.g.: `Barack Obama` are simply tokenized using
/// tantivy's [`SimpleTokenizer`](../tokenizer/struct.SimpleTokenizer.html), hence
/// tantivy's [`SimpleTokenizer`](tantivy::tokenizer::SimpleTokenizer), hence
/// becoming `["barack", "obama"]`. The terms are then searched within
/// the default terms of the query parser.
///
@@ -170,7 +172,7 @@ pub struct QueryParser {
default_fields: Vec<Field>,
conjunction_by_default: bool,
tokenizer_manager: TokenizerManager,
boost: HashMap<Field, Score>,
boost: HashMap<Field, f32>,
}
fn all_negative(ast: &LogicalAST) -> bool {
@@ -226,7 +228,7 @@ impl QueryParser {
/// If the query defines a query boost through the query language (e.g: `country:France^3.0`),
/// the two boosts (the one defined in the query, and the one defined in the `QueryParser`)
/// are multiplied together.
pub fn set_field_boost(&mut self, field: Field, boost: Score) {
pub fn set_field_boost(&mut self, field: Field, boost: f32) {
self.boost.insert(field, boost);
}
@@ -438,14 +440,14 @@ impl QueryParser {
}
UserInputAST::Boost(ast, boost) => {
let ast = self.compute_logical_ast_with_occur(*ast)?;
Ok(ast.boost(boost as Score))
Ok(ast.boost(boost))
}
UserInputAST::Leaf(leaf) => self.compute_logical_ast_from_leaf(*leaf),
}
}
fn field_boost(&self, field: Field) -> Score {
self.boost.get(&field).cloned().unwrap_or(1.0)
fn field_boost(&self, field: Field) -> f32 {
self.boost.get(&field).cloned().unwrap_or(1.0f32)
}
fn compute_logical_ast_from_leaf(
@@ -656,7 +658,7 @@ mod test {
let mut query_parser = make_query_parser();
let schema = make_schema();
let text_field = schema.get_field("text").unwrap();
query_parser.set_field_boost(text_field, 2.0);
query_parser.set_field_boost(text_field, 2.0f32);
let query = query_parser.parse_query("text:hello").unwrap();
assert_eq!(
format!("{:?}", query),
@@ -669,7 +671,7 @@ mod test {
let mut query_parser = make_query_parser();
let schema = make_schema();
let title_field = schema.get_field("title").unwrap();
query_parser.set_field_boost(title_field, 2.0);
query_parser.set_field_boost(title_field, 2.0f32);
let query = query_parser.parse_query("title:[A TO B]").unwrap();
assert_eq!(
format!("{:?}", query),
@@ -682,7 +684,7 @@ mod test {
let mut query_parser = make_query_parser();
let schema = make_schema();
let text_field = schema.get_field("text").unwrap();
query_parser.set_field_boost(text_field, 2.0);
query_parser.set_field_boost(text_field, 2.0f32);
let query = query_parser.parse_query("text:hello^2").unwrap();
assert_eq!(
format!("{:?}", query),

View File

@@ -9,7 +9,8 @@ use crate::query::{Query, Scorer, Weight};
use crate::schema::Type;
use crate::schema::{Field, IndexRecordOption, Term};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::{DocId, Score};
use crate::DocId;
use crate::Result;
use std::collections::Bound;
use std::ops::Range;
@@ -47,7 +48,7 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// let schema = schema_builder.build();
///
/// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
/// for year in 1950u64..2017u64 {
/// let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// for _ in 0..num_docs_within_year {
@@ -245,11 +246,7 @@ impl RangeQuery {
}
impl Query for RangeQuery {
fn weight(
&self,
searcher: &Searcher,
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
fn weight(&self, searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<dyn Weight>> {
let schema = searcher.schema();
let value_type = schema.get_field_entry(self.field).field_type().value_type();
if value_type != self.value_type {
@@ -292,7 +289,7 @@ impl RangeWeight {
}
impl Weight for RangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
@@ -304,26 +301,24 @@ impl Weight for RangeWeight {
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic);
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
for &doc in block_segment_postings.docs() {
doc_bitset.insert(doc);
}
block_segment_postings.advance();
if !block_segment_postings.advance() {
break;
}
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0f32)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
Ok(Explanation::new("RangeQuery", 1.0))
Ok(Explanation::new("RangeQuery", 1.0f32))
}
}
@@ -331,9 +326,8 @@ impl Weight for RangeWeight {
mod tests {
use super::RangeQuery;
use crate::collector::{Count, TopDocs};
use crate::query::QueryParser;
use crate::schema::{Document, Field, Schema, INDEXED, TEXT};
use crate::collector::Count;
use crate::schema::{Document, Field, Schema, INDEXED};
use crate::Index;
use std::collections::Bound;
@@ -345,7 +339,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
for year in 1950u64..2017u64 {
let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
for _ in 0..num_docs_within_year {
@@ -480,28 +474,4 @@ mod tests {
91
);
}
#[test]
fn test_bug_reproduce_range_query() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT);
schema_builder.add_i64_field("year", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let mut index_writer = index.writer_for_tests()?;
let title = schema.get_field("title").unwrap();
let year = schema.get_field("year").unwrap();
index_writer.add_document(doc!(
title => "hemoglobin blood",
year => 1990 as i64
));
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title]);
let query = query_parser.parse_query("hemoglobin AND year:[1970 TO 1990]")?;
let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
assert_eq!(top_docs.len(), 1);
Ok(())
}
}
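For readers following the RangeQuery hunks above, here is a minimal, self-contained sketch of issuing a numeric range query with the API spelled out in this diff (field names, heap size, and the expected count are arbitrary to this example):

    use tantivy::collector::Count;
    use tantivy::doc;
    use tantivy::query::RangeQuery;
    use tantivy::schema::{Schema, INDEXED};
    use tantivy::Index;

    fn range_query_example() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let year_field = schema_builder.add_u64_field("year", INDEXED);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
        // One document per year.
        for year in 1950u64..2017u64 {
            index_writer.add_document(doc!(year_field => year));
        }
        index_writer.commit()?;
        let searcher = index.reader()?.searcher();
        // Count the documents whose year falls in [1960, 1970).
        let query = RangeQuery::new_u64(year_field, 1960..1970);
        let count = searcher.search(&query, &Count)?;
        assert_eq!(count, 10);
        Ok(())
    }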


@@ -89,10 +89,10 @@ impl Query for RegexQuery {
#[cfg(test)]
mod test {
use super::RegexQuery;
use crate::assert_nearly_equals;
use crate::collector::TopDocs;
use crate::schema::TEXT;
use crate::schema::{Field, Schema};
use crate::tests::assert_nearly_equals;
use crate::{Index, IndexReader};
use std::sync::Arc;
use tantivy_fst::Regex;
@@ -103,7 +103,7 @@ mod test {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(
country_field => "japan",
));
@@ -129,7 +129,7 @@ mod test {
.unwrap();
assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
let (score, _) = scored_docs[0];
assert_nearly_equals!(1.0, score);
assert_nearly_equals(1f32, score);
}
let top_docs = searcher
.search(&query_matching_zero, &TopDocs::with_limit(2))


@@ -72,7 +72,7 @@ where
let doc = self.doc();
let mut score_combiner = TScoreCombiner::default();
score_combiner.update(&mut self.req_scorer);
if self.opt_scorer.doc() <= doc && self.opt_scorer.seek(doc) == doc {
if self.opt_scorer.seek(doc) == doc {
score_combiner.update(&mut self.opt_scorer);
}
let score = score_combiner.score();
@@ -112,47 +112,47 @@ mod tests {
fn test_reqopt_scorer() {
let mut reqoptscorer: RequiredOptionalScorer<_, _, SumCombiner> =
RequiredOptionalScorer::new(
ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0),
ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0),
ConstScorer::new(VecDocSet::from(vec![1, 3, 7, 8, 9, 10, 13, 15]), 1.0f32),
ConstScorer::new(VecDocSet::from(vec![1, 2, 7, 11, 12, 15]), 1.0f32),
);
{
assert_eq!(reqoptscorer.doc(), 1);
assert_eq!(reqoptscorer.score(), 2.0);
assert_eq!(reqoptscorer.score(), 2f32);
}
{
assert_eq!(reqoptscorer.advance(), 3);
assert_eq!(reqoptscorer.doc(), 3);
assert_eq!(reqoptscorer.score(), 1.0);
assert_eq!(reqoptscorer.score(), 1f32);
}
{
assert_eq!(reqoptscorer.advance(), 7);
assert_eq!(reqoptscorer.doc(), 7);
assert_eq!(reqoptscorer.score(), 2.0);
assert_eq!(reqoptscorer.score(), 2f32);
}
{
assert_eq!(reqoptscorer.advance(), 8);
assert_eq!(reqoptscorer.doc(), 8);
assert_eq!(reqoptscorer.score(), 1.0);
assert_eq!(reqoptscorer.score(), 1f32);
}
{
assert_eq!(reqoptscorer.advance(), 9);
assert_eq!(reqoptscorer.doc(), 9);
assert_eq!(reqoptscorer.score(), 1.0);
assert_eq!(reqoptscorer.score(), 1f32);
}
{
assert_eq!(reqoptscorer.advance(), 10);
assert_eq!(reqoptscorer.doc(), 10);
assert_eq!(reqoptscorer.score(), 1.0);
assert_eq!(reqoptscorer.score(), 1f32);
}
{
assert_eq!(reqoptscorer.advance(), 13);
assert_eq!(reqoptscorer.doc(), 13);
assert_eq!(reqoptscorer.score(), 1.0);
assert_eq!(reqoptscorer.score(), 1f32);
}
{
assert_eq!(reqoptscorer.advance(), 15);
assert_eq!(reqoptscorer.doc(), 15);
assert_eq!(reqoptscorer.score(), 2.0);
assert_eq!(reqoptscorer.score(), 2f32);
}
assert_eq!(reqoptscorer.advance(), TERMINATED);
}


@@ -31,7 +31,7 @@ impl ScoreCombiner for DoNothingCombiner {
fn clear(&mut self) {}
fn score(&self) -> Score {
1.0
1f32
}
}
@@ -47,7 +47,7 @@ impl ScoreCombiner for SumCombiner {
}
fn clear(&mut self) {
self.score = 0.0;
self.score = 0f32;
}
fn score(&self) -> Score {
@@ -70,7 +70,7 @@ impl ScoreCombiner for SumWithCoordsCombiner {
}
fn clear(&mut self) {
self.score = 0.0;
self.score = 0f32;
self.num_fields = 0;
}
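The ScoreCombiner trait touched above only asks for update, clear, and score. As a crate-internal sketch (the trait is consumed from inside the crate, so visibility and extra trait bounds may differ from what is shown here), a combiner that keeps the maximum score of the matched sub-scorers could look like this:

    use crate::query::score_combiner::ScoreCombiner;
    use crate::query::Scorer;
    use crate::Score;

    /// Keeps the best score seen for the current document (illustrative only).
    #[derive(Default, Clone, Copy)]
    struct MaxCombiner {
        score: Score,
    }

    impl ScoreCombiner for MaxCombiner {
        fn update<TScorer: Scorer>(&mut self, scorer: &mut TScorer) {
            self.score = self.score.max(scorer.score());
        }

        fn clear(&mut self) {
            self.score = 0.0;
        }

        fn score(&self) -> Score {
            self.score
        }
    }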


@@ -35,14 +35,14 @@ pub struct ConstScorer<TDocSet: DocSet> {
impl<TDocSet: DocSet> ConstScorer<TDocSet> {
/// Creates a new `ConstScorer`.
pub fn new(docset: TDocSet, score: Score) -> ConstScorer<TDocSet> {
pub fn new(docset: TDocSet, score: f32) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}
}
impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
fn from(docset: TDocSet) -> Self {
ConstScorer::new(docset, 1.0)
ConstScorer::new(docset, 1.0f32)
}
}


@@ -9,13 +9,13 @@ pub use self::term_weight::TermWeight;
#[cfg(test)]
mod tests {
use crate::assert_nearly_equals;
use crate::collector::TopDocs;
use crate::docset::DocSet;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::{Query, QueryParser, Scorer, TermQuery};
use crate::schema::{Field, IndexRecordOption, Schema, STRING, TEXT};
use crate::{Index, Term, TERMINATED};
use crate::tests::assert_nearly_equals;
use crate::Index;
use crate::Term;
#[test]
pub fn test_term_query_no_freq() {
@@ -25,7 +25,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let doc = doc!(text_field => "a");
index_writer.add_document(doc);
assert!(index_writer.commit().is_ok());
@@ -37,44 +37,9 @@ mod tests {
);
let term_weight = term_query.weight(&searcher, true).unwrap();
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader, 1.0).unwrap();
let mut term_scorer = term_weight.scorer(segment_reader, 1.0f32).unwrap();
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.score(), 0.28768212);
}
#[test]
pub fn test_term_query_multiple_of_block_len() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", STRING);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_for_tests()?;
for _ in 0..COMPRESSION_BLOCK_SIZE {
let doc = doc!(text_field => "a");
index_writer.add_document(doc);
}
index_writer.commit()?;
}
let searcher = index.reader()?.searcher();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "a"),
IndexRecordOption::Basic,
);
let term_weight = term_query.weight(&searcher, true)?;
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
for i in 0u32..COMPRESSION_BLOCK_SIZE as u32 {
assert_eq!(term_scorer.doc(), i);
if i == COMPRESSION_BLOCK_SIZE as u32 - 1u32 {
assert_eq!(term_scorer.advance(), TERMINATED);
} else {
assert_eq!(term_scorer.advance(), i + 1);
}
}
assert_eq!(term_scorer.doc(), TERMINATED);
Ok(())
assert_eq!(term_scorer.score(), 0.28768212);
}
#[test]
@@ -86,7 +51,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();
index_writer.add_document(doc!(
left_field => "left1 left2 left2 left2f2 left2f2 left3 abcde abcde abcde abcde abcde abcde abcde abcde abcde abcewde abcde abcde",
right_field => "right1 right2",
@@ -104,7 +69,7 @@ mod tests {
.unwrap();
assert_eq!(topdocs.len(), 1);
let (score, _) = topdocs[0];
assert_nearly_equals!(0.77802235, score);
assert_nearly_equals(0.77802235, score);
}
{
let term = Term::from_field_text(left_field, "left1");
@@ -114,9 +79,9 @@ mod tests {
.unwrap();
assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0];
assert_nearly_equals!(0.27101856, score1);
assert_nearly_equals(0.27101856, score1);
let (score2, _) = top_docs[1];
assert_nearly_equals!(0.13736556, score2);
assert_nearly_equals(0.13736556, score2);
}
{
let query_parser = QueryParser::for_index(&index, vec![]);
@@ -124,9 +89,9 @@ mod tests {
let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
assert_eq!(top_docs.len(), 2);
let (score1, _) = top_docs[0];
assert_nearly_equals!(0.9153879, score1);
assert_nearly_equals(0.9153879, score1);
let (score2, _) = top_docs[1];
assert_nearly_equals!(0.27101856, score2);
assert_nearly_equals(0.27101856, score2);
}
}
@@ -136,7 +101,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
let mut index_writer = index.writer_with_num_threads(1, 5_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.add_document(doc!(text_field=>"a c"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));
@@ -147,27 +112,6 @@ mod tests {
assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1);
}
#[test]
fn test_term_query_simple_seek() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests().unwrap();
index_writer.add_document(doc!(text_field=>"a"));
index_writer.add_document(doc!(text_field=>"a"));
index_writer.commit()?;
let term_a = Term::from_field_text(text_field, "a");
let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
let searcher = index.reader()?.searcher();
let term_weight = term_query.weight(&searcher, false)?;
let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek(1u32);
assert_eq!(term_scorer.doc(), 1u32);
Ok(())
}
#[test]
fn test_term_query_debug() {
let term_query = TermQuery::new(


@@ -4,11 +4,10 @@ use crate::DocId;
use crate::Score;
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::postings::SegmentPostings;
use crate::postings::{FreqReadingOption, Postings};
use crate::query::bm25::BM25Weight;
#[derive(Clone)]
pub struct TermScorer {
postings: SegmentPostings,
fieldnorm_reader: FieldNormReader,
@@ -27,56 +26,9 @@ impl TermScorer {
similarity_weight,
}
}
}
pub(crate) fn shallow_seek(&mut self, target_doc: DocId) {
self.postings.block_cursor.shallow_seek(target_doc);
}
#[cfg(test)]
pub fn create_for_test(
doc_and_tfs: &[(DocId, u32)],
fieldnorms: &[u32],
similarity_weight: BM25Weight,
) -> TermScorer {
assert!(!doc_and_tfs.is_empty());
assert!(
doc_and_tfs
.iter()
.map(|(doc, _tf)| *doc)
.max()
.unwrap_or(0u32)
< fieldnorms.len() as u32
);
let segment_postings =
SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorms));
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight)
}
/// See `FreqReadingOption`.
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.postings.block_cursor.freq_reading_option()
}
/// Returns the maximum score for the current block.
///
/// In some rare cases, the result may not be exact. In that case a lower value is returned
/// (which may lead us to return a lesser-scoring document).
///
/// At index time, we store the (fieldnorm_id, term frequency) pair that maximizes the
/// score assuming the average fieldnorm computed on this segment.
///
/// Though extremely rare, it is theoretically possible that the actual average fieldnorm
/// is different enough from the current segment's average fieldnorm that the maximum over a
/// specific block is achieved on a different document.
///
/// (The result is on the other hand guaranteed to be correct if there is only one segment).
pub fn block_max_score(&mut self) -> Score {
self.postings
.block_cursor
.block_max_score(&self.fieldnorm_reader, &self.similarity_weight)
}
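To make the role of these block maxima concrete, here is an illustrative, crate-internal sketch of the skip test a Block-WAND style consumer can run with the methods above (`shallow_seek` is pub(crate), so read this as pseudocode from inside the crate; the real pruning logic lives in `boolean_query::block_wand`):

    use crate::query::term_query::TermScorer;
    use crate::{DocId, DocSet, Score};

    /// Illustrative only: position `scorer` for `target`, hopping over one block
    /// whose best possible score cannot beat `threshold`.
    fn advance_with_block_max(scorer: &mut TermScorer, target: DocId, threshold: Score) -> DocId {
        // Move only the skip-list cursor; the block itself is not decoded yet.
        scorer.shallow_seek(target);
        if scorer.block_max_score() <= threshold {
            // Nothing in this block can qualify: jump straight past it.
            let past_block = scorer.last_doc_in_block() + 1;
            scorer.shallow_seek(past_block);
            past_block
        } else {
            // A competitive document may exist here: decode the block and seek.
            scorer.seek(target)
        }
    }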
impl TermScorer {
pub fn term_freq(&self) -> u32 {
self.postings.term_freq()
}
@@ -90,14 +42,6 @@ impl TermScorer {
let term_freq = self.term_freq();
self.similarity_weight.explain(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> Score {
self.similarity_weight.max_score()
}
pub fn last_doc_in_block(&self) -> DocId {
self.postings.block_cursor.skip_reader.last_doc_in_block()
}
}
impl DocSet for TermScorer {
@@ -125,213 +69,3 @@ impl Scorer for TermScorer {
self.similarity_weight.score(fieldnorm_id, term_freq)
}
}
#[cfg(test)]
mod tests {
use crate::merge_policy::NoMergePolicy;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::term_query::TermScorer;
use crate::query::{BM25Weight, Scorer, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::Score;
use crate::{assert_nearly_equals, Index, Searcher, SegmentId, Term};
use crate::{DocId, DocSet, TERMINATED};
use futures::executor::block_on;
use proptest::prelude::*;
#[test]
fn test_term_scorer_max_score() -> crate::Result<()> {
let bm25_weight = BM25Weight::for_one_term(3, 6, 10.0);
let mut term_scorer = TermScorer::create_for_test(
&[(2, 3), (3, 12), (7, 8)],
&[0, 0, 10, 12, 0, 0, 0, 100],
bm25_weight,
);
let max_scorer = term_scorer.max_score();
crate::assert_nearly_equals!(max_scorer, 1.3990127);
assert_eq!(term_scorer.doc(), 2);
assert_eq!(term_scorer.term_freq(), 3);
assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447);
assert_nearly_equals!(term_scorer.score(), 1.0892314);
assert_eq!(term_scorer.advance(), 3);
assert_eq!(term_scorer.doc(), 3);
assert_eq!(term_scorer.term_freq(), 12);
assert_nearly_equals!(term_scorer.score(), 1.3676447);
assert_eq!(term_scorer.advance(), 7);
assert_eq!(term_scorer.doc(), 7);
assert_eq!(term_scorer.term_freq(), 8);
assert_nearly_equals!(term_scorer.score(), 0.72015285);
assert_eq!(term_scorer.advance(), TERMINATED);
Ok(())
}
#[test]
fn test_term_scorer_shallow_advance() -> crate::Result<()> {
let bm25_weight = BM25Weight::for_one_term(300, 1024, 10.0);
let mut doc_and_tfs = vec![];
for i in 0u32..300u32 {
let doc = i * 10;
doc_and_tfs.push((doc, 1u32 + doc % 3u32));
}
let fieldnorms: Vec<u32> = std::iter::repeat(10u32).take(3_000).collect();
let mut term_scorer = TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight);
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.shallow_seek(1289);
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek(1289);
assert_eq!(term_scorer.doc(), 1290);
Ok(())
}
proptest! {
#[test]
fn test_term_scorer_block_max_score(term_freqs_fieldnorms in proptest::collection::vec((1u32..10u32, 0u32..100u32), 80..300)) {
let term_doc_freq = term_freqs_fieldnorms.len();
let doc_tfs: Vec<(u32, u32)> = term_freqs_fieldnorms.iter()
.cloned()
.enumerate()
.map(|(doc, (tf, _))| (doc as u32, tf))
.collect();
let mut fieldnorms: Vec<u32> = vec![];
for i in 0..term_doc_freq {
let (tf, num_extra_terms) = term_freqs_fieldnorms[i];
fieldnorms.push(tf + num_extra_terms);
}
let average_fieldnorm = fieldnorms
.iter()
.cloned()
.sum::<u32>() as Score / term_doc_freq as Score;
// Average fieldnorm is over the entire index,
// not necessarily the docs that are in the posting list.
// For this reason we multiply by 1.1 to make a realistic value.
let bm25_weight = BM25Weight::for_one_term(term_doc_freq as u64,
term_doc_freq as u64 * 10u64,
average_fieldnorm);
let mut term_scorer =
TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
let docs: Vec<DocId> = (0..term_doc_freq).map(|doc| doc as DocId).collect();
for block in docs.chunks(COMPRESSION_BLOCK_SIZE) {
let block_max_score: Score = term_scorer.block_max_score();
let mut block_max_score_computed: Score = 0.0;
for &doc in block {
assert_eq!(term_scorer.doc(), doc);
block_max_score_computed = block_max_score_computed.max(term_scorer.score());
term_scorer.advance();
}
assert_nearly_equals!(block_max_score_computed, block_max_score);
}
}
}
#[test]
fn test_block_wand() {
let mut doc_tfs: Vec<(u32, u32)> = vec![];
for doc in 0u32..128u32 {
doc_tfs.push((doc, 1u32));
}
for doc in 128u32..256u32 {
doc_tfs.push((doc, if doc == 200 { 2u32 } else { 1u32 }));
}
doc_tfs.push((256, 1u32));
doc_tfs.push((257, 3u32));
doc_tfs.push((258, 1u32));
let fieldnorms: Vec<u32> = std::iter::repeat(20u32).take(300).collect();
let bm25_weight = BM25Weight::for_one_term(10, 129, 20.0);
let mut docs = TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
assert_nearly_equals!(docs.block_max_score(), 2.5161593);
docs.shallow_seek(135);
assert_nearly_equals!(docs.block_max_score(), 3.4597192);
docs.shallow_seek(256);
// the block is not loaded yet.
assert_nearly_equals!(docs.block_max_score(), 5.2971773);
assert_eq!(256, docs.seek(256));
assert_nearly_equals!(docs.block_max_score(), 3.9539647);
}
fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
let term_weight = term_query.specialized_weight(&searcher, true);
for reader in searcher.segment_readers() {
let mut block_max_scores = vec![];
let mut block_max_scores_b = vec![];
let mut docs = vec![];
{
let mut term_scorer = term_weight.specialized_scorer(reader, 1.0)?;
while term_scorer.doc() != TERMINATED {
let mut score = term_scorer.score();
docs.push(term_scorer.doc());
for _ in 0..128 {
score = score.max(term_scorer.score());
if term_scorer.advance() == TERMINATED {
break;
}
}
block_max_scores.push(score);
}
}
{
let mut term_scorer = term_weight.specialized_scorer(reader, 1.0)?;
for d in docs {
term_scorer.shallow_seek(d);
block_max_scores_b.push(term_scorer.block_max_score());
}
}
for (l, r) in block_max_scores
.iter()
.cloned()
.zip(block_max_scores_b.iter().cloned())
{
assert_nearly_equals!(l, r);
}
}
Ok(())
}
#[ignore]
#[test]
fn test_block_wand_long_test() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer = index.writer_with_num_threads(3, 30_000_000)?;
use rand::Rng;
let mut rng = rand::thread_rng();
writer.set_merge_policy(Box::new(NoMergePolicy));
for _ in 0..3_000 {
let term_freq = rng.gen_range(1, 10000);
let words: Vec<&str> = std::iter::repeat("bbbb").take(term_freq).collect();
let text = words.join(" ");
writer.add_document(doc!(text_field=>text));
}
writer.commit()?;
let term_query = TermQuery::new(
Term::from_field_text(text_field, &"bbbb"),
IndexRecordOption::WithFreqs,
);
let segment_ids: Vec<SegmentId>;
let reader = index.reader()?;
{
let searcher = reader.searcher();
segment_ids = searcher
.segment_readers()
.iter()
.map(|segment| segment.segment_id())
.collect();
test_block_wand_aux(&term_query, &searcher)?;
}
{
let _ = block_on(writer.merge(&segment_ids[..]));
}
{
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
test_block_wand_aux(&term_query, &searcher)?;
}
Ok(())
}
}


@@ -4,10 +4,11 @@ use crate::docset::DocSet;
use crate::postings::SegmentPostings;
use crate::query::bm25::BM25Weight;
use crate::query::explanation::does_not_match;
use crate::query::weight::for_each_scorer;
use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
use crate::query::Weight;
use crate::query::{Explanation, Scorer};
use crate::schema::IndexRecordOption;
use crate::Result;
use crate::Term;
use crate::{DocId, Score};
@@ -18,22 +19,22 @@ pub struct TermWeight {
}
impl Weight for TermWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let term_scorer = self.specialized_scorer(reader, boost)?;
fn scorer(&self, reader: &SegmentReader, boost: f32) -> Result<Box<dyn Scorer>> {
let term_scorer = self.scorer_specialized(reader, boost)?;
Ok(Box::new(term_scorer))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.specialized_scorer(reader, 1.0)?;
fn explain(&self, reader: &SegmentReader, doc: DocId) -> Result<Explanation> {
let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
Ok(scorer.explain())
}
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
fn count(&self, reader: &SegmentReader) -> Result<u32> {
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(self.scorer(reader, 1.0)?.count(delete_bitset))
Ok(self.scorer(reader, 1.0f32)?.count(delete_bitset))
} else {
let field = self.term.field();
Ok(reader
@@ -51,7 +52,7 @@ impl Weight for TermWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.specialized_scorer(reader, 1.0)?;
let mut scorer = self.scorer_specialized(reader, 1.0f32)?;
for_each_scorer(&mut scorer, callback);
Ok(())
}
@@ -68,12 +69,12 @@ impl Weight for TermWeight {
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
threshold: f32,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.specialized_scorer(reader, 1.0)?;
crate::query::boolean_query::block_wand(vec![scorer], threshold, callback);
let mut scorer = self.scorer(reader, 1.0f32)?;
for_each_pruning_scorer(&mut scorer, threshold, callback);
Ok(())
}
}
@@ -91,14 +92,10 @@ impl TermWeight {
}
}
pub(crate) fn specialized_scorer(
&self,
reader: &SegmentReader,
boost: Score,
) -> crate::Result<TermScorer> {
fn scorer_specialized(&self, reader: &SegmentReader, boost: f32) -> Result<TermScorer> {
let field = self.term.field();
let inverted_index = reader.inverted_index(field);
let fieldnorm_reader = reader.get_fieldnorms_reader(field)?;
let fieldnorm_reader = reader.get_fieldnorms_reader(field);
let similarity_weight = self.similarity_weight.boost_by(boost);
let postings_opt: Option<SegmentPostings> =
inverted_index.read_postings(&self.term, self.index_record_option);


@@ -55,7 +55,7 @@ where
cursor: HORIZON_NUM_TINYBITSETS,
offset: 0,
doc: 0,
score: 0.0,
score: 0f32,
};
if union.refill() {
union.advance();
@@ -183,10 +183,7 @@ where
// advance all docsets to a doc >= to the target.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::collapsible_if))]
unordered_drain_filter(&mut self.docsets, |docset| {
if docset.doc() < target {
docset.seek(target);
}
docset.doc() == TERMINATED
docset.seek(target) == TERMINATED
});
// at this point all of the docsets
@@ -274,7 +271,7 @@ mod tests {
vals.iter()
.cloned()
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0))
.map(|docset| ConstScorer::new(docset, 1.0f32))
.collect::<Vec<ConstScorer<VecDocSet>>>(),
)
};
@@ -321,7 +318,7 @@ mod tests {
.iter()
.map(|docs| docs.clone())
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0))
.map(|docset| ConstScorer::new(docset, 1.0f32))
.collect::<Vec<_>>(),
));
res
@@ -398,9 +395,9 @@ mod bench {
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet};
use crate::tests;
use crate::DocId;
use crate::DocSet;
use crate::{tests, TERMINATED};
use test::Bencher;
#[bench]
@@ -414,12 +411,10 @@ mod bench {
union_docset
.iter()
.map(|doc_ids| VecDocSet::from(doc_ids.clone()))
.map(|docset| ConstScorer::new(docset, 1.0))
.map(ConstScorer::new)
.collect::<Vec<_>>(),
);
while v.doc() != TERMINATED {
v.advance();
}
while v.advance() {}
});
}
#[bench]
@@ -434,12 +429,10 @@ mod bench {
union_docset
.iter()
.map(|doc_ids| VecDocSet::from(doc_ids.clone()))
.map(|docset| ConstScorer::new(docset, 1.0))
.map(ConstScorer::new)
.collect::<Vec<_>>(),
);
while v.doc() != TERMINATED {
v.advance();
}
while v.advance() {}
});
}
}


@@ -28,7 +28,7 @@ pub(crate) fn for_each_scorer<TScorer: Scorer + ?Sized>(
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
mut threshold: f32,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
@@ -51,14 +51,14 @@ pub trait Weight: Send + Sync + 'static {
/// `boost` is a multiplier to apply to the score.
///
/// See [`Query`](./trait.Query.html).
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
fn scorer(&self, reader: &SegmentReader, boost: f32) -> crate::Result<Box<dyn Scorer>>;
/// Returns an `Explanation` for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number of documents within the given `SegmentReader`.
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
if let Some(delete_bitset) = reader.delete_bitset() {
Ok(scorer.count(delete_bitset))
} else {
@@ -73,7 +73,7 @@ pub trait Weight: Send + Sync + 'static {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
@@ -90,11 +90,11 @@ pub trait Weight: Send + Sync + 'static {
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&self,
threshold: Score,
threshold: f32,
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0f32)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
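As an illustration of the pruning contract above: the callback receives each competitive hit and returns the new threshold under which later documents may be skipped. The tiny collector below is invented for this example and is not part of tantivy:

    use tantivy::{DocId, Score};

    /// Keeps only the single best hit; the returned value is the new threshold.
    struct BestHit {
        doc: DocId,
        score: Score,
    }

    impl BestHit {
        fn on_hit(&mut self, doc: DocId, score: Score) -> Score {
            if score > self.score {
                self.doc = doc;
                self.score = score;
            }
            // Anything that cannot beat this score may now be pruned.
            self.score
        }
    }

    // Typical call site, assuming `weight` and `segment_reader` are in scope:
    //     let mut best = BestHit { doc: 0, score: 0.0 };
    //     weight.for_each_pruning(0.0, segment_reader, &mut |doc, score| best.on_hit(doc, score))?;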


@@ -1,7 +1,5 @@
mod pool;
use slog::error;
pub use self::pool::LeasedItem;
use self::pool::Pool;
use crate::core::Segment;
@@ -24,7 +22,7 @@ pub enum ReloadPolicy {
/// The index is entirely reloaded manually.
/// All updates of the index should be manual.
///
/// No change is reflected automatically. You are required to call `IndexReader::reload()` manually.
/// No change is reflected automatically. You are required to call `.load_seacher()` manually.
Manual,
/// The index is reloaded within milliseconds after a new commit is available.
/// This is made possible by watching changes in the `meta.json` file.
@@ -64,7 +62,6 @@ impl IndexReaderBuilder {
/// to open different segment readers. It may take hundreds of milliseconds
/// of time and it may return an error.
pub fn try_into(self) -> crate::Result<IndexReader> {
let logger = self.index.logger().clone();
let inner_reader = InnerIndexReader {
index: self.index,
num_searchers: self.num_searchers,
@@ -83,8 +80,8 @@ impl IndexReaderBuilder {
let callback = move || {
if let Err(err) = inner_reader_arc_clone.reload() {
error!(
logger,
"Error while loading searcher after commit was detected. {:?}", err
"Error while loading searcher after commit was detected. {:?}",
err
);
}
};
@@ -141,11 +138,9 @@ impl InnerIndexReader {
.collect::<crate::Result<_>>()?
};
let schema = self.index.schema();
let searchers = std::iter::repeat_with(|| {
Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
})
.take(self.num_searchers)
.collect();
let searchers = (0..self.num_searchers)
.map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
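For reference, a short sketch of opening a reader with the manual policy and refreshing it by hand, using the builder shown above (details are illustrative; the automatic policy described above would pick up commits on its own instead):

    use tantivy::{Index, ReloadPolicy};

    fn manual_reader(index: &Index) -> tantivy::Result<()> {
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::Manual)
            .try_into()?;
        // ... commits happen elsewhere ...
        // With `Manual`, new segments only become visible after an explicit reload.
        reader.reload()?;
        let _searcher = reader.searcher();
        Ok(())
    }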


@@ -74,8 +74,9 @@ impl Document {
}
/// Add a text field.
pub fn add_text<S: ToString>(&mut self, field: Field, text: S) {
self.add(FieldValue::new(field, Value::Str(text.to_string())));
pub fn add_text(&mut self, field: Field, text: &str) {
let value = Value::Str(String::from(text));
self.add(FieldValue::new(field, value));
}
/// Add a pre-tokenized text field.
@@ -109,8 +110,8 @@ impl Document {
}
/// Add a bytes field
pub fn add_bytes<T: Into<Vec<u8>>>(&mut self, field: Field, value: T) {
self.add(FieldValue::new(field, Value::Bytes(value.into())))
pub fn add_bytes(&mut self, field: Field, value: Vec<u8>) {
self.add(FieldValue::new(field, Value::Bytes(value)))
}
/// Add a field value
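A short sketch of how the two `Document` signatures above differ for callers (the field names are invented for this example; `&str` and `Vec<u8>` are accepted under both variants):

    use tantivy::schema::{Schema, STORED, TEXT};
    use tantivy::Document;

    fn build_document_example() {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT | STORED);
        let payload = schema_builder.add_bytes_field("payload");
        let _schema = schema_builder.build();

        let mut doc = Document::default();
        // Accepted by both the generic (`ToString` / `Into<Vec<u8>>`) and the
        // narrower (`&str` / `Vec<u8>`) signatures shown above:
        doc.add_text(title, "hemoglobin");
        doc.add_bytes(payload, vec![0u8, 1, 2]);
    }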

Some files were not shown because too many files have changed in this diff.