Compare commits

..

6 Commits

Author SHA1 Message Date
Paul Masurel
b86caeefe2 Major bugfix in intersection
A bug was introduced by the `seek_into_the_danger_zone()` optimization.

(Spotted and fixed by Stu)

The contract says `seek_into_the_danger_zone` returns true if the target is part of the docset.

The blanket implementation goes like this:

```rust
let current_doc = self.doc();
if current_doc < target {
    self.seek(target);
}
self.doc() == target
```

So it will return true when target is TERMINATED, even though TERMINATED does not belong to the docset.


The fix clarifies the contract and fixes the intersection algorithm.
We observe a small but across-the-board improvement in intersection performance.
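
For reference, the corrected blanket implementation (visible in the `DocSet` diff below) guards the `TERMINATED` sentinel explicitly before comparing:

```rust
// Adapted from the docset.rs diff in this compare: TERMINATED is handled
// up front instead of being reported as a member of the docset.
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
    if target >= TERMINATED {
        debug_assert!(target == TERMINATED);
        // TERMINATED is never part of the docset, so it is never `Found`.
        return SeekDangerResult::SeekLowerBound(target);
    }
    let mut doc = self.doc();
    if doc < target {
        doc = self.seek(target);
    }
    if doc == target {
        SeekDangerResult::Found
    } else {
        SeekDangerResult::SeekLowerBound(doc)
    }
}
```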

---------

Co-authored-by: Stu Hood <stuhood@gmail.com>
Co-authored-by: Paul Masurel <paul.masurel@datadoghq.com>
2026-01-23 18:44:10 +01:00
ChangRui-Ryan
abf1e64f4d add benchmark for string search and get (#2795) 2026-01-19 11:50:41 +01:00
trinity-1686a
12977bc7c4 upgrade some dependencies (#2802)
including rand, which had a few breaking changes
2026-01-14 10:19:09 +01:00
trinity-1686a
0c94eb94c3 Merge pull request #2799 from jollygreenlaser/lru 2026-01-13 22:47:35 +01:00
Paul Masurel
c92e831dde Minor refactoring in PostingsSerializer (#2801)
Removes the `Write` generic argument in `PostingsSerializer`.
This removes a useless generic parameter.
Prepares the path for codecs.
Removes one useless `CountingWrite` layer.
etc.

Co-authored-by: Paul Masurel <paul.masurel@datadoghq.com>
2026-01-12 13:53:43 +01:00
Alex Lazar
947c0d5f40 Bump lru to 0.16.3 per dependabot 2026-01-09 23:25:51 -08:00
103 changed files with 2348 additions and 2487 deletions

View File

@@ -27,7 +27,7 @@ regex = { version = "1.5.5", default-features = false, features = [
aho-corasick = "1.0"
tantivy-fst = "0.5"
memmap2 = { version = "0.9.0", optional = true }
lz4_flex = { version = "0.11", default-features = false, optional = true }
lz4_flex = { version = "0.12", default-features = false, optional = true }
zstd = { version = "0.13", optional = true, default-features = false }
tempfile = { version = "3.12.0", optional = true }
log = "0.4.16"
@@ -50,7 +50,7 @@ fail = { version = "0.5.0", optional = true }
time = { version = "0.3.35", features = ["serde-well-known"] }
smallvec = "1.8.0"
rayon = "1.5.2"
lru = "0.12.0"
lru = "0.16.3"
fastdivide = "0.4.0"
itertools = "0.14.0"
measure_time = "0.9.0"
@@ -76,7 +76,7 @@ winapi = "0.3.9"
[dev-dependencies]
binggan = "0.14.2"
rand = "0.8.5"
rand = "0.9"
maplit = "1.0.2"
matches = "0.1.9"
pretty_assertions = "1.2.1"
@@ -85,7 +85,7 @@ test-log = "0.2.10"
futures = "0.3.21"
paste = "1.0.11"
more-asserts = "0.3.1"
rand_distr = "0.4.3"
rand_distr = "0.5"
time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
postcard = { version = "1.0.4", features = [
"use-std",
@@ -189,3 +189,7 @@ harness = false
[[bench]]
name = "bool_queries_with_range"
harness = false
[[bench]]
name = "str_search_and_get"
harness = false

View File

@@ -1,8 +1,8 @@
use binggan::plugins::PeakMemAllocPlugin;
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use rand::distributions::WeightedIndex;
use rand::prelude::SliceRandom;
use rand::distr::weighted::WeightedIndex;
use rand::rngs::StdRng;
use rand::seq::IndexedRandom;
use rand::{Rng, SeedableRng};
use rand_distr::Distribution;
use serde_json::json;
@@ -532,7 +532,7 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
// Prepare 1000 unique terms sampled using a Zipf distribution.
// Exponent ~1.1 approximates top-20 terms covering around ~20%.
let terms_1000: Vec<String> = (1..=1000).map(|i| format!("term_{i}")).collect();
let zipf_1000 = rand_distr::Zipf::new(1000, 1.1f64).unwrap();
let zipf_1000 = rand_distr::Zipf::new(1000.0, 1.1f64).unwrap();
{
let mut rng = StdRng::from_seed([1u8; 32]);
@@ -576,8 +576,8 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
}
let _val_max = 1_000_000.0;
for _ in 0..doc_with_value {
let val: f64 = rng.gen_range(0.0..1_000_000.0);
let json = if rng.gen_bool(0.1) {
let val: f64 = rng.random_range(0.0..1_000_000.0);
let json = if rng.random_bool(0.1) {
// 10% are numeric values
json!({ "mixed_type": val })
} else {
@@ -586,7 +586,7 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
index_writer.add_document(doc!(
text_field => "cool",
json_field => json,
text_field_all_unique_terms => format!("unique_term_{}", rng.gen::<u64>()),
text_field_all_unique_terms => format!("unique_term_{}", rng.random::<u64>()),
text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
text_field_few_terms_status => status_field_data[log_level_distribution.sample(&mut rng)].0,
text_field_1000_terms_zipf => terms_1000[zipf_1000.sample(&mut rng) as usize - 1].as_str(),
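
The renames in this and the following diffs track the rand 0.9 release. A minimal before/after sketch, assuming rand 0.9 as the dependency:

```rust
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

fn rand_09_renames() {
    let mut rng = StdRng::from_seed([1u8; 32]);
    let _range: u64 = rng.random_range(0..10); // rand 0.8: rng.gen_range(0..10)
    let _coin: bool = rng.random_bool(0.5); // rand 0.8: rng.gen_bool(0.5)
    let _raw: u64 = rng.random(); // rand 0.8: rng.gen::<u64>()
    // Also visible in these diffs: rand::thread_rng() -> rand::rng(),
    // rand::distributions -> rand::distr, and slice sampling moving to the
    // rand::seq::IndexedRandom trait.
}
```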

View File

@@ -55,29 +55,29 @@ fn build_shared_indices(num_docs: usize, p_a: f32, p_b: f32, p_c: f32) -> (Bench
{
let mut writer = index.writer_with_num_threads(1, 500_000_000).unwrap();
for _ in 0..num_docs {
let has_a = rng.gen_bool(p_a as f64);
let has_b = rng.gen_bool(p_b as f64);
let has_c = rng.gen_bool(p_c as f64);
let score = rng.gen_range(0u64..100u64);
let score2 = rng.gen_range(0u64..100_000u64);
let has_a = rng.random_bool(p_a as f64);
let has_b = rng.random_bool(p_b as f64);
let has_c = rng.random_bool(p_c as f64);
let score = rng.random_range(0u64..100u64);
let score2 = rng.random_range(0u64..100_000u64);
let mut title_tokens: Vec<&str> = Vec::new();
let mut body_tokens: Vec<&str> = Vec::new();
if has_a {
if rng.gen_bool(0.1) {
if rng.random_bool(0.1) {
title_tokens.push("a");
} else {
body_tokens.push("a");
}
}
if has_b {
if rng.gen_bool(0.1) {
if rng.random_bool(0.1) {
title_tokens.push("b");
} else {
body_tokens.push("b");
}
}
if has_c {
if rng.gen_bool(0.1) {
if rng.random_bool(0.1) {
title_tokens.push("c");
} else {
body_tokens.push("c");

View File

@@ -36,13 +36,13 @@ fn build_shared_indices(num_docs: usize, p_title_a: f32, distribution: &str) ->
"dense" => {
for doc_id in 0..num_docs {
// Always add title to avoid empty documents
let title_token = if rng.gen_bool(p_title_a as f64) {
let title_token = if rng.random_bool(p_title_a as f64) {
"a"
} else {
"b"
};
let num_rand = rng.gen_range(0u64..1000u64);
let num_rand = rng.random_range(0u64..1000u64);
let num_asc = (doc_id / 10000) as u64;
@@ -60,13 +60,13 @@ fn build_shared_indices(num_docs: usize, p_title_a: f32, distribution: &str) ->
"sparse" => {
for doc_id in 0..num_docs {
// Always add title to avoid empty documents
let title_token = if rng.gen_bool(p_title_a as f64) {
let title_token = if rng.random_bool(p_title_a as f64) {
"a"
} else {
"b"
};
let num_rand = rng.gen_range(0u64..10000000u64);
let num_rand = rng.random_range(0u64..10000000u64);
let num_asc = doc_id as u64;

View File

@@ -33,7 +33,7 @@ fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
match distribution {
"dense" => {
for doc_id in 0..num_docs {
let num_rand = rng.gen_range(0u64..1000u64);
let num_rand = rng.random_range(0u64..1000u64);
let num_asc = (doc_id / 10000) as u64;
writer
@@ -46,7 +46,7 @@ fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
}
"sparse" => {
for doc_id in 0..num_docs {
let num_rand = rng.gen_range(0u64..10000000u64);
let num_rand = rng.random_range(0u64..10000000u64);
let num_asc = doc_id as u64;
writer

View File

@@ -97,20 +97,20 @@ fn get_index_0_to_100() -> Index {
let num_vals = 100_000;
let docs: Vec<_> = (0..num_vals)
.map(|_i| {
let id_name = if rng.gen_bool(0.01) {
let id_name = if rng.random_bool(0.01) {
"veryfew".to_string() // 1%
} else if rng.gen_bool(0.1) {
} else if rng.random_bool(0.1) {
"few".to_string() // 9%
} else {
"most".to_string() // 90%
};
Doc {
id_name,
id: rng.gen_range(0..100),
id: rng.random_range(0..100),
// Multiply by 1000, so that we create most buckets in the compact space
// The benches depend on this range to select n-percent of elements with the
// methods below.
ip: Ipv6Addr::from_u128(rng.gen_range(0..100) * 1000),
ip: Ipv6Addr::from_u128(rng.random_range(0..100) * 1000),
}
})
.collect();

View File

@@ -0,0 +1,421 @@
// This benchmark compares different approaches for retrieving string values:
//
// 1. Fast Field Approach: retrieves string values via term_ords() and ord_to_str()
//
// 2. Doc Store Approach: retrieves string values via searcher.doc() and field extraction
//
// The benchmark includes various data distributions:
// - Dense Sequential: Sequential document IDs with dense data
// - Dense Random: Random document IDs with dense data
// - Sparse Sequential: Sequential document IDs with sparse data
// - Sparse Random: Random document IDs with sparse data
use std::ops::Bound;
use binggan::{black_box, BenchGroup, BenchRunner};
use rand::prelude::*;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tantivy::collector::{Count, DocSetCollector};
use tantivy::query::RangeQuery;
use tantivy::schema::document::TantivyDocument;
use tantivy::schema::{Schema, Value, FAST, STORED, STRING};
use tantivy::{doc, Index, ReloadPolicy, Searcher, Term};
#[derive(Clone)]
struct BenchIndex {
#[allow(dead_code)]
index: Index,
searcher: Searcher,
}
fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
// Schema with string fast field and stored field for doc access
let mut schema_builder = Schema::builder();
let f_str_fast = schema_builder.add_text_field("str_fast", STRING | STORED | FAST);
let f_str_stored = schema_builder.add_text_field("str_stored", STRING | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
// Populate index with stable RNG for reproducibility.
let mut rng = StdRng::from_seed([7u8; 32]);
{
let mut writer = index.writer_with_num_threads(1, 4_000_000_000).unwrap();
match distribution {
"dense_random" => {
for _doc_id in 0..num_docs {
let suffix = rng.gen_range(0u64..1000u64);
let str_val = format!("str_{:03}", suffix);
writer
.add_document(doc!(
f_str_fast=>str_val.clone(),
f_str_stored=>str_val,
))
.unwrap();
}
}
"dense_sequential" => {
for doc_id in 0..num_docs {
let suffix = doc_id as u64 % 1000;
let str_val = format!("str_{:03}", suffix);
writer
.add_document(doc!(
f_str_fast=>str_val.clone(),
f_str_stored=>str_val,
))
.unwrap();
}
}
"sparse_random" => {
for _doc_id in 0..num_docs {
let suffix = rng.gen_range(0u64..1000000u64);
let str_val = format!("str_{:07}", suffix);
writer
.add_document(doc!(
f_str_fast=>str_val.clone(),
f_str_stored=>str_val,
))
.unwrap();
}
}
"sparse_sequential" => {
for doc_id in 0..num_docs {
let suffix = doc_id as u64;
let str_val = format!("str_{:07}", suffix);
writer
.add_document(doc!(
f_str_fast=>str_val.clone(),
f_str_stored=>str_val,
))
.unwrap();
}
}
_ => {
panic!("Unsupported distribution type");
}
}
writer.commit().unwrap();
}
// Prepare reader/searcher once.
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
let searcher = reader.searcher();
BenchIndex { index, searcher }
}
fn main() {
// Prepare corpora with varying scenarios
let scenarios = vec![
(
"dense_random_search_low_range".to_string(),
1_000_000,
"dense_random",
0,
9,
),
(
"dense_random_search_high_range".to_string(),
1_000_000,
"dense_random",
990,
999,
),
(
"dense_sequential_search_low_range".to_string(),
1_000_000,
"dense_sequential",
0,
9,
),
(
"dense_sequential_search_high_range".to_string(),
1_000_000,
"dense_sequential",
990,
999,
),
(
"sparse_random_search_low_range".to_string(),
1_000_000,
"sparse_random",
0,
9999,
),
(
"sparse_random_search_high_range".to_string(),
1_000_000,
"sparse_random",
990_000,
999_999,
),
(
"sparse_sequential_search_low_range".to_string(),
1_000_000,
"sparse_sequential",
0,
9999,
),
(
"sparse_sequential_search_high_range".to_string(),
1_000_000,
"sparse_sequential",
990_000,
999_999,
),
];
let mut runner = BenchRunner::new();
for (scenario_id, n, distribution, range_low, range_high) in scenarios {
let bench_index = build_shared_indices(n, distribution);
let mut group = runner.new_group();
group.set_name(scenario_id);
let field = bench_index.searcher.schema().get_field("str_fast").unwrap();
let (lower_str, upper_str) =
if distribution == "dense_sequential" || distribution == "dense_random" {
(
format!("str_{:03}", range_low),
format!("str_{:03}", range_high),
)
} else {
(
format!("str_{:07}", range_low),
format!("str_{:07}", range_high),
)
};
let lower_term = Term::from_field_text(field, &lower_str);
let upper_term = Term::from_field_text(field, &upper_str);
let query = RangeQuery::new(Bound::Included(lower_term), Bound::Included(upper_term));
run_benchmark_tasks(&mut group, &bench_index, query, range_low, range_high);
group.run();
}
}
/// Run all benchmark tasks for a given range query
fn run_benchmark_tasks(
bench_group: &mut BenchGroup,
bench_index: &BenchIndex,
query: RangeQuery,
range_low: u64,
range_high: u64,
) {
// Test count of matching documents
add_bench_task_count(
bench_group,
bench_index,
query.clone(),
range_low,
range_high,
);
// Test fetching all DocIds of matching documents
add_bench_task_docset(
bench_group,
bench_index,
query.clone(),
range_low,
range_high,
);
// Test fetching all string fast field values of matching documents
add_bench_task_fetch_all_strings(
bench_group,
bench_index,
query.clone(),
range_low,
range_high,
);
// Test fetching all string values of matching documents through doc() method
add_bench_task_fetch_all_strings_from_doc(
bench_group,
bench_index,
query,
range_low,
range_high,
);
}
fn add_bench_task_count(
bench_group: &mut BenchGroup,
bench_index: &BenchIndex,
query: RangeQuery,
range_low: u64,
range_high: u64,
) {
let task_name = format!("string_search_count_[{}-{}]", range_low, range_high);
let search_task = CountSearchTask {
searcher: bench_index.searcher.clone(),
query,
};
bench_group.register(task_name, move |_| black_box(search_task.run()));
}
fn add_bench_task_docset(
bench_group: &mut BenchGroup,
bench_index: &BenchIndex,
query: RangeQuery,
range_low: u64,
range_high: u64,
) {
let task_name = format!("string_fetch_all_docset_[{}-{}]", range_low, range_high);
let search_task = DocSetSearchTask {
searcher: bench_index.searcher.clone(),
query,
};
bench_group.register(task_name, move |_| black_box(search_task.run()));
}
fn add_bench_task_fetch_all_strings(
bench_group: &mut BenchGroup,
bench_index: &BenchIndex,
query: RangeQuery,
range_low: u64,
range_high: u64,
) {
let task_name = format!(
"string_fastfield_fetch_all_strings_[{}-{}]",
range_low, range_high
);
let search_task = FetchAllStringsSearchTask {
searcher: bench_index.searcher.clone(),
query,
};
bench_group.register(task_name, move |_| {
let result = black_box(search_task.run());
result.len()
});
}
fn add_bench_task_fetch_all_strings_from_doc(
bench_group: &mut BenchGroup,
bench_index: &BenchIndex,
query: RangeQuery,
range_low: u64,
range_high: u64,
) {
let task_name = format!(
"string_doc_fetch_all_strings_[{}-{}]",
range_low, range_high
);
let search_task = FetchAllStringsFromDocTask {
searcher: bench_index.searcher.clone(),
query,
};
bench_group.register(task_name, move |_| {
let result = black_box(search_task.run());
result.len()
});
}
struct CountSearchTask {
searcher: Searcher,
query: RangeQuery,
}
impl CountSearchTask {
#[inline(never)]
pub fn run(&self) -> usize {
self.searcher.search(&self.query, &Count).unwrap()
}
}
struct DocSetSearchTask {
searcher: Searcher,
query: RangeQuery,
}
impl DocSetSearchTask {
#[inline(never)]
pub fn run(&self) -> usize {
let result = self.searcher.search(&self.query, &DocSetCollector).unwrap();
result.len()
}
}
struct FetchAllStringsSearchTask {
searcher: Searcher,
query: RangeQuery,
}
impl FetchAllStringsSearchTask {
#[inline(never)]
pub fn run(&self) -> Vec<String> {
let doc_addresses = self.searcher.search(&self.query, &DocSetCollector).unwrap();
let mut docs = doc_addresses.into_iter().collect::<Vec<_>>();
docs.sort();
let mut strings = Vec::with_capacity(docs.len());
for doc_address in docs {
let segment_reader = &self.searcher.segment_readers()[doc_address.segment_ord as usize];
let str_column_opt = segment_reader.fast_fields().str("str_fast");
if let Ok(Some(str_column)) = str_column_opt {
let doc_id = doc_address.doc_id;
let term_ord = str_column.term_ords(doc_id).next().unwrap();
let mut str_buffer = String::new();
if str_column.ord_to_str(term_ord, &mut str_buffer).is_ok() {
strings.push(str_buffer);
}
}
}
strings
}
}
struct FetchAllStringsFromDocTask {
searcher: Searcher,
query: RangeQuery,
}
impl FetchAllStringsFromDocTask {
#[inline(never)]
pub fn run(&self) -> Vec<String> {
let doc_addresses = self.searcher.search(&self.query, &DocSetCollector).unwrap();
let mut docs = doc_addresses.into_iter().collect::<Vec<_>>();
docs.sort();
let mut strings = Vec::with_capacity(docs.len());
let str_stored_field = self
.searcher
.schema()
.get_field("str_stored")
.expect("str_stored field should exist");
for doc_address in docs {
// Get the document from the doc store (row store access)
if let Ok(doc) = self.searcher.doc::<TantivyDocument>(doc_address) {
// Extract string values from the stored field
if let Some(field_value) = doc.get_first(str_stored_field) {
if let Some(text) = field_value.as_value().as_str() {
strings.push(text.to_string());
}
}
}
}
strings
}
}

View File

@@ -18,5 +18,5 @@ homepage = "https://github.com/quickwit-oss/tantivy"
bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker1x"] }
[dev-dependencies]
rand = "0.8"
rand = "0.9"
proptest = "1"

View File

@@ -4,8 +4,8 @@ extern crate test;
#[cfg(test)]
mod tests {
use rand::rng;
use rand::seq::IteratorRandom;
use rand::thread_rng;
use tantivy_bitpacker::{BitPacker, BitUnpacker, BlockedBitpacker};
use test::Bencher;
@@ -27,7 +27,7 @@ mod tests {
let num_els = 1_000_000u32;
let bit_unpacker = BitUnpacker::new(bit_width);
let data = create_bitpacked_data(bit_width, num_els);
let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut thread_rng(), 100_000);
let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut rng(), 100_000);
b.iter(|| {
let mut out = 0u64;
for &idx in &idxs {

View File

@@ -22,7 +22,7 @@ downcast-rs = "2.0.1"
[dev-dependencies]
proptest = "1"
more-asserts = "0.3.1"
rand = "0.8"
rand = "0.9"
binggan = "0.14.0"
[[bench]]

View File

@@ -9,7 +9,7 @@ use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_co
fn get_data() -> Vec<u64> {
let mut rng = StdRng::seed_from_u64(2u64);
let mut data: Vec<_> = (100..55_000_u64)
.map(|num| num + rng.r#gen::<u8>() as u64)
.map(|num| num + rng.random::<u8>() as u64)
.collect();
data.push(99_000);
data.insert(1000, 2000);

View File

@@ -6,7 +6,7 @@ use tantivy_columnar::column_values::{CodecType, serialize_u64_based_column_valu
fn get_data() -> Vec<u64> {
let mut rng = StdRng::seed_from_u64(2u64);
let mut data: Vec<_> = (100..55_000_u64)
.map(|num| num + rng.r#gen::<u8>() as u64)
.map(|num| num + rng.random::<u8>() as u64)
.collect();
data.push(99_000);
data.insert(1000, 2000);

View File

@@ -8,7 +8,7 @@ const TOTAL_NUM_VALUES: u32 = 1_000_000;
fn gen_optional_index(fill_ratio: f64) -> OptionalIndex {
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let vals: Vec<u32> = (0..TOTAL_NUM_VALUES)
.map(|_| rng.gen_bool(fill_ratio))
.map(|_| rng.random_bool(fill_ratio))
.enumerate()
.filter(|(_pos, val)| *val)
.map(|(pos, _)| pos as u32)
@@ -25,7 +25,7 @@ fn random_range_iterator(
let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
let mut current = start;
std::iter::from_fn(move || {
current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
current += rng.random_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
if current >= end { None } else { Some(current) }
})
}

View File

@@ -39,7 +39,7 @@ fn get_data_50percent_item() -> Vec<u128> {
let mut data = vec![];
for _ in 0..300_000 {
let val = rng.gen_range(1..=100);
let val = rng.random_range(1..=100);
data.push(val);
}
data.push(SINGLE_ITEM);

View File

@@ -34,7 +34,7 @@ fn get_data_50percent_item() -> Vec<u128> {
let mut data = vec![];
for _ in 0..300_000 {
let val = rng.gen_range(1..=100);
let val = rng.random_range(1..=100);
data.push(val);
}
data.push(SINGLE_ITEM);

View File

@@ -268,7 +268,7 @@ mod tests {
#[test]
fn linear_interpol_fast_field_rand() {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
for _ in 0..50 {
let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
create_and_validate::<LinearCodec>(&data, "random");

View File

@@ -122,7 +122,7 @@ pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
assert_eq!(vals, buffer);
if !vals.is_empty() {
let test_rand_idx = rand::thread_rng().gen_range(0..=vals.len() - 1);
let test_rand_idx = rand::rng().random_range(0..=vals.len() - 1);
let expected_positions: Vec<u32> = vals
.iter()
.enumerate()

View File

@@ -21,5 +21,5 @@ serde = { version = "1.0.136", features = ["derive"] }
[dev-dependencies]
binggan = "0.14.0"
proptest = "1.0.0"
rand = "0.8.4"
rand = "0.9"

View File

@@ -1,6 +1,6 @@
use binggan::{BenchRunner, black_box};
use rand::rng;
use rand::seq::IteratorRandom;
use rand::thread_rng;
use tantivy_common::{BitSet, TinySet, serialize_vint_u32};
fn bench_vint() {
@@ -17,7 +17,7 @@ fn bench_vint() {
black_box(out);
});
let vals: Vec<u32> = (0..20_000).choose_multiple(&mut thread_rng(), 100_000);
let vals: Vec<u32> = (0..20_000).choose_multiple(&mut rng(), 100_000);
runner.bench_function("bench_vint_rand", move |_| {
let mut out = 0u64;
for val in vals.iter().cloned() {

View File

@@ -178,11 +178,13 @@ impl TinySet {
#[derive(Clone)]
pub struct BitSet {
tinysets: Box<[TinySet]>,
len: u64,
max_value: u32,
}
impl std::fmt::Debug for BitSet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BitSet")
.field("len", &self.len)
.field("max_value", &self.max_value)
.finish()
}
@@ -210,6 +212,7 @@ impl BitSet {
let tinybitsets = vec![TinySet::empty(); num_buckets as usize].into_boxed_slice();
BitSet {
tinysets: tinybitsets,
len: 0,
max_value,
}
}
@@ -227,6 +230,7 @@ impl BitSet {
}
BitSet {
tinysets: tinybitsets,
len: max_value as u64,
max_value,
}
}
@@ -245,19 +249,17 @@ impl BitSet {
/// Intersect with tinysets
fn intersect_update_with_iter(&mut self, other: impl Iterator<Item = TinySet>) {
self.len = 0;
for (left, right) in self.tinysets.iter_mut().zip(other) {
*left = left.intersect(right);
self.len += left.len() as u64;
}
}
/// Returns the number of elements in the `BitSet`.
#[inline]
pub fn len(&self) -> usize {
self.tinysets
.iter()
.copied()
.map(|tinyset| tinyset.len())
.sum::<u32>() as usize
self.len as usize
}
/// Inserts an element in the `BitSet`
@@ -266,7 +268,7 @@ impl BitSet {
// we do not check saturated els.
let higher = el / 64u32;
let lower = el % 64u32;
self.tinysets[higher as usize].insert_mut(lower);
self.len += u64::from(self.tinysets[higher as usize].insert_mut(lower));
}
/// Inserts an element in the `BitSet`
@@ -275,7 +277,7 @@ impl BitSet {
// we do not check saturated els.
let higher = el / 64u32;
let lower = el % 64u32;
self.tinysets[higher as usize].remove_mut(lower);
self.len -= u64::from(self.tinysets[higher as usize].remove_mut(lower));
}
/// Returns true iff the elements is in the `BitSet`.
@@ -414,7 +416,7 @@ mod tests {
use std::collections::HashSet;
use ownedbytes::OwnedBytes;
use rand::distributions::Bernoulli;
use rand::distr::Bernoulli;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

View File

@@ -91,10 +91,46 @@ fn main() -> tantivy::Result<()> {
}
}
// Some other powerful operations (especially `.seek`) may be useful to consume these
// A `Term` is a text token associated with a field.
// Let's go through all docs containing the term `title:the` and access their position
let term_the = Term::from_field_text(title, "the");
// Some other powerful operations (especially `.skip_to`) may be useful to consume these
// posting lists rapidly.
// You can check for them in the [`DocSet`](https://docs.rs/tantivy/~0/tantivy/trait.DocSet.html) trait
// and the [`Postings`](https://docs.rs/tantivy/~0/tantivy/trait.Postings.html) trait
// Also, for some VERY specific high performance use cases like an OLAP analysis of logs,
// you can get better performance by directly accessing the blocks of doc ids.
for segment_reader in searcher.segment_readers() {
// A segment contains different data structures.
// Inverted index stands for the combination of
// - the term dictionary
// - the inverted lists associated with each term and their positions
let inverted_index = segment_reader.inverted_index(title)?;
// This segment posting object is like a cursor over the documents matching the term.
// The `IndexRecordOption` argument tells tantivy we will be interested in both term
// frequencies and positions.
//
// If you don't need all this information, you may get better performance by decompressing
// less information.
if let Some(mut block_segment_postings) =
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
{
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
// Once again these docs MAY contain deleted documents as well.
let docs = block_segment_postings.docs();
// Prints `Docs [0, 2].`
println!("Docs {docs:?}");
block_segment_postings.advance();
}
}
}
Ok(())
}

View File

@@ -1,169 +0,0 @@
/// Codec specific to postings data.
pub mod postings;
/// Standard tantivy codec. This is the codec you use by default.
pub mod standard;
use std::io;
pub use standard::StandardCodec;
use crate::codec::postings::PostingsCodec;
use crate::fieldnorm::FieldNormReader;
use crate::postings::{Postings, TermInfo};
use crate::query::{box_scorer, Bm25Weight, Scorer};
use crate::schema::IndexRecordOption;
use crate::{DocId, InvertedIndexReader, Score};
/// A codec describes how data is laid out on disk.
///
/// For the moment, only the postings codec can be customized.
pub trait Codec: Clone + std::fmt::Debug + Send + Sync + 'static {
/// The specific postings type used by this codec.
type PostingsCodec: PostingsCodec;
/// Name of the codec. It should be unique to your codec.
const NAME: &'static str;
/// Load codec based on the codec configuration.
fn from_json_props(json_value: &serde_json::Value) -> crate::Result<Self>;
/// Get codec configuration.
fn to_json_props(&self) -> serde_json::Value;
/// Returns the postings codec.
fn postings_codec(&self) -> &Self::PostingsCodec;
}
/// An object-safe codec is a `Codec` that can be used as a trait object.
///
/// The point of it is to offer a way to use a codec without a proliferation of generics.
pub trait ObjectSafeCodec: 'static + Send + Sync {
/// Loads a type-erased Postings object for the given term.
///
/// If the schema used to build the index did not provide enough
/// information to match the requested `option`, a Postings is still
/// returned in a best-effort manner.
fn load_postings_type_erased(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
inverted_index_reader: &InvertedIndexReader,
) -> io::Result<Box<dyn Postings>>;
/// Loads a type-erased TermScorer object for the given term.
///
/// If the schema used to build the index did not provide enough
/// information to match the requested `option`, a TermScorer is still
/// returned in a best-effort manner.
///
/// The point of this contraption is that the returned TermScorer is backed,
/// not by Box<dyn Postings> but by the codec's concrete Postings type.
fn load_term_scorer_type_erased(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
inverted_index_reader: &InvertedIndexReader,
fieldnorm_reader: FieldNormReader,
similarity_weight: Bm25Weight,
) -> io::Result<Box<dyn Scorer>>;
/// Loads a type-erased PhraseScorer object for the given term.
///
/// If the schema used to build the index did not provide enough
/// information to match the requested `option`, a TermScorer is still
/// returned in a best-effort manner.
///
/// The point of this contraption is that the returned PhraseScorer is backed,
/// not by Box<dyn Postings> but by the codec's concrete Postings type.
fn new_phrase_scorer_type_erased(
&self,
term_infos: &[(usize, TermInfo)],
similarity_weight: Option<Bm25Weight>,
fieldnorm_reader: FieldNormReader,
slop: u32,
inverted_index_reader: &InvertedIndexReader,
) -> io::Result<Box<dyn Scorer>>;
/// Performs a for_each_pruning operation on the given scorer.
///
/// The function will go through matching documents and call the callback
/// function for all docs with a score exceeding the threshold.
///
/// The callback itself returns a new, larger threshold value,
/// meant to update the current threshold.
///
/// If the codec and the scorer allow it, this function can rely on
/// optimizations like the block-max wand.
fn for_each_pruning(
&self,
threshold: Score,
scorer: Box<dyn Scorer>,
callback: &mut dyn FnMut(DocId, Score) -> Score,
);
}
impl<TCodec: Codec> ObjectSafeCodec for TCodec {
fn load_postings_type_erased(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
inverted_index_reader: &InvertedIndexReader,
) -> io::Result<Box<dyn Postings>> {
let postings = inverted_index_reader
.read_postings_from_terminfo_specialized(term_info, option, self)?;
Ok(Box::new(postings))
}
fn load_term_scorer_type_erased(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
inverted_index_reader: &InvertedIndexReader,
fieldnorm_reader: FieldNormReader,
similarity_weight: Bm25Weight,
) -> io::Result<Box<dyn Scorer>> {
let scorer = inverted_index_reader.new_term_scorer_specialized(
term_info,
option,
fieldnorm_reader,
similarity_weight,
self,
)?;
Ok(box_scorer(scorer))
}
fn new_phrase_scorer_type_erased(
&self,
term_infos: &[(usize, TermInfo)],
similarity_weight: Option<Bm25Weight>,
fieldnorm_reader: FieldNormReader,
slop: u32,
inverted_index_reader: &InvertedIndexReader,
) -> io::Result<Box<dyn Scorer>> {
let scorer = inverted_index_reader.new_phrase_scorer_type_specialized(
term_infos,
similarity_weight,
fieldnorm_reader,
slop,
self,
)?;
Ok(box_scorer(scorer))
}
fn for_each_pruning(
&self,
threshold: Score,
scorer: Box<dyn Scorer>,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let accelerated_for_each_pruning_res =
<TCodec as Codec>::PostingsCodec::try_accelerated_for_each_pruning(
threshold, scorer, callback,
);
if let Err(mut scorer) = accelerated_for_each_pruning_res {
// No acceleration available. We need to do things manually.
scorer.for_each_pruning(threshold, callback);
}
}
}

View File

@@ -1,119 +0,0 @@
use std::io;
/// Block-max WAND algorithm.
pub mod block_wand;
use common::OwnedBytes;
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::{Bm25Weight, Scorer};
use crate::schema::IndexRecordOption;
use crate::{DocId, Score};
/// Postings codec.
pub trait PostingsCodec: Send + Sync + 'static {
/// Serializer type for the postings codec.
type PostingsSerializer: PostingsSerializer;
/// Postings type for the postings codec.
type Postings: Postings + Clone;
/// Creates a new postings serializer.
fn new_serializer(
&self,
avg_fieldnorm: Score,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
) -> Self::PostingsSerializer;
/// Loads postings
///
/// Record option is the option that was passed at indexing time.
/// Requested option is the option that is requested.
///
/// For instance, we may have term_freq in the posting list
/// but we can skip decompressing as we read the posting list.
///
/// If record option does not support the requested option,
/// this method does NOT return an error and will in fact restrict
/// requested_option to what is available.
fn load_postings(
&self,
doc_freq: u32,
postings_data: OwnedBytes,
record_option: IndexRecordOption,
requested_option: IndexRecordOption,
positions_data: Option<OwnedBytes>,
) -> io::Result<Self::Postings>;
/// If your codec supports different ways to accelerate `for_each_pruning` that's
/// where you should implement it.
///
/// Returning `Err(scorer)` without mutating the scorer or calling the callback function
/// is never "wrong". It just leaves the responsibility to the caller to call a fallback
/// implementation on the scorer.
///
/// If your codec supports BlockMax-Wand, you just need to have your
/// postings implement `PostingsWithBlockMax` and copy what is done in the StandardPostings
/// codec to enable it.
fn try_accelerated_for_each_pruning(
_threshold: Score,
scorer: Box<dyn Scorer>,
_callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> Result<(), Box<dyn Scorer>> {
Err(scorer)
}
}
/// A postings serializer is a listener that is in charge of serializing postings.
///
/// IO is done only once per postings, once all of the data has been received.
/// A serializer will therefore contain internal buffers.
///
/// A serializer is created once and recycled for all postings.
///
/// Clients should use PostingsSerializer as follows.
/// ```
/// // First postings list
/// serializer.new_term(2, true);
/// serializer.write_doc(2, 1);
/// serializer.write_doc(6, 2);
/// serializer.close_term(3);
/// serializer.clear();
/// // Second postings list
/// serializer.new_term(1, true);
/// serializer.write_doc(3, 1);
/// serializer.close_term(3);
/// ```
pub trait PostingsSerializer {
/// The term_doc_freq here is the number of documents
/// in the postings lists.
///
/// It can be used to compute the idf that will be used for the
/// blockmax parameters.
///
/// If not available (e.g. if we do not collect `term_frequencies`,
/// blockwand is disabled), the term_doc_freq passed will be set to 0.
fn new_term(&mut self, term_doc_freq: u32, record_term_freq: bool);
/// Records a new document id for the current term.
/// The serializer may ignore it.
fn write_doc(&mut self, doc_id: DocId, term_freq: u32);
/// Closes the current term and writes the postings list associated.
fn close_term(&mut self, doc_freq: u32, wrt: &mut impl io::Write) -> io::Result<()>;
}
/// A light complementary interface to `Postings` to allow block-max WAND acceleration.
pub trait PostingsWithBlockMax: Postings {
/// Moves the postings to the block containing `target_doc` and returns
/// an upper bound of the score for documents in the block.
///
/// `Warning`: Calling this method may leave the postings in an invalid state.
/// Callers are required to call `seek` before calling any of the other
/// `Postings` methods (like `doc` / `advance` etc.).
fn seek_block_max(&mut self, target_doc: crate::DocId, similarity_weight: &Bm25Weight)
-> Score;
/// Returns the last document in the current block (or Terminated if this
/// is the last block).
fn last_doc_in_block(&self) -> crate::DocId;
}
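
A hedged sketch of how a caller might use `PostingsWithBlockMax` to skip non-competitive blocks; illustrative only, the actual driver is the `block_wand` module referenced above:

```rust
// Skip whole blocks whose score upper bound cannot beat `threshold`.
// Per the warning on `seek_block_max`, the postings must be re-validated
// with a real `seek` before any other `Postings` method is called.
fn skip_uncompetitive_blocks<P: PostingsWithBlockMax>(
    postings: &mut P,
    weight: &Bm25Weight,
    threshold: Score,
    mut target: crate::DocId,
) -> crate::DocId {
    loop {
        if postings.seek_block_max(target, weight) > threshold {
            // This block may hold a competitive doc: re-enter a valid state.
            return postings.seek(target);
        }
        let last = postings.last_doc_in_block();
        if last == crate::TERMINATED {
            return crate::TERMINATED;
        }
        // The whole block scores below the threshold: jump past it.
        target = last + 1;
    }
}
```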

View File

@@ -1,35 +0,0 @@
use serde::{Deserialize, Serialize};
use crate::codec::standard::postings::StandardPostingsCodec;
use crate::codec::Codec;
/// Tantivy's default postings codec.
pub mod postings;
/// Tantivy's default codec.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct StandardCodec;
impl Codec for StandardCodec {
type PostingsCodec = StandardPostingsCodec;
const NAME: &'static str = "standard";
fn from_json_props(json_value: &serde_json::Value) -> crate::Result<Self> {
if !json_value.is_null() {
return Err(crate::TantivyError::InvalidArgument(format!(
"Codec property for the StandardCodec are unexpected. expected null, got {}",
json_value.as_str().unwrap_or("null")
)));
}
Ok(StandardCodec)
}
fn to_json_props(&self) -> serde_json::Value {
serde_json::Value::Null
}
fn postings_codec(&self) -> &Self::PostingsCodec {
&StandardPostingsCodec
}
}

View File

@@ -1,50 +0,0 @@
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::DocId;
pub struct Block {
doc_ids: [DocId; COMPRESSION_BLOCK_SIZE],
term_freqs: [u32; COMPRESSION_BLOCK_SIZE],
len: usize,
}
impl Block {
pub fn new() -> Self {
Block {
doc_ids: [0u32; COMPRESSION_BLOCK_SIZE],
term_freqs: [0u32; COMPRESSION_BLOCK_SIZE],
len: 0,
}
}
pub fn doc_ids(&self) -> &[DocId] {
&self.doc_ids[..self.len]
}
pub fn term_freqs(&self) -> &[u32] {
&self.term_freqs[..self.len]
}
pub fn clear(&mut self) {
self.len = 0;
}
pub fn append_doc(&mut self, doc: DocId, term_freq: u32) {
let len = self.len;
self.doc_ids[len] = doc;
self.term_freqs[len] = term_freq;
self.len = len + 1;
}
pub fn is_full(&self) -> bool {
self.len == COMPRESSION_BLOCK_SIZE
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
pub fn last_doc(&self) -> DocId {
assert_eq!(self.len, COMPRESSION_BLOCK_SIZE);
self.doc_ids[COMPRESSION_BLOCK_SIZE - 1]
}
}

View File

@@ -1,107 +0,0 @@
use std::io;
use crate::codec::postings::block_wand::{block_wand, block_wand_single_scorer};
use crate::codec::postings::PostingsCodec;
use crate::codec::standard::postings::block_segment_postings::BlockSegmentPostings;
use crate::codec::standard::postings::segment_postings::SegmentPostings;
use crate::fieldnorm::FieldNormReader;
use crate::positions::PositionReader;
use crate::query::term_query::TermScorer;
use crate::query::{BufferedUnionScorer, Scorer, SumCombiner};
use crate::schema::IndexRecordOption;
use crate::{DocSet as _, Score, TERMINATED};
mod block;
mod block_segment_postings;
mod segment_postings;
mod skip;
mod standard_postings_serializer;
pub use segment_postings::SegmentPostings as StandardPostings;
pub use standard_postings_serializer::StandardPostingsSerializer;
/// The default postings codec for tantivy.
pub struct StandardPostingsCodec;
#[expect(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
pub(crate) enum FreqReadingOption {
NoFreq,
SkipFreq,
ReadFreq,
}
impl PostingsCodec for StandardPostingsCodec {
type PostingsSerializer = StandardPostingsSerializer;
type Postings = SegmentPostings;
fn new_serializer(
&self,
avg_fieldnorm: Score,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
) -> Self::PostingsSerializer {
StandardPostingsSerializer::new(avg_fieldnorm, mode, fieldnorm_reader)
}
fn load_postings(
&self,
doc_freq: u32,
postings_data: common::OwnedBytes,
record_option: IndexRecordOption,
requested_option: IndexRecordOption,
positions_data_opt: Option<common::OwnedBytes>,
) -> io::Result<Self::Postings> {
// Rationalize record_option/requested_option.
let requested_option = requested_option.downgrade(record_option);
let block_segment_postings =
BlockSegmentPostings::open(doc_freq, postings_data, record_option, requested_option)?;
let position_reader = positions_data_opt.map(PositionReader::open).transpose()?;
Ok(SegmentPostings::from_block_postings(
block_segment_postings,
position_reader,
))
}
fn try_accelerated_for_each_pruning(
mut threshold: Score,
mut scorer: Box<dyn Scorer>,
callback: &mut dyn FnMut(crate::DocId, Score) -> Score,
) -> Result<(), Box<dyn Scorer>> {
scorer = match scorer.downcast::<TermScorer<Self::Postings>>() {
Ok(term_scorer) => {
block_wand_single_scorer(*term_scorer, threshold, callback);
return Ok(());
}
Err(scorer) => scorer,
};
let mut union_scorer =
scorer.downcast::<BufferedUnionScorer<Box<dyn Scorer>, SumCombiner>>()?;
if !union_scorer
.scorers()
.iter()
.all(|scorer| scorer.is::<TermScorer<Self::Postings>>())
{
return Err(union_scorer);
}
let doc = union_scorer.doc();
if doc == TERMINATED {
return Ok(());
}
let score = union_scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
let boxed_scorers: Vec<Box<dyn Scorer>> = union_scorer.into_scorers();
let scorers: Vec<TermScorer<Self::Postings>> = boxed_scorers
.into_iter()
.map(|scorer| {
*scorer.downcast::<TermScorer<Self::Postings>>().ok().expect(
"Downcast failed despite the fact we already checked the type was correct",
)
})
.collect();
block_wand(scorers, threshold, callback);
Ok(())
}
}

View File

@@ -1,185 +0,0 @@
use std::cmp::Ordering;
use std::io::{self, Write as _};
use common::{BinarySerializable as _, VInt};
use crate::codec::postings::PostingsSerializer;
use crate::codec::standard::postings::block::Block;
use crate::codec::standard::postings::skip::SkipSerializer;
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{BlockEncoder, VIntEncoder as _, COMPRESSION_BLOCK_SIZE};
use crate::query::Bm25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score};
/// Default tantivy postings codec serializer.
pub struct StandardPostingsSerializer {
last_doc_id_encoded: u32,
block_encoder: BlockEncoder,
block: Box<Block>,
postings_write: Vec<u8>,
skip_write: SkipSerializer,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
bm25_weight: Option<Bm25Weight>,
avg_fieldnorm: Score, /* Average number of terms in the field for that segment.
* This value is used to compute the block wand information. */
term_has_freq: bool,
}
impl StandardPostingsSerializer {
/// Creates a new instance of `StandardPostingsSerializer`.
pub fn new(
avg_fieldnorm: Score,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
) -> StandardPostingsSerializer {
Self {
last_doc_id_encoded: 0,
block_encoder: BlockEncoder::new(),
block: Box::new(Block::new()),
postings_write: Vec::new(),
skip_write: SkipSerializer::new(),
mode,
fieldnorm_reader,
bm25_weight: None,
avg_fieldnorm,
term_has_freq: false,
}
}
}
impl PostingsSerializer for StandardPostingsSerializer {
fn new_term(&mut self, term_doc_freq: u32, record_term_freq: bool) {
self.clear();
self.term_has_freq = self.mode.has_freq() && record_term_freq;
if !self.term_has_freq {
return;
}
let num_docs_in_segment: u64 =
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
fieldnorm_reader.num_docs() as u64
} else {
return;
};
if num_docs_in_segment == 0 {
return;
}
self.bm25_weight = Some(Bm25Weight::for_one_term_without_explain(
term_doc_freq as u64,
num_docs_in_segment,
self.avg_fieldnorm,
));
}
fn write_doc(&mut self, doc_id: DocId, term_freq: u32) {
self.block.append_doc(doc_id, term_freq);
if self.block.is_full() {
self.write_block();
}
}
fn close_term(&mut self, doc_freq: u32, output_write: &mut impl io::Write) -> io::Result<()> {
if !self.block.is_empty() {
// we have doc ids waiting to be written
// this happens when the number of doc ids is
// not a perfect multiple of our block size.
//
// In that case, the remaining part is encoded
// using variable int encoding.
{
let block_encoded = self
.block_encoder
.compress_vint_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
self.postings_write.write_all(block_encoded)?;
}
// ... Idem for term frequencies
if self.term_has_freq {
let block_encoded = self
.block_encoder
.compress_vint_unsorted(self.block.term_freqs());
self.postings_write.write_all(block_encoded)?;
}
self.block.clear();
}
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
let skip_data = self.skip_write.data();
VInt(skip_data.len() as u64).serialize(output_write)?;
output_write.write_all(skip_data)?;
}
output_write.write_all(&self.postings_write[..])?;
self.skip_write.clear();
self.postings_write.clear();
self.bm25_weight = None;
Ok(())
}
}
impl StandardPostingsSerializer {
fn clear(&mut self) {
self.bm25_weight = None;
self.block.clear();
self.last_doc_id_encoded = 0;
}
fn write_block(&mut self) {
{
// encode the doc ids
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
self.last_doc_id_encoded = self.block.last_doc();
self.skip_write
.write_doc(self.last_doc_id_encoded, num_bits);
// last el block 0, offset block 1,
self.postings_write.extend(block_encoded);
}
if self.term_has_freq {
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_unsorted(self.block.term_freqs(), true);
self.postings_write.extend(block_encoded);
self.skip_write.write_term_freq(num_bits);
if self.mode.has_positions() {
// We serialize the sum of term freqs within the skip information
// in order to navigate through positions.
let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq);
}
let mut blockwand_params = (0u8, 0u32);
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids().iter().cloned();
let term_freqs = self.block.term_freqs().iter().cloned();
let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
blockwand_params = fieldnorms
.zip(term_freqs)
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
)
.unwrap();
}
}
let (fieldnorm_id, term_freq) = blockwand_params;
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
}
self.block.clear();
}
}

View File

@@ -486,9 +486,9 @@ mod tests {
use std::collections::BTreeSet;
use columnar::Dictionary;
use rand::distributions::Uniform;
use rand::distr::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use rand::{rng, Rng};
use super::{FacetCollector, FacetCounts};
use crate::collector::facet_collector::compress_mapping;
@@ -731,7 +731,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let uniform = Uniform::new_inclusive(1, 100_000);
let uniform = Uniform::new_inclusive(1, 100_000).unwrap();
let mut docs: Vec<TantivyDocument> =
vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
.into_iter()
@@ -741,14 +741,11 @@ mod tests {
std::iter::repeat_n(doc, count)
})
.map(|mut doc| {
doc.add_facet(
facet_field,
&format!("/facet/{}", thread_rng().sample(uniform)),
);
doc.add_facet(facet_field, &format!("/facet/{}", rng().sample(uniform)));
doc
})
.collect();
docs[..].shuffle(&mut thread_rng());
docs[..].shuffle(&mut rng());
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
for doc in docs {
@@ -822,8 +819,8 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::rng;
use rand::seq::SliceRandom;
use rand::thread_rng;
use test::Bencher;
use crate::collector::FacetCollector;
@@ -846,7 +843,7 @@ mod bench {
}
}
// 40425 docs
docs[..].shuffle(&mut thread_rng());
docs[..].shuffle(&mut rng());
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
for doc in docs {

View File

@@ -160,7 +160,7 @@ mod tests {
expected: &[(crate::Score, usize)],
) {
let mut vals: Vec<(crate::Score, usize)> = (0..10).map(|val| (val as f32, val)).collect();
vals.shuffle(&mut rand::thread_rng());
vals.shuffle(&mut rand::rng());
let vals_merged = merge_top_k(vals.into_iter(), doc_range, ComparatorEnum::from(order));
assert_eq!(&vals_merged, expected);
}

View File

@@ -4,7 +4,7 @@ use common::{replace_in_place, JsonPathWriter};
use rustc_hash::FxHashMap;
use crate::indexer::indexing_term::IndexingTerm;
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter as _, PostingsWriterEnum};
use crate::postings::{IndexingContext, IndexingPosition, PostingsWriter};
use crate::schema::document::{ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::{Type, DATE_TIME_PRECISION_INDEXED};
use crate::time::format_description::well_known::Rfc3339;
@@ -80,7 +80,7 @@ fn index_json_object<'a, V: Value<'a>>(
text_analyzer: &mut TextAnalyzer,
term_buffer: &mut IndexingTerm,
json_path_writer: &mut JsonPathWriter,
postings_writer: &mut PostingsWriterEnum,
postings_writer: &mut dyn PostingsWriter,
ctx: &mut IndexingContext,
positions_per_path: &mut IndexingPositionsPerPath,
) {
@@ -110,7 +110,7 @@ pub(crate) fn index_json_value<'a, V: Value<'a>>(
text_analyzer: &mut TextAnalyzer,
term_buffer: &mut IndexingTerm,
json_path_writer: &mut JsonPathWriter,
postings_writer: &mut PostingsWriterEnum,
postings_writer: &mut dyn PostingsWriter,
ctx: &mut IndexingContext,
positions_per_path: &mut IndexingPositionsPerPath,
) {

View File

@@ -1,7 +1,5 @@
use std::borrow::{Borrow, BorrowMut};
use common::BitSet;
use crate::fastfield::AliveBitSet;
use crate::DocId;
@@ -53,31 +51,55 @@ pub trait DocSet: Send {
doc
}
/// Seeks to the target if possible and returns true if the target is in the DocSet.
/// !!!Dragons ahead!!!
/// In spirit, this is an approximate and dangerous version of `seek`.
///
/// It can leave the DocSet in an `invalid` state and might return a
/// lower bound of what the result of `seek` would have been.
///
/// More accurately it returns either:
/// - Found if the target is in the docset. In that case, the DocSet is left in a valid state.
/// - SeekLowerBound(seek_lower_bound) if the target is not in the docset. In that case, the
///   DocSet can be left in an invalid state. The DocSet should then only receive calls to
///   `seek_danger(..)` until it returns `Found`, and gets back to a valid state.
///
/// `seek_lower_bound` can be any `DocId` (in the docset or not) as long as it is in
/// `(target .. seek_result]` where `seek_result` is the first document in the docset greater
/// than `target`.
///
/// `seek_danger` may return `SeekLowerBound(TERMINATED)`.
///
/// Calling `seek_danger` with TERMINATED as a target is allowed,
/// and should always return `SeekLowerBound(TERMINATED)`, as TERMINATED is NOT in
/// the DocSet.
///
/// DocSets that already have an efficient `seek` method don't need to implement
/// `seek_into_the_danger_zone`. All wrapper DocSets should forward
/// `seek_into_the_danger_zone` to the underlying DocSet.
/// `seek_danger`.
///
/// ## API Behaviour
/// If `seek_into_the_danger_zone` is returning true, a call to `doc()` has to return target.
/// If `seek_into_the_danger_zone` is returning false, a call to `doc()` may return any doc
/// between the last doc that matched and target or a doc that is a valid next hit after
/// target. The DocSet is considered to be in an invalid state until
/// `seek_into_the_danger_zone` returns true again.
///
/// `target` needs to be equal or larger than `doc` when in a valid state.
///
/// Consecutive calls are not allowed to have decreasing `target` values.
///
/// # Warning
/// This is an advanced API used by intersection. The API contract is tricky, avoid using it.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
let current_doc = self.doc();
if current_doc < target {
self.seek(target);
/// Consecutive calls to seek_danger are guaranteed to have strictly increasing `target`
/// values.
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
if target >= TERMINATED {
debug_assert!(target == TERMINATED);
// No need to advance.
return SeekDangerResult::SeekLowerBound(target);
}
// The default implementation does not include any
// `danger zone` behavior.
//
// It does not leave the scorer in an invalid state.
// For this reason, we can safely call `self.doc()`.
let mut doc = self.doc();
if doc < target {
doc = self.seek(target);
}
if doc == target {
SeekDangerResult::Found
} else {
SeekDangerResult::SeekLowerBound(self.doc())
}
self.doc() == target
}
/// Fills a given mutable buffer with the next doc ids from the
@@ -108,15 +130,6 @@ pub trait DocSet: Send {
buffer.len()
}
/// TODO comment on the size of the bitset
fn fill_bitset(&mut self, bitset: &mut BitSet) {
let mut doc = self.doc();
while doc != TERMINATED {
bitset.insert(doc);
doc = self.advance();
}
}
/// Returns the current document
/// Right after creating a new `DocSet`, the docset points to the first document.
///
@@ -177,6 +190,17 @@ pub trait DocSet: Send {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SeekDangerResult {
/// The target was found in the DocSet.
Found,
/// The target was not found in the DocSet.
/// We return a lower bound on where the target's successor could be.
/// The returned DocId can be any DocId that is less than or equal to the first
/// document in the docset after the target.
SeekLowerBound(DocId),
}
impl DocSet for &mut dyn DocSet {
fn advance(&mut self) -> u32 {
(**self).advance()
@@ -186,8 +210,8 @@ impl DocSet for &mut dyn DocSet {
(**self).seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
(**self).seek_into_the_danger_zone(target)
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
(**self).seek_danger(target)
}
fn doc(&self) -> u32 {
@@ -222,9 +246,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.seek_into_the_danger_zone(target)
unboxed.seek_danger(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {

View File

@@ -162,7 +162,7 @@ mod tests {
mod bench {
use rand::prelude::IteratorRandom;
use rand::thread_rng;
use rand::rng;
use test::Bencher;
use super::AliveBitSet;
@@ -176,7 +176,7 @@ mod bench {
}
fn remove_rand(raw: &mut Vec<u32>) {
let i = (0..raw.len()).choose(&mut thread_rng()).unwrap();
let i = (0..raw.len()).choose(&mut rng()).unwrap();
raw.remove(i);
}

View File

@@ -879,7 +879,7 @@ mod tests {
const ONE_HOUR_IN_MICROSECS: i64 = 3_600 * 1_000_000;
let times: Vec<DateTime> = std::iter::repeat_with(|| {
// +- One hour.
let t = T0 + rng.gen_range(-ONE_HOUR_IN_MICROSECS..ONE_HOUR_IN_MICROSECS);
let t = T0 + rng.random_range(-ONE_HOUR_IN_MICROSECS..ONE_HOUR_IN_MICROSECS);
DateTime::from_timestamp_micros(t)
})
.take(1_000)

View File

@@ -1,6 +1,6 @@
use std::collections::HashSet;
use rand::{thread_rng, Rng};
use rand::{rng, Rng};
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::*;
@@ -29,7 +29,7 @@ fn test_functional_store() -> crate::Result<()> {
let index = Index::create_in_ram(schema);
let reader = index.reader()?;
let mut rng = thread_rng();
let mut rng = rng();
let mut index_writer: IndexWriter =
index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
@@ -38,9 +38,9 @@ fn test_functional_store() -> crate::Result<()> {
let mut doc_id = 0u64;
for _iteration in 0..get_num_iterations() {
let num_docs: usize = rng.gen_range(0..4);
let num_docs: usize = rng.random_range(0..4);
if !doc_set.is_empty() {
let doc_to_remove_id = rng.gen_range(0..doc_set.len());
let doc_to_remove_id = rng.random_range(0..doc_set.len());
let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
}
@@ -70,10 +70,10 @@ const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat \
non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
fn get_text() -> String {
use rand::seq::SliceRandom;
let mut rng = thread_rng();
use rand::seq::IndexedRandom;
let mut rng = rng();
let tokens: Vec<_> = LOREM.split(' ').collect();
let random_val = rng.gen_range(0..20);
let random_val = rng.random_range(0..20);
(0..random_val)
.map(|_| tokens.choose(&mut rng).unwrap())
@@ -101,7 +101,7 @@ fn test_functional_indexing_unsorted() -> crate::Result<()> {
let index = Index::create_from_tempdir(schema)?;
let reader = index.reader()?;
let mut rng = thread_rng();
let mut rng = rng();
let mut index_writer: IndexWriter =
index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
@@ -110,7 +110,7 @@ fn test_functional_indexing_unsorted() -> crate::Result<()> {
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..get_num_iterations() {
let random_val = rng.gen_range(0..20);
let random_val = rng.random_range(0..20);
if random_val == 0 {
index_writer.commit()?;
committed_docs.extend(&uncommitted_docs);

View File

@@ -1,46 +0,0 @@
use std::borrow::Cow;
use serde::{Deserialize, Serialize};
use crate::codec::{Codec, StandardCodec};
/// A small struct representing a codec configuration.
/// It is meant to be serialized in the index metadata file.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct CodecConfiguration {
name: Cow<'static, str>,
#[serde(default, skip_serializing_if = "serde_json::Value::is_null")]
props: serde_json::Value,
}
impl CodecConfiguration {
/// Builds the codec given a codec_configuration.
///
/// If the configuration codec name does not match the type C,
/// a tantivy error is returned.
pub fn to_codec<C: Codec>(&self) -> crate::Result<C> {
if self.name != C::NAME {
return Err(crate::TantivyError::InvalidArgument(format!(
"Codec name mismatch: expected {}, got {}",
C::NAME,
self.name
)));
}
C::from_json_props(&self.props)
}
}
impl<'a, C: Codec> From<&'a C> for CodecConfiguration {
fn from(codec: &'a C) -> Self {
CodecConfiguration {
name: Cow::Borrowed(C::NAME),
props: codec.to_json_props(),
}
}
}
impl Default for CodecConfiguration {
fn default() -> Self {
CodecConfiguration::from(&StandardCodec)
}
}
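
For context, a short usage sketch of the `CodecConfiguration` API shown above (hedged; illustrative only):

```rust
// Round-trip a codec through its serialized configuration. `to_codec`
// fails if the stored codec name does not match the requested type `C`.
fn roundtrip() -> crate::Result<()> {
    let config = CodecConfiguration::from(&StandardCodec);
    let _json = serde_json::to_string(&config); // as persisted in the meta file
    let _codec: StandardCodec = config.to_codec()?;
    Ok(())
}
```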

View File

@@ -8,14 +8,12 @@ use std::thread::available_parallelism;
use super::segment::Segment;
use super::segment_reader::merge_field_meta_data;
use super::{FieldMetadata, IndexSettings};
use crate::codec::StandardCodec;
use crate::core::{Executor, META_FILEPATH};
use crate::directory::error::OpenReadError;
#[cfg(feature = "mmap")]
use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError};
use crate::index::codec_configuration::CodecConfiguration;
use crate::index::{IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory};
use crate::indexer::index_writer::{
IndexWriterOptions, MAX_NUM_THREAD, MEMORY_BUDGET_NUM_BYTES_MIN,
@@ -61,7 +59,6 @@ fn save_new_metas(
schema: Schema,
index_settings: IndexSettings,
directory: &dyn Directory,
codec: CodecConfiguration,
) -> crate::Result<()> {
save_metas(
&IndexMeta {
@@ -70,7 +67,6 @@ fn save_new_metas(
schema,
opstamp: 0u64,
payload: None,
codec,
},
directory,
)?;
@@ -105,21 +101,18 @@ fn save_new_metas(
/// };
/// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
/// ```
pub struct IndexBuilder<Codec: crate::codec::Codec = StandardCodec> {
pub struct IndexBuilder {
schema: Option<Schema>,
index_settings: IndexSettings,
tokenizer_manager: TokenizerManager,
fast_field_tokenizer_manager: TokenizerManager,
codec: Codec,
}
impl Default for IndexBuilder<StandardCodec> {
impl Default for IndexBuilder {
fn default() -> Self {
IndexBuilder::new()
}
}
impl IndexBuilder<StandardCodec> {
impl IndexBuilder {
/// Creates a new `IndexBuilder`
pub fn new() -> Self {
Self {
@@ -127,21 +120,6 @@ impl IndexBuilder<StandardCodec> {
index_settings: IndexSettings::default(),
tokenizer_manager: TokenizerManager::default(),
fast_field_tokenizer_manager: TokenizerManager::default(),
codec: StandardCodec,
}
}
}
impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// Set the codec
#[must_use]
pub fn codec<NewCodec: crate::codec::Codec>(self, codec: NewCodec) -> IndexBuilder<NewCodec> {
IndexBuilder {
schema: self.schema,
index_settings: self.index_settings,
tokenizer_manager: self.tokenizer_manager,
fast_field_tokenizer_manager: self.fast_field_tokenizer_manager,
codec,
}
}
@@ -176,7 +154,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// The index will be allocated in anonymous memory.
/// This is useful for indexing a small set of documents,
/// for instance in unit tests or for a temporary in-memory index.
pub fn create_in_ram(self) -> Result<Index<Codec>, TantivyError> {
pub fn create_in_ram(self) -> Result<Index, TantivyError> {
let ram_directory = RamDirectory::create();
self.create(ram_directory)
}
@@ -187,7 +165,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// If a previous index was in this directory, it returns an
/// [`TantivyError::IndexAlreadyExists`] error.
#[cfg(feature = "mmap")]
pub fn create_in_dir<P: AsRef<Path>>(self, directory_path: P) -> crate::Result<Index<Codec>> {
pub fn create_in_dir<P: AsRef<Path>>(self, directory_path: P) -> crate::Result<Index> {
let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::open(directory_path)?);
if Index::exists(&*mmap_directory)? {
return Err(TantivyError::IndexAlreadyExists);
@@ -208,7 +186,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
self,
dir: impl Into<Box<dyn Directory>>,
mem_budget: usize,
) -> crate::Result<SingleSegmentIndexWriter<Codec, D>> {
) -> crate::Result<SingleSegmentIndexWriter<D>> {
let index = self.create(dir)?;
let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
Ok(index_simple_writer)
@@ -224,7 +202,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// For other unit tests, prefer the [`RamDirectory`], see:
/// [`IndexBuilder::create_in_ram()`].
#[cfg(feature = "mmap")]
pub fn create_from_tempdir(self) -> crate::Result<Index<Codec>> {
pub fn create_from_tempdir(self) -> crate::Result<Index> {
let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?);
self.create(mmap_directory)
}
@@ -237,15 +215,12 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
}
/// Opens or creates a new index in the provided directory
pub fn open_or_create<T: Into<Box<dyn Directory>>>(
self,
dir: T,
) -> crate::Result<Index<Codec>> {
pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
let dir: Box<dyn Directory> = dir.into();
if !Index::exists(&*dir)? {
return self.create(dir);
}
let mut index: Index<Codec> = Index::<Codec>::open_with_codec(dir)?;
let mut index = Index::open(dir)?;
index.set_tokenizers(self.tokenizer_manager.clone());
if index.schema() == self.get_expect_schema()? {
Ok(index)
@@ -269,25 +244,18 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// Creates a new index given an implementation of the trait `Directory`.
///
/// If a directory previously existed, it will be erased.
pub fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index<Codec>> {
self.create_avoid_monomorphization(dir.into())
}
fn create_avoid_monomorphization(self, dir: Box<dyn Directory>) -> crate::Result<Index<Codec>> {
fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
self.validate()?;
let dir = dir.into();
let directory = ManagedDirectory::wrap(dir)?;
let codec: CodecConfiguration = CodecConfiguration::from(&self.codec);
save_new_metas(
self.get_expect_schema()?,
self.index_settings.clone(),
&directory,
codec,
)?;
let schema = self.get_expect_schema()?;
let mut metas = IndexMeta::with_schema_and_codec(schema, &self.codec);
let mut metas = IndexMeta::with_schema(self.get_expect_schema()?);
metas.index_settings = self.index_settings;
let mut index: Index<Codec> =
Index::<Codec>::open_from_metas(directory, &metas, SegmentMetaInventory::default())?;
let mut index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
index.set_tokenizers(self.tokenizer_manager);
index.set_fast_field_tokenizers(self.fast_field_tokenizer_manager);
Ok(index)
@@ -296,7 +264,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// Search Index
#[derive(Clone)]
pub struct Index<Codec: crate::codec::Codec = crate::codec::StandardCodec> {
pub struct Index {
directory: ManagedDirectory,
schema: Schema,
settings: IndexSettings,
@@ -304,7 +272,6 @@ pub struct Index<Codec: crate::codec::Codec = crate::codec::StandardCodec> {
tokenizers: TokenizerManager,
fast_field_tokenizers: TokenizerManager,
inventory: SegmentMetaInventory,
codec: Codec,
}
impl Index {
@@ -312,6 +279,41 @@ impl Index {
pub fn builder() -> IndexBuilder {
IndexBuilder::new()
}
/// Examines the directory to see if it contains an index.
///
/// Effectively, it only checks for the presence of the `meta.json` file.
pub fn exists(dir: &dyn Directory) -> Result<bool, OpenReadError> {
dir.exists(&META_FILEPATH)
}
/// Accessor to the search executor.
///
/// This pool is used by default when calling `searcher.search(...)`
/// to perform search on the individual segments.
///
/// By default the executor is single-threaded, and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
&self.executor
}
/// Replace the default single-threaded search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
Ok(())
}
/// Replaces the executor with a custom one, e.g. backed by an outer thread pool.
pub fn set_executor(&mut self, executor: Executor) {
self.executor = executor;
}
/// Replace the default single-threaded search executor pool
/// by a thread pool with as many threads as there are CPUs on the system.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = available_parallelism()?.get();
self.set_multithread_executor(default_num_threads)
}
/// Creates a new index using the [`RamDirectory`].
///
@@ -322,13 +324,6 @@ impl Index {
IndexBuilder::new().schema(schema).create_in_ram().unwrap()
}
/// Examines the directory to see if it contains an index.
///
/// Effectively, it only checks for the presence of the `meta.json` file.
pub fn exists(directory: &dyn Directory) -> Result<bool, OpenReadError> {
directory.exists(&META_FILEPATH)
}
/// Creates a new index in a given filepath.
/// The index will use the [`MmapDirectory`].
///
@@ -375,107 +370,20 @@ impl Index {
schema: Schema,
settings: IndexSettings,
) -> crate::Result<Index> {
Self::create_to_avoid_monomorphization(dir.into(), schema, settings)
}
fn create_to_avoid_monomorphization(
dir: Box<dyn Directory>,
schema: Schema,
settings: IndexSettings,
) -> crate::Result<Index> {
let dir: Box<dyn Directory> = dir.into();
let mut builder = IndexBuilder::new().schema(schema);
builder = builder.settings(settings);
builder.create(dir)
}
/// Opens a new directory from an index path.
#[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> {
Self::open_in_dir_to_avoid_monomorphization(directory_path.as_ref())
}
#[inline(never)]
fn open_in_dir_to_avoid_monomorphization(directory_path: &Path) -> crate::Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?;
Index::open(mmap_directory)
}
/// Open the index using the provided directory
pub fn open<T: Into<Box<dyn Directory>>>(directory: T) -> crate::Result<Index> {
Index::<StandardCodec>::open_with_codec(directory.into())
}
}
impl<Codec: crate::codec::Codec> Index<Codec> {
/// Returns a version of this index with the standard codec.
/// This is useful when you need to pass the index to APIs that
/// don't care about the codec (e.g., for reading).
pub(crate) fn with_standard_codec(&self) -> Index<StandardCodec> {
Index {
directory: self.directory.clone(),
schema: self.schema.clone(),
settings: self.settings.clone(),
executor: self.executor.clone(),
tokenizers: self.tokenizers.clone(),
fast_field_tokenizers: self.fast_field_tokenizers.clone(),
inventory: self.inventory.clone(),
codec: StandardCodec,
}
}
/// Open the index using the provided directory
#[inline(never)]
pub fn open_with_codec(directory: Box<dyn Directory>) -> crate::Result<Index<Codec>> {
let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default();
let metas = load_metas(&directory, &inventory)?;
let index: Index<Codec> = Index::<Codec>::open_from_metas(directory, &metas, inventory)?;
Ok(index)
}
/// Accessor to the codec.
pub fn codec(&self) -> &Codec {
&self.codec
}
/// Accessor to the search executor.
///
/// This pool is used by default when calling `searcher.search(...)`
/// to perform search on the individual segments.
///
/// By default the executor is single-threaded, and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
&self.executor
}
/// Replace the default single-threaded search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
Ok(())
}
/// Replaces the executor with a custom one, e.g. backed by an outer thread pool.
pub fn set_executor(&mut self, executor: Executor) {
self.executor = executor;
}
/// Replace the default single-threaded search executor pool
/// by a thread pool with as many threads as there are CPUs on the system.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = available_parallelism()?.get();
self.set_multithread_executor(default_num_threads)
}
/// Creates a new index given a directory and an [`IndexMeta`].
fn open_from_metas<C: crate::codec::Codec>(
fn open_from_metas(
directory: ManagedDirectory,
metas: &IndexMeta,
inventory: SegmentMetaInventory,
) -> crate::Result<Index<C>> {
) -> Index {
let schema = metas.schema.clone();
let codec = metas.codec.to_codec::<C>()?;
Ok(Index {
Index {
settings: metas.index_settings.clone(),
directory,
schema,
@@ -483,8 +391,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
fast_field_tokenizers: TokenizerManager::default(),
executor: Executor::single_thread(),
inventory,
codec,
})
}
}
/// Setter for the tokenizer manager.
@@ -540,7 +447,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
/// Create a default [`IndexReader`] for the given index.
///
/// See [`Index.reader_builder()`].
pub fn reader(&self) -> crate::Result<IndexReader<Codec>> {
pub fn reader(&self) -> crate::Result<IndexReader> {
self.reader_builder().try_into()
}
@@ -548,10 +455,17 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
///
/// Most projects should create at most one reader for a given index.
/// This method is typically called only once per `Index` instance.
pub fn reader_builder(&self) -> IndexReaderBuilder<Codec> {
pub fn reader_builder(&self) -> IndexReaderBuilder {
IndexReaderBuilder::new(self.clone())
}
/// Opens a new directory from an index path.
#[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?;
Index::open(mmap_directory)
}
/// Returns the list of the segment metas tracked by the index.
///
/// Such segments can of course be part of the index,
@@ -592,6 +506,16 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
self.inventory.new_segment_meta(segment_id, max_doc)
}
/// Open the index using the provided directory
pub fn open<T: Into<Box<dyn Directory>>>(directory: T) -> crate::Result<Index> {
let directory = directory.into();
let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default();
let metas = load_metas(&directory, &inventory)?;
let index = Index::open_from_metas(directory, &metas, inventory);
Ok(index)
}
/// Reads the index meta file from the directory.
pub fn load_metas(&self) -> crate::Result<IndexMeta> {
load_metas(self.directory(), &self.inventory)
@@ -615,7 +539,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
pub fn writer_with_options<D: Document>(
&self,
options: IndexWriterOptions,
) -> crate::Result<IndexWriter<Codec, D>> {
) -> crate::Result<IndexWriter<D>> {
let directory_lock = self
.directory
.acquire_lock(&INDEX_WRITER_LOCK)
@@ -657,7 +581,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
&self,
num_threads: usize,
overall_memory_budget_in_bytes: usize,
) -> crate::Result<IndexWriter<Codec, D>> {
) -> crate::Result<IndexWriter<D>> {
let memory_arena_in_bytes_per_thread = overall_memory_budget_in_bytes / num_threads;
let options = IndexWriterOptions::builder()
.num_worker_threads(num_threads)
@@ -671,7 +595,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
/// That index writer simply has a single thread and a memory budget of 15 MB.
/// Using a single thread gives us a deterministic allocation of DocId.
#[cfg(test)]
pub fn writer_for_tests<D: Document>(&self) -> crate::Result<IndexWriter<Codec, D>> {
pub fn writer_for_tests<D: Document>(&self) -> crate::Result<IndexWriter<D>> {
self.writer_with_num_threads(1, MEMORY_BUDGET_NUM_BYTES_MIN)
}
@@ -689,7 +613,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
pub fn writer<D: Document>(
&self,
memory_budget_in_bytes: usize,
) -> crate::Result<IndexWriter<Codec, D>> {
) -> crate::Result<IndexWriter<D>> {
let mut num_threads = std::cmp::min(available_parallelism()?.get(), MAX_NUM_THREAD);
let memory_budget_num_bytes_per_thread = memory_budget_in_bytes / num_threads;
if memory_budget_num_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
@@ -716,7 +640,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
}
/// Returns the list of segments that are searchable
pub fn searchable_segments(&self) -> crate::Result<Vec<Segment<Codec>>> {
pub fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
Ok(self
.searchable_segment_metas()?
.into_iter()
@@ -725,12 +649,12 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
}
#[doc(hidden)]
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment<Codec> {
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
Segment::for_index(self.clone(), segment_meta)
}
/// Creates a new segment.
pub fn new_segment(&self) -> Segment<Codec> {
pub fn new_segment(&self) -> Segment {
let segment_meta = self
.inventory
.new_segment_meta(SegmentId::generate_random(), 0);
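Put together, creating an index after this change reads as in the following minimal sketch; the schema field is illustrative:

```
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT); // illustrative field
    let schema = schema_builder.build();
    // `IndexBuilder` and `Index` carry no codec type parameter anymore.
    let index = Index::builder().schema(schema).create_in_ram()?;
    let _segment = index.new_segment();
    Ok(())
}
```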

View File

@@ -7,8 +7,7 @@ use std::sync::Arc;
use serde::{Deserialize, Serialize};
use super::SegmentComponent;
use crate::codec::Codec;
use crate::index::{CodecConfiguration, SegmentId};
use crate::index::SegmentId;
use crate::schema::Schema;
use crate::store::Compressor;
use crate::{Inventory, Opstamp, TrackedObject};
@@ -321,8 +320,6 @@ pub struct IndexMeta {
/// This payload is entirely unused by tantivy.
#[serde(skip_serializing_if = "Option::is_none")]
pub payload: Option<String>,
/// Codec configuration for the index.
pub codec: CodecConfiguration,
}
#[derive(Deserialize, Debug)]
@@ -334,8 +331,6 @@ struct UntrackedIndexMeta {
pub opstamp: Opstamp,
#[serde(skip_serializing_if = "Option::is_none")]
pub payload: Option<String>,
#[serde(default)]
pub codec: CodecConfiguration,
}
impl UntrackedIndexMeta {
@@ -350,7 +345,6 @@ impl UntrackedIndexMeta {
schema: self.schema,
opstamp: self.opstamp,
payload: self.payload,
codec: self.codec,
}
}
}
@@ -361,14 +355,13 @@ impl IndexMeta {
///
/// This new index does not contain any segments.
/// The opstamp will be set to `0u64`.
pub fn with_schema_and_codec<C: Codec>(schema: Schema, codec: &C) -> IndexMeta {
pub fn with_schema(schema: Schema) -> IndexMeta {
IndexMeta {
index_settings: IndexSettings::default(),
segments: vec![],
schema,
opstamp: 0u64,
payload: None,
codec: CodecConfiguration::from(codec),
}
}
@@ -419,12 +412,11 @@ mod tests {
schema,
opstamp: 0u64,
payload: None,
codec: Default::default(),
};
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(
json,
r#"{"index_settings":{"docstore_compression":"none","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0,"codec":{"name":"standard"}}"#
r#"{"index_settings":{"docstore_compression":"none","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
);
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
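A minimal sketch of the simplified constructor exercised by the test above; `IndexMeta` is re-exported at the crate root elsewhere in this diff:

```
use tantivy::schema::Schema;
use tantivy::IndexMeta;

fn fresh_meta() -> IndexMeta {
    let schema = Schema::builder().build(); // empty schema, illustrative
    // A fresh index starts with no segments and opstamp 0.
    let meta = IndexMeta::with_schema(schema);
    assert!(meta.segments.is_empty());
    assert_eq!(meta.opstamp, 0u64);
    meta
}
```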

View File

@@ -1,8 +1,7 @@
use std::io;
use std::sync::Arc;
use common::json_path_writer::JSON_END_OF_PATH;
use common::{BinarySerializable, ByteCount, OwnedBytes};
use common::{BinarySerializable, ByteCount};
#[cfg(feature = "quickwit")]
use futures_util::{FutureExt, StreamExt, TryStreamExt};
#[cfg(feature = "quickwit")]
@@ -10,13 +9,9 @@ use itertools::Itertools;
#[cfg(feature = "quickwit")]
use tantivy_fst::automaton::{AlwaysMatch, Automaton};
use crate::codec::postings::PostingsCodec;
use crate::codec::{Codec, ObjectSafeCodec, StandardCodec};
use crate::directory::FileSlice;
use crate::fieldnorm::FieldNormReader;
use crate::postings::{Postings, TermInfo};
use crate::query::term_query::TermScorer;
use crate::query::{Bm25Weight, PhraseScorer, Scorer};
use crate::positions::PositionReader;
use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo};
use crate::schema::{IndexRecordOption, Term, Type};
use crate::termdict::TermDictionary;
@@ -38,7 +33,6 @@ pub struct InvertedIndexReader {
positions_file_slice: FileSlice,
record_option: IndexRecordOption,
total_num_tokens: u64,
codec: Arc<dyn ObjectSafeCodec>,
}
/// Object that records the amount of space used by a field in an inverted index.
@@ -74,7 +68,6 @@ impl InvertedIndexReader {
postings_file_slice: FileSlice,
positions_file_slice: FileSlice,
record_option: IndexRecordOption,
codec: Arc<dyn ObjectSafeCodec>,
) -> io::Result<InvertedIndexReader> {
let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
@@ -84,7 +77,6 @@ impl InvertedIndexReader {
positions_file_slice,
record_option,
total_num_tokens,
codec,
})
}
@@ -97,7 +89,6 @@ impl InvertedIndexReader {
positions_file_slice: FileSlice::empty(),
record_option,
total_num_tokens: 0u64,
codec: Arc::new(StandardCodec),
}
}
@@ -169,98 +160,61 @@ impl InvertedIndexReader {
Ok(fields)
}
pub(crate) fn new_term_scorer_specialized<C: Codec>(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
fieldnorm_reader: FieldNormReader,
similarity_weight: Bm25Weight,
codec: &C,
) -> io::Result<TermScorer<<<C as Codec>::PostingsCodec as PostingsCodec>::Postings>> {
let postings = self.read_postings_from_terminfo_specialized(term_info, option, codec)?;
let term_scorer = TermScorer::new(postings, fieldnorm_reader, similarity_weight);
Ok(term_scorer)
}
pub(crate) fn new_phrase_scorer_type_specialized<C: Codec>(
&self,
term_infos: &[(usize, TermInfo)],
similarity_weight_opt: Option<Bm25Weight>,
fieldnorm_reader: FieldNormReader,
slop: u32,
codec: &C,
) -> io::Result<PhraseScorer<<<C as Codec>::PostingsCodec as PostingsCodec>::Postings>> {
let mut offset_and_term_postings: Vec<(
usize,
<<C as Codec>::PostingsCodec as PostingsCodec>::Postings,
)> = Vec::with_capacity(term_infos.len());
for (offset, term_info) in term_infos {
let postings = self.read_postings_from_terminfo_specialized(
term_info,
IndexRecordOption::WithFreqsAndPositions,
codec,
)?;
offset_and_term_postings.push((*offset, postings));
}
let phrase_scorer = PhraseScorer::new(
offset_and_term_postings,
similarity_weight_opt,
fieldnorm_reader,
slop,
);
Ok(phrase_scorer)
}
/// Build a new term scorer.
pub fn new_term_scorer(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
fieldnorm_reader: FieldNormReader,
similarity_weight: Bm25Weight,
) -> io::Result<Box<dyn Scorer>> {
let term_scorer = self.codec.load_term_scorer_type_erased(
term_info,
option,
self,
fieldnorm_reader,
similarity_weight,
)?;
Ok(term_scorer)
}
/// Returns a postings object with a concrete, codec-specific type.
/// Resets the block segment to another position of the postings
/// file.
///
/// This requires you to provide the actual codec.
pub fn read_postings_from_terminfo_specialized<C: Codec>(
/// This is useful for enumerating through a list of terms,
/// and consuming the associated posting lists while avoiding
/// reallocating a [`BlockSegmentPostings`].
///
/// # Warning
///
/// This does not reset the positions list.
pub fn reset_block_postings_from_terminfo(
&self,
term_info: &TermInfo,
block_postings: &mut BlockSegmentPostings,
) -> io::Result<()> {
let postings_slice = self
.postings_file_slice
.slice(term_info.postings_range.clone());
let postings_bytes = postings_slice.read_bytes()?;
block_postings.reset(term_info.doc_freq, postings_bytes)?;
Ok(())
}
/// Returns a block postings given a `Term`.
/// This method is for advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
pub fn read_block_postings(
&self,
term: &Term,
option: IndexRecordOption,
codec: &C,
) -> io::Result<<<C as Codec>::PostingsCodec as PostingsCodec>::Postings> {
let option = option.downgrade(self.record_option);
) -> io::Result<Option<BlockSegmentPostings>> {
self.get_term_info(term)?
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
.transpose()
}
/// Returns a block postings given a `term_info`.
/// This method is for advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
pub fn read_block_postings_from_terminfo(
&self,
term_info: &TermInfo,
requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> {
let postings_data = self
.postings_file_slice
.slice(term_info.postings_range.clone())
.read_bytes()?;
let positions_data: Option<OwnedBytes> = if option.has_positions() {
let positions_data = self
.positions_file_slice
.slice(term_info.positions_range.clone())
.read_bytes()?;
Some(positions_data)
} else {
None
};
let postings: <<C as Codec>::PostingsCodec as PostingsCodec>::Postings =
codec.postings_codec().load_postings(
term_info.doc_freq,
postings_data,
self.record_option,
option,
positions_data,
)?;
Ok(postings)
.slice(term_info.postings_range.clone());
BlockSegmentPostings::open(
term_info.doc_freq,
postings_data,
self.record_option,
requested_option,
)
}
/// Returns a posting object given a `term_info`.
@@ -271,9 +225,25 @@ impl InvertedIndexReader {
&self,
term_info: &TermInfo,
option: IndexRecordOption,
) -> io::Result<Box<dyn Postings>> {
self.codec
.load_postings_type_erased(term_info, option, self)
) -> io::Result<SegmentPostings> {
let option = option.downgrade(self.record_option);
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
let position_reader = {
if option.has_positions() {
let positions_data = self
.positions_file_slice
.read_bytes_slice(term_info.positions_range.clone())?;
let position_reader = PositionReader::open(positions_data)?;
Some(position_reader)
} else {
None
}
};
Ok(SegmentPostings::from_block_postings(
block_postings,
position_reader,
))
}
/// Returns the total number of tokens recorded for all documents
@@ -296,7 +266,7 @@ impl InvertedIndexReader {
&self,
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<Box<dyn Postings>>> {
) -> io::Result<Option<SegmentPostings>> {
self.get_term_info(term)?
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
.transpose()
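As a usage sketch of the restored concrete-typed API, counting a term's documents through `read_postings` might look like this; the helper name is illustrative:

```
use tantivy::schema::IndexRecordOption;
use tantivy::{DocSet, SegmentReader, Term, TERMINATED};

// Illustrative helper: count the documents of a term by walking the
// concrete `SegmentPostings` cursor returned by `read_postings`.
fn doc_count(segment_reader: &SegmentReader, term: &Term) -> tantivy::Result<u32> {
    let inverted_index = segment_reader.inverted_index(term.field())?;
    let mut count = 0u32;
    if let Some(mut postings) = inverted_index.read_postings(term, IndexRecordOption::Basic)? {
        while postings.doc() != TERMINATED {
            count += 1;
            postings.advance();
        }
    }
    Ok(count)
}
```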

View File

@@ -2,7 +2,6 @@
//!
//! It contains `Index` and `Segment`, where a `Index` consists of one or more `Segment`s.
mod codec_configuration;
mod index;
mod index_meta;
mod inverted_index_reader;
@@ -11,7 +10,6 @@ mod segment_component;
mod segment_id;
mod segment_reader;
pub use self::codec_configuration::CodecConfiguration;
pub use self::index::{Index, IndexBuilder};
pub(crate) use self::index_meta::SegmentMetaInventory;
pub use self::index_meta::{IndexMeta, IndexSettings, Order, SegmentMeta};

View File

@@ -2,7 +2,6 @@ use std::fmt;
use std::path::PathBuf;
use super::SegmentComponent;
use crate::codec::StandardCodec;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::{Directory, FileSlice, WritePtr};
use crate::index::{Index, SegmentId, SegmentMeta};
@@ -11,25 +10,25 @@ use crate::Opstamp;
/// A segment is a piece of the index.
#[derive(Clone)]
pub struct Segment<C: crate::codec::Codec = StandardCodec> {
index: Index<C>,
pub struct Segment {
index: Index,
meta: SegmentMeta,
}
impl<C: crate::codec::Codec> fmt::Debug for Segment<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Segment({:?})", self.id().uuid_string())
}
}
impl<C: crate::codec::Codec> Segment<C> {
impl Segment {
/// Creates a new segment given an `Index` and a `SegmentId`
pub(crate) fn for_index(index: Index<C>, meta: SegmentMeta) -> Segment<C> {
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
Segment { index, meta }
}
/// Returns the index the segment belongs to.
pub fn index(&self) -> &Index<C> {
pub fn index(&self) -> &Index {
&self.index
}
@@ -47,7 +46,7 @@ impl<C: crate::codec::Codec> Segment<C> {
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub fn with_max_doc(self, max_doc: u32) -> Segment<C> {
pub fn with_max_doc(self, max_doc: u32) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_max_doc(max_doc),
@@ -56,7 +55,7 @@ impl<C: crate::codec::Codec> Segment<C> {
#[doc(hidden)]
#[must_use]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment<C> {
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),

View File

@@ -6,7 +6,6 @@ use common::{ByteCount, HasLen};
use fnv::FnvHashMap;
use itertools::Itertools;
use crate::codec::ObjectSafeCodec;
use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption;
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders};
@@ -48,8 +47,6 @@ pub struct SegmentReader {
store_file: FileSlice,
alive_bitset_opt: Option<AliveBitSet>,
schema: Schema,
pub(crate) codec: Arc<dyn ObjectSafeCodec>,
}
impl SegmentReader {
@@ -143,16 +140,15 @@ impl SegmentReader {
}
/// Open a new segment for reading.
pub fn open<C: crate::codec::Codec>(segment: &Segment<C>) -> crate::Result<SegmentReader> {
pub fn open(segment: &Segment) -> crate::Result<SegmentReader> {
Self::open_with_custom_alive_set(segment, None)
}
/// Open a new segment for reading.
pub fn open_with_custom_alive_set<C: crate::codec::Codec>(
segment: &Segment<C>,
pub fn open_with_custom_alive_set(
segment: &Segment,
custom_bitset: Option<AliveBitSet>,
) -> crate::Result<SegmentReader> {
let codec: Arc<dyn ObjectSafeCodec> = Arc::new(segment.index().codec().clone());
let termdict_file = segment.open_read(SegmentComponent::Terms)?;
let termdict_composite = CompositeFile::open(&termdict_file)?;
@@ -208,7 +204,6 @@ impl SegmentReader {
alive_bitset_opt,
positions_composite,
schema,
codec,
})
}
@@ -278,7 +273,6 @@ impl SegmentReader {
postings_file,
positions_file,
record_option,
self.codec.clone(),
)?);
// by releasing the lock in between, we may end up opening the inverted index
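A small sketch of the now non-generic open path; the helper name is illustrative:

```
use tantivy::{Index, SegmentReader};

// Illustrative helper: open one reader per searchable segment.
fn open_readers(index: &Index) -> tantivy::Result<Vec<SegmentReader>> {
    index
        .searchable_segments()?
        .iter()
        .map(SegmentReader::open)
        .collect()
}
```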

View File

@@ -9,7 +9,6 @@ use smallvec::smallvec;
use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater;
use super::{AddBatch, AddBatchReceiver, AddBatchSender, PreparedCommit};
use crate::codec::{Codec, StandardCodec};
use crate::directory::{DirectoryLock, GarbageCollectionResult, TerminatingWrite};
use crate::error::TantivyError;
use crate::fastfield::write_alive_bitset;
@@ -69,12 +68,12 @@ pub struct IndexWriterOptions {
/// indexing queue.
/// Each indexing thread builds its own independent [`Segment`], via
/// a `SegmentWriter` object.
pub struct IndexWriter<C: Codec = StandardCodec, D: Document = TantivyDocument> {
pub struct IndexWriter<D: Document = TantivyDocument> {
// the lock is just used to bind the
// lifetime of the lock with that of the IndexWriter.
_directory_lock: Option<DirectoryLock>,
index: Index<C>,
index: Index,
options: IndexWriterOptions,
@@ -83,7 +82,7 @@ pub struct IndexWriter<C: Codec = StandardCodec, D: Document = TantivyDocument>
index_writer_status: IndexWriterStatus<D>,
operation_sender: AddBatchSender<D>,
segment_updater: SegmentUpdater<C>,
segment_updater: SegmentUpdater,
worker_id: usize,
@@ -129,8 +128,8 @@ fn compute_deleted_bitset(
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, the `segment_entry` is not updated.
pub fn advance_deletes<C: Codec>(
mut segment: Segment<C>,
pub fn advance_deletes(
mut segment: Segment,
segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp,
) -> crate::Result<()> {
@@ -180,11 +179,11 @@ pub fn advance_deletes<C: Codec>(
Ok(())
}
fn index_documents<C: crate::codec::Codec, D: Document>(
fn index_documents<D: Document>(
memory_budget: usize,
segment: Segment<C>,
segment: Segment,
grouped_document_iterator: &mut dyn Iterator<Item = AddBatch<D>>,
segment_updater: &SegmentUpdater<C>,
segment_updater: &SegmentUpdater,
mut delete_cursor: DeleteCursor,
) -> crate::Result<()> {
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone())?;
@@ -227,8 +226,8 @@ fn index_documents<C: crate::codec::Codec, D: Document>(
}
/// `doc_opstamps` is required to be non-empty.
fn apply_deletes<C: crate::codec::Codec>(
segment: &Segment<C>,
fn apply_deletes(
segment: &Segment,
delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp],
) -> crate::Result<Option<BitSet>> {
@@ -263,7 +262,7 @@ fn apply_deletes<C: crate::codec::Codec>(
})
}
impl<C: Codec, D: Document> IndexWriter<C, D> {
impl<D: Document> IndexWriter<D> {
/// Create a new index writer. Attempts to acquire a lockfile.
///
/// The lockfile should be deleted on drop, but it is possible
@@ -279,7 +278,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
/// If the memory arena per thread is too small or too big, returns
/// `TantivyError::InvalidArgument`
pub(crate) fn new(
index: &Index<C>,
index: &Index,
options: IndexWriterOptions,
directory_lock: DirectoryLock,
) -> crate::Result<Self> {
@@ -346,7 +345,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
}
/// Accessor to the index.
pub fn index(&self) -> &Index<C> {
pub fn index(&self) -> &Index {
&self.index
}
@@ -394,7 +393,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
/// It is safe to start writing files associated with the new `Segment`.
/// These will not be garbage collected as long as the `SegmentMeta` object
/// associated with the new `Segment` is "alive".
pub fn new_segment(&self) -> Segment<C> {
pub fn new_segment(&self) -> Segment {
self.index.new_segment()
}
@@ -616,7 +615,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
/// It is also possible to add a payload to the `commit`
/// using this API.
/// See [`PreparedCommit::set_payload()`].
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit<'_, C, D>> {
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit<'_, D>> {
// Here, because we join all of the worker threads,
// all of the segment updates for this commit have been
// sent.
@@ -666,7 +665,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
self.prepare_commit()?.commit()
}
pub(crate) fn segment_updater(&self) -> &SegmentUpdater<C> {
pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
&self.segment_updater
}
@@ -805,7 +804,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
}
}
impl<C: Codec, D: Document> Drop for IndexWriter<C, D> {
impl<D: Document> Drop for IndexWriter<D> {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();
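End to end, the simplified writer type reads as in this minimal sketch; field names and budgets are illustrative:

```
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let text = schema_builder.add_text_field("text", TEXT); // illustrative field
    let index = Index::create_in_ram(schema_builder.build());
    // Only the document type parameter remains; it defaults to `TantivyDocument`.
    let mut writer: IndexWriter = index.writer_with_num_threads(1, 20_000_000)?;
    writer.add_document(doc!(text => "hello world"))?;
    writer.commit()?;
    Ok(())
}
```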

View File

@@ -1,10 +1,9 @@
#[cfg(test)]
mod tests {
use crate::codec::StandardCodec;
use crate::collector::TopDocs;
use crate::fastfield::AliveBitSet;
use crate::index::Index;
use crate::postings::{DocFreq, Postings};
use crate::postings::Postings;
use crate::query::QueryParser;
use crate::schema::{
self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
@@ -122,26 +121,21 @@ mod tests {
let my_text_field = index.schema().get_field("text_field").unwrap();
let term_a = Term::from_field_text(my_text_field, "text");
let inverted_index = segment_reader.inverted_index(my_text_field).unwrap();
let term_info = inverted_index.get_term_info(&term_a).unwrap().unwrap();
let mut postings = inverted_index
.read_postings_from_terminfo_specialized(
&term_info,
IndexRecordOption::WithFreqsAndPositions,
&StandardCodec,
)
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap()
.unwrap();
assert_eq!(postings.doc_freq(), DocFreq::Exact(2));
assert_eq!(postings.doc_freq(), 2);
let fallback_bitset = AliveBitSet::for_test_from_deleted_docs(&[0], 100);
assert_eq!(
crate::indexer::merger::doc_freq_given_deletes(
&postings,
postings.doc_freq_given_deletes(
segment_reader.alive_bitset().unwrap_or(&fallback_bitset)
),
2
);
assert_eq!(postings.term_freq(), 1);
let mut output = Vec::new();
let mut output = vec![];
postings.positions(&mut output);
assert_eq!(output, vec![1]);
postings.advance();

View File

@@ -7,8 +7,6 @@ use common::ReadOnlyBitSet;
use itertools::Itertools;
use measure_time::debug_time;
use crate::codec::postings::PostingsCodec;
use crate::codec::{Codec, StandardCodec};
use crate::directory::WritePtr;
use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
@@ -17,7 +15,7 @@ use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer,
use crate::index::{Segment, SegmentComponent, SegmentReader};
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings};
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{value_type_to_column_type, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
@@ -78,11 +76,10 @@ fn estimate_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::
Ok(total_num_tokens)
}
pub struct IndexMerger<C: Codec = StandardCodec> {
pub struct IndexMerger {
schema: Schema,
pub(crate) readers: Vec<SegmentReader>,
max_doc: u32,
codec: C,
}
struct DeltaComputer {
@@ -147,8 +144,8 @@ fn extract_fast_field_required_columns(schema: &Schema) -> Vec<(String, ColumnTy
.collect()
}
impl<C: Codec> IndexMerger<C> {
pub fn open(schema: Schema, segments: &[Segment<C>]) -> crate::Result<IndexMerger<C>> {
impl IndexMerger {
pub fn open(schema: Schema, segments: &[Segment]) -> crate::Result<IndexMerger> {
let alive_bitset = segments.iter().map(|_| None).collect_vec();
Self::open_with_custom_alive_set(schema, segments, alive_bitset)
}
@@ -165,15 +162,11 @@ impl<C: Codec> IndexMerger<C> {
// This can be used to merge but also apply an additional filter.
// One use case is demux, which is basically taking a list of
// segments and partitioning them, e.g. by a value in a field.
//
// # Panics if segments is empty.
pub fn open_with_custom_alive_set(
schema: Schema,
segments: &[Segment<C>],
segments: &[Segment],
alive_bitset_opt: Vec<Option<AliveBitSet>>,
) -> crate::Result<IndexMerger<C>> {
assert!(!segments.is_empty());
let codec = segments[0].index().codec().clone();
) -> crate::Result<IndexMerger> {
let mut readers = vec![];
for (segment, new_alive_bitset_opt) in segments.iter().zip(alive_bitset_opt) {
if segment.meta().num_docs() > 0 {
@@ -196,7 +189,6 @@ impl<C: Codec> IndexMerger<C> {
schema,
readers,
max_doc,
codec,
})
}
@@ -295,7 +287,7 @@ impl<C: Codec> IndexMerger<C> {
&self,
indexed_field: Field,
_field_type: &FieldType,
serializer: &mut InvertedIndexSerializer<C>,
serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
@@ -363,10 +355,7 @@ impl<C: Codec> IndexMerger<C> {
indexed. Have you modified the schema?",
);
let mut segment_postings_containing_the_term: Vec<(
usize,
<C::PostingsCodec as PostingsCodec>::Postings,
)> = Vec::with_capacity(self.readers.len());
let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
while merged_terms.advance() {
segment_postings_containing_the_term.clear();
@@ -378,24 +367,17 @@ impl<C: Codec> IndexMerger<C> {
for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
let segment_reader = &self.readers[segment_ord];
let inverted_index: &InvertedIndexReader = &field_readers[segment_ord];
let postings = inverted_index.read_postings_from_terminfo_specialized(
&term_info,
segment_postings_option,
&self.codec,
)?;
let segment_postings = inverted_index
.read_postings_from_terminfo(&term_info, segment_postings_option)?;
let alive_bitset_opt = segment_reader.alive_bitset();
let doc_freq = if let Some(alive_bitset) = alive_bitset_opt {
doc_freq_given_deletes(&postings, alive_bitset)
segment_postings.doc_freq_given_deletes(alive_bitset)
} else {
// We do not have an exact document frequency here.
match postings.doc_freq() {
crate::postings::DocFreq::Approximate(_) => exact_doc_freq(&postings),
crate::postings::DocFreq::Exact(doc_freq) => doc_freq,
}
segment_postings.doc_freq()
};
if doc_freq > 0u32 {
total_doc_freq += doc_freq;
segment_postings_containing_the_term.push((segment_ord, postings));
segment_postings_containing_the_term.push((segment_ord, segment_postings));
}
}
@@ -413,7 +395,11 @@ impl<C: Codec> IndexMerger<C> {
assert!(!segment_postings_containing_the_term.is_empty());
let has_term_freq = {
let has_term_freq = segment_postings_containing_the_term[0].1.has_freq();
let has_term_freq = !segment_postings_containing_the_term[0]
.1
.block_cursor
.freqs()
.is_empty();
for (_, postings) in &segment_postings_containing_the_term[1..] {
// This may look like a strange way to test whether we have term freq or not.
// With JSON object, the schema is not sufficient to know whether a term
@@ -429,7 +415,7 @@ impl<C: Codec> IndexMerger<C> {
//
// Overall the reliable way to know if we have actual frequencies loaded or not
// is to check whether the actual decoded array is empty or not.
if postings.has_freq() != has_term_freq {
if has_term_freq == postings.block_cursor.freqs().is_empty() {
return Err(DataCorruption::comment_only(
"Term freqs are inconsistent across segments",
)
@@ -481,7 +467,7 @@ impl<C: Codec> IndexMerger<C> {
fn write_postings(
&self,
serializer: &mut InvertedIndexSerializer<C>,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
@@ -539,7 +525,7 @@ impl<C: Codec> IndexMerger<C> {
///
/// # Returns
/// The number of documents in the resulting segment.
pub fn write(&self, mut serializer: SegmentSerializer<C>) -> crate::Result<u32> {
pub fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
let doc_id_mapping = self.get_doc_id_from_concatenated_data()?;
debug!("write-fieldnorms");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
@@ -567,43 +553,6 @@ impl<C: Codec> IndexMerger<C> {
}
}
/// Compute the number of non-deleted documents.
///
/// This method will clone and scan through the posting lists.
/// (this is a rather expensive operation).
pub(crate) fn doc_freq_given_deletes<P: Postings + Clone>(
postings: &P,
alive_bitset: &AliveBitSet,
) -> u32 {
let mut docset = postings.clone();
let mut doc_freq = 0;
loop {
let doc = docset.doc();
if doc == TERMINATED {
return doc_freq;
}
if alive_bitset.is_alive(doc) {
doc_freq += 1u32;
}
docset.advance();
}
}
/// If the postings are not able to inform us of the document frequency,
/// we just scan through them.
pub(crate) fn exact_doc_freq<P: Postings + Clone>(postings: &P) -> u32 {
let mut docset = postings.clone();
let mut doc_freq = 0;
loop {
let doc = docset.doc();
if doc == TERMINATED {
return doc_freq;
}
doc_freq += 1u32;
docset.advance();
}
}
#[cfg(test)]
mod tests {
@@ -612,16 +561,12 @@ mod tests {
use proptest::strategy::Strategy;
use schema::FAST;
use crate::codec::postings::PostingsCodec;
use crate::codec::standard::postings::StandardPostingsCodec;
use crate::collector::tests::{
BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE,
};
use crate::collector::{Count, FacetCollector};
use crate::fastfield::AliveBitSet;
use crate::index::{Index, SegmentId};
use crate::indexer::NoMergePolicy;
use crate::postings::{DocFreq, Postings as _};
use crate::query::{AllQuery, BooleanQuery, EnableScoring, Scorer, TermQuery};
use crate::schema::{
Facet, FacetOptions, IndexRecordOption, NumericOptions, TantivyDocument, Term,
@@ -1573,10 +1518,10 @@ mod tests {
let searcher = reader.searcher();
let mut term_scorer = term_query
.specialized_weight(EnableScoring::enabled_from_searcher(&searcher))?
.term_scorer_for_test(searcher.segment_reader(0u32), 1.0)
.term_scorer_for_test(searcher.segment_reader(0u32), 1.0)?
.unwrap();
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.seek_block_max(0), 0.0079681855);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
assert_nearly_equals!(term_scorer.score(), 0.0079681855);
for _ in 0..81 {
writer.add_document(doc!(text=>"hello happy tax payer"))?;
@@ -1589,13 +1534,13 @@ mod tests {
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(EnableScoring::enabled_from_searcher(&searcher))?
.term_scorer_for_test(segment_reader, 1.0)
.term_scorer_for_test(segment_reader, 1.0)?
.unwrap();
// the difference compared to before is intrinsic to the bm25 formula. no worries
// there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.seek_block_max(doc), 0.003478312);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
@@ -1615,12 +1560,12 @@ mod tests {
let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query
.specialized_weight(EnableScoring::enabled_from_searcher(&searcher))?
.term_scorer_for_test(segment_reader, 1.0)
.term_scorer_for_test(segment_reader, 1.0)?
.unwrap();
// the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.seek_block_max(doc), 0.003478312);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
@@ -1634,16 +1579,4 @@ mod tests {
assert!(((super::MAX_DOC_LIMIT - 1) as i32) >= 0);
assert!((super::MAX_DOC_LIMIT as i32) < 0);
}
#[test]
fn test_doc_freq_given_delete() {
let docs =
<StandardPostingsCodec as PostingsCodec>::Postings::create_from_docs(&[0, 2, 10]);
assert_eq!(docs.doc_freq(), DocFreq::Exact(3));
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[2], 12);
assert_eq!(super::doc_freq_given_deletes(&docs, &alive_bitset), 2);
let all_deleted =
AliveBitSet::for_test_from_deleted_docs(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
assert_eq!(super::doc_freq_given_deletes(&docs, &all_deleted), 0);
}
}

View File

@@ -1,17 +1,16 @@
use super::IndexWriter;
use crate::codec::Codec;
use crate::schema::document::Document;
use crate::{FutureResult, Opstamp, TantivyDocument};
/// A prepared commit
pub struct PreparedCommit<'a, C: Codec, D: Document = TantivyDocument> {
index_writer: &'a mut IndexWriter<C, D>,
pub struct PreparedCommit<'a, D: Document = TantivyDocument> {
index_writer: &'a mut IndexWriter<D>,
payload: Option<String>,
opstamp: Opstamp,
}
impl<'a, C: Codec, D: Document> PreparedCommit<'a, C, D> {
pub(crate) fn new(index_writer: &'a mut IndexWriter<C, D>, opstamp: Opstamp) -> Self {
impl<'a, D: Document> PreparedCommit<'a, D> {
pub(crate) fn new(index_writer: &'a mut IndexWriter<D>, opstamp: Opstamp) -> Self {
Self {
index_writer,
payload: None,

View File

@@ -8,17 +8,17 @@ use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`.
pub struct SegmentSerializer<C: crate::codec::Codec> {
segment: Segment<C>,
pub struct SegmentSerializer {
segment: Segment,
pub(crate) store_writer: StoreWriter,
fast_field_write: WritePtr,
fieldnorms_serializer: Option<FieldNormsSerializer>,
postings_serializer: InvertedIndexSerializer<C>,
postings_serializer: InvertedIndexSerializer,
}
impl<C: crate::codec::Codec> SegmentSerializer<C> {
impl SegmentSerializer {
/// Creates a new `SegmentSerializer`.
pub fn for_segment(mut segment: Segment<C>) -> crate::Result<SegmentSerializer<C>> {
pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
let settings = segment.index().settings().clone();
let store_writer = {
let store_write = segment.open_write(SegmentComponent::Store)?;
@@ -50,12 +50,12 @@ impl<C: crate::codec::Codec> SegmentSerializer<C> {
self.store_writer.mem_usage()
}
pub fn segment(&self) -> &Segment<C> {
pub fn segment(&self) -> &Segment {
&self.segment
}
/// Accessor to the `PostingsSerializer`.
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer<C> {
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
&mut self.postings_serializer
}

View File

@@ -10,13 +10,10 @@ use std::sync::{Arc, RwLock};
use rayon::{ThreadPool, ThreadPoolBuilder};
use super::segment_manager::SegmentManager;
use crate::codec::Codec;
use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::fastfield::AliveBitSet;
use crate::index::{
CodecConfiguration, Index, IndexMeta, IndexSettings, Segment, SegmentId, SegmentMeta,
};
use crate::index::{Index, IndexMeta, IndexSettings, Segment, SegmentId, SegmentMeta};
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
@@ -64,10 +61,10 @@ pub(crate) fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate:
// We voluntarily pass a merge_operation ref to guarantee that
// the merge_operation is alive during the process
#[derive(Clone)]
pub(crate) struct SegmentUpdater<C: Codec>(Arc<InnerSegmentUpdater<C>>);
pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>);
impl<C: Codec> Deref for SegmentUpdater<C> {
type Target = InnerSegmentUpdater<C>;
impl Deref for SegmentUpdater {
type Target = InnerSegmentUpdater;
#[inline]
fn deref(&self) -> &Self::Target {
@@ -75,8 +72,8 @@ impl<C: Codec> Deref for SegmentUpdater<C> {
}
}
fn garbage_collect_files<C: Codec>(
segment_updater: SegmentUpdater<C>,
fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
@@ -87,8 +84,8 @@ fn garbage_collect_files<C: Codec>(
/// Merges the list of segments given in the `segment_entries`.
/// This function runs in the calling thread and is computationally expensive.
fn merge<Codec: crate::codec::Codec>(
index: &Index<Codec>,
fn merge(
index: &Index,
mut segment_entries: Vec<SegmentEntry>,
target_opstamp: Opstamp,
) -> crate::Result<Option<SegmentEntry>> {
@@ -111,13 +108,13 @@ fn merge<Codec: crate::codec::Codec>(
let delete_cursor = segment_entries[0].delete_cursor().clone();
let segments: Vec<Segment<Codec>> = segment_entries
let segments: Vec<Segment> = segment_entries
.iter()
.map(|segment_entry| index.segment(segment_entry.meta().clone()))
.collect();
// An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger<Codec> = IndexMerger::open(index.schema(), &segments[..])?;
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
// ... we just serialize this index merger in our new segment to merge the segments.
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
@@ -142,10 +139,10 @@ fn merge<Codec: crate::codec::Codec>(
/// meant to work if you have an `IndexWriter` running for the origin indices, or
/// the destination `Index`.
#[doc(hidden)]
pub fn merge_indices<Codec: crate::codec::Codec>(
indices: &[Index<Codec>],
output_directory: Box<dyn Directory>,
) -> crate::Result<Index<Codec>> {
pub fn merge_indices<T: Into<Box<dyn Directory>>>(
indices: &[Index],
output_directory: T,
) -> crate::Result<Index> {
if indices.is_empty() {
// If there are no indices to merge, there is no need to do anything.
return Err(crate::TantivyError::InvalidArgument(
@@ -166,7 +163,7 @@ pub fn merge_indices<Codec: crate::codec::Codec>(
));
}
let mut segments: Vec<Segment<Codec>> = Vec::new();
let mut segments: Vec<Segment> = Vec::new();
for index in indices {
segments.extend(index.searchable_segments()?);
}
@@ -188,12 +185,12 @@ pub fn merge_indices<Codec: crate::codec::Codec>(
/// meant to work if you have an `IndexWriter` running for the origin indices, or
/// the destination `Index`.
#[doc(hidden)]
pub fn merge_filtered_segments<C: crate::codec::Codec, T: Into<Box<dyn Directory>>>(
segments: &[Segment<C>],
pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
segments: &[Segment],
target_settings: IndexSettings,
filter_doc_ids: Vec<Option<AliveBitSet>>,
output_directory: T,
) -> crate::Result<Index<C>> {
) -> crate::Result<Index> {
if segments.is_empty() {
// If there are no indices to merge, there is no need to do anything.
return Err(crate::TantivyError::InvalidArgument(
@@ -214,15 +211,14 @@ pub fn merge_filtered_segments<C: crate::codec::Codec, T: Into<Box<dyn Directory
));
}
let mut merged_index: Index<C> = Index::builder()
.schema(target_schema.clone())
.codec(segments[0].index().codec().clone())
.settings(target_settings.clone())
.create(output_directory.into())?;
let mut merged_index = Index::create(
output_directory,
target_schema.clone(),
target_settings.clone(),
)?;
let merged_segment = merged_index.new_segment();
let merged_segment_id = merged_segment.id();
let merger: IndexMerger<C> =
let merger: IndexMerger =
IndexMerger::open_with_custom_alive_set(merged_index.schema(), segments, filter_doc_ids)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment)?;
let num_docs = merger.write(segment_serializer)?;
@@ -239,7 +235,6 @@ pub fn merge_filtered_segments<C: crate::codec::Codec, T: Into<Box<dyn Directory
))
.trim_end()
);
let codec_configuration = CodecConfiguration::from(segments[0].index().codec());
let index_meta = IndexMeta {
index_settings: target_settings, // index_settings of all segments should be the same
@@ -247,7 +242,6 @@ pub fn merge_filtered_segments<C: crate::codec::Codec, T: Into<Box<dyn Directory
schema: target_schema,
opstamp: 0u64,
payload: Some(stats),
codec: codec_configuration,
};
// save the meta.json
@@ -256,7 +250,7 @@ pub fn merge_filtered_segments<C: crate::codec::Codec, T: Into<Box<dyn Directory
Ok(merged_index)
}
pub(crate) struct InnerSegmentUpdater<C: Codec> {
pub(crate) struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the
// `SegmentUpdater`.
@@ -267,7 +261,7 @@ pub(crate) struct InnerSegmentUpdater<C: Codec> {
pool: ThreadPool,
merge_thread_pool: ThreadPool,
index: Index<C>,
index: Index,
segment_manager: SegmentManager,
merge_policy: RwLock<Arc<dyn MergePolicy>>,
killed: AtomicBool,
@@ -275,13 +269,13 @@ pub(crate) struct InnerSegmentUpdater<C: Codec> {
merge_operations: MergeOperationInventory,
}
impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
impl SegmentUpdater {
pub fn create(
index: Index<Codec>,
index: Index,
stamper: Stamper,
delete_cursor: &DeleteCursor,
num_merge_threads: usize,
) -> crate::Result<Self> {
) -> crate::Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new()
@@ -410,14 +404,12 @@ impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
//
// Segment 1 from disk 1, Segment 1 from disk 2, etc.
committed_segment_metas.sort_by_key(|segment_meta| -(segment_meta.max_doc() as i32));
let codec = CodecConfiguration::from(index.codec());
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
segments: committed_segment_metas,
schema: index.schema(),
opstamp,
payload: commit_message,
codec,
};
// TODO add context to the error.
save_metas(&index_meta, directory.box_clone().borrow_mut())?;
@@ -451,7 +443,7 @@ impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
opstamp: Opstamp,
payload: Option<String>,
) -> FutureResult<Opstamp> {
let segment_updater: SegmentUpdater<Codec> = self.clone();
let segment_updater: SegmentUpdater = self.clone();
self.schedule_task(move || {
let segment_entries = segment_updater.purge_deletes(opstamp)?;
segment_updater.segment_manager.commit(segment_entries);
@@ -710,7 +702,6 @@ impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
#[cfg(test)]
mod tests {
use super::merge_indices;
use crate::codec::StandardCodec;
use crate::collector::TopDocs;
use crate::directory::RamDirectory;
use crate::fastfield::AliveBitSet;
@@ -924,7 +915,7 @@ mod tests {
#[test]
fn test_merge_empty_indices_array() {
let merge_result = merge_indices::<StandardCodec>(&[], Box::new(RamDirectory::default()));
let merge_result = merge_indices(&[], RamDirectory::default());
assert!(merge_result.is_err());
}
@@ -951,10 +942,7 @@ mod tests {
};
// mismatched schema index list
let result = merge_indices(
&[first_index, second_index],
Box::new(RamDirectory::default()),
);
let result = merge_indices(&[first_index, second_index], RamDirectory::default());
assert!(result.is_err());
Ok(())
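With the relaxed signature, a merge call might read as below; the import path is assumed from the module layout, and the function is `#[doc(hidden)]`:

```
use tantivy::directory::RamDirectory;
use tantivy::indexer::merge_indices; // import path assumed

// Illustrative helper: merge two schema-compatible indices into a RAM directory.
fn merge_two(a: tantivy::Index, b: tantivy::Index) -> tantivy::Result<tantivy::Index> {
    // The output directory is now accepted as any `Into<Box<dyn Directory>>`.
    merge_indices(&[a, b], RamDirectory::default())
}
```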

View File

@@ -4,7 +4,6 @@ use itertools::Itertools;
use tokenizer_api::BoxTokenStream;
use super::operation::AddOperation;
use crate::codec::Codec;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::index::{Segment, SegmentComponent};
@@ -13,7 +12,7 @@ use crate::indexer::segment_serializer::SegmentSerializer;
use crate::json_utils::{index_json_value, IndexingPositionsPerPath};
use crate::postings::{
compute_table_memory_size, serialize_postings, IndexingContext, IndexingPosition,
PerFieldPostingsWriter, PostingsWriter, PostingsWriterEnum,
PerFieldPostingsWriter, PostingsWriter,
};
use crate::schema::document::{Document, Value};
use crate::schema::{FieldEntry, FieldType, Schema, DATE_TIME_PRECISION_INDEXED};
@@ -46,11 +45,11 @@ fn compute_initial_table_size(per_thread_memory_budget: usize) -> crate::Result<
///
/// It creates the postings lists in anonymous memory.
/// The segment is laid out on disk when it gets `finalized`.
pub struct SegmentWriter<Codec: crate::codec::Codec> {
pub struct SegmentWriter {
pub(crate) max_doc: DocId,
pub(crate) ctx: IndexingContext,
pub(crate) per_field_postings_writers: PerFieldPostingsWriter,
pub(crate) segment_serializer: SegmentSerializer<Codec>,
pub(crate) segment_serializer: SegmentSerializer,
pub(crate) fast_field_writers: FastFieldsWriter,
pub(crate) fieldnorms_writer: FieldNormsWriter,
pub(crate) json_path_writer: JsonPathWriter,
@@ -61,7 +60,7 @@ pub struct SegmentWriter<Codec: crate::codec::Codec> {
schema: Schema,
}
impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
impl SegmentWriter {
/// Creates a new `SegmentWriter`
///
/// The arguments are defined as follows
@@ -71,10 +70,7 @@ impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
/// behavior as a memory limit.
/// - segment: The segment being written
/// - schema
pub fn for_segment(
memory_budget_in_bytes: usize,
segment: Segment<Codec>,
) -> crate::Result<Self> {
pub fn for_segment(memory_budget_in_bytes: usize, segment: Segment) -> crate::Result<Self> {
let schema = segment.schema();
let tokenizer_manager = segment.index().tokenizers().clone();
let tokenizer_manager_fast_field = segment.index().fast_field_tokenizer().clone();
@@ -173,7 +169,7 @@ impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
}
let (term_buffer, ctx) = (&mut self.term_buffer, &mut self.ctx);
let postings_writer: &mut PostingsWriterEnum =
let postings_writer: &mut dyn PostingsWriter =
self.per_field_postings_writers.get_for_field_mut(field);
term_buffer.clear_with_field(field);
@@ -390,13 +386,13 @@ impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
/// to the `SegmentSerializer`.
///
/// `doc_id_map` is used to map to the new doc_id order.
fn remap_and_write<C: Codec>(
fn remap_and_write(
schema: Schema,
per_field_postings_writers: &PerFieldPostingsWriter,
ctx: IndexingContext,
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: &FieldNormsWriter,
mut serializer: SegmentSerializer<C>,
mut serializer: SegmentSerializer,
) -> crate::Result<()> {
debug!("remap-and-write");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {

View File

@@ -1,7 +1,5 @@
use std::marker::PhantomData;
use crate::codec::StandardCodec;
use crate::index::CodecConfiguration;
use crate::indexer::operation::AddOperation;
use crate::indexer::segment_updater::save_metas;
use crate::indexer::SegmentWriter;
@@ -9,25 +7,22 @@ use crate::schema::document::Document;
use crate::{Directory, Index, IndexMeta, Opstamp, Segment, TantivyDocument};
#[doc(hidden)]
pub struct SingleSegmentIndexWriter<
Codec: crate::codec::Codec = StandardCodec,
D: Document = TantivyDocument,
> {
segment_writer: SegmentWriter<Codec>,
segment: Segment<Codec>,
pub struct SingleSegmentIndexWriter<D: Document = TantivyDocument> {
segment_writer: SegmentWriter,
segment: Segment,
opstamp: Opstamp,
_doc: PhantomData<D>,
_phantom: PhantomData<D>,
}
impl<Codec: crate::codec::Codec, D: Document> SingleSegmentIndexWriter<Codec, D> {
pub fn new(index: Index<Codec>, mem_budget: usize) -> crate::Result<Self> {
impl<D: Document> SingleSegmentIndexWriter<D> {
pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
let segment = index.new_segment();
let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
Ok(Self {
segment_writer,
segment,
opstamp: 0,
_doc: PhantomData,
_phantom: PhantomData,
})
}
@@ -42,10 +37,10 @@ impl<Codec: crate::codec::Codec, D: Document> SingleSegmentIndexWriter<Codec, D>
.add_document(AddOperation { opstamp, document })
}
pub fn finalize(self) -> crate::Result<Index<Codec>> {
pub fn finalize(self) -> crate::Result<Index> {
let max_doc = self.segment_writer.max_doc();
self.segment_writer.finalize()?;
let segment: Segment<Codec> = self.segment.with_max_doc(max_doc);
let segment: Segment = self.segment.with_max_doc(max_doc);
let index = segment.index();
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
@@ -53,7 +48,6 @@ impl<Codec: crate::codec::Codec, D: Document> SingleSegmentIndexWriter<Codec, D>
schema: index.schema(),
opstamp: 0,
payload: None,
codec: CodecConfiguration::from(index.codec()),
};
save_metas(&index_meta, index.directory())?;
index.directory().sync_directory()?;

View File

@@ -166,9 +166,6 @@ mod functional_test;
#[macro_use]
mod macros;
/// Tantivy codecs describe how data is laid out on disk.
pub mod codec;
mod future_result;
// Re-exports
@@ -380,7 +377,7 @@ pub mod tests {
use common::{BinarySerializable, FixedSize};
use query_grammar::{UserInputAst, UserInputLeaf, UserInputLiteral};
use rand::distributions::{Bernoulli, Uniform};
use rand::distr::{Bernoulli, Uniform};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use time::OffsetDateTime;
@@ -431,7 +428,7 @@ pub mod tests {
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 32] = [1; 32];
StdRng::from_seed(seed)
.sample_iter(&Uniform::new(0u32, max_value))
.sample_iter(&Uniform::new(0u32, max_value).unwrap())
.take(n_elems)
.collect::<Vec<u32>>()
}
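
(For reference: the `rand` 0.9 changes recurring throughout this diff follow one pattern — `rand::distributions` became `rand::distr`, `Uniform::new` became fallible, and `gen_bool` was renamed `random_bool`. A minimal standalone sketch, not tantivy code:)

```rust
// Sketch of the rand 0.9 migration (assumes rand = "0.9").
use rand::distr::Uniform; // previously rand::distributions::Uniform
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

fn main() {
    // Uniform::new now returns a Result and must be unwrapped.
    let values: Vec<u32> = StdRng::from_seed([1u8; 32])
        .sample_iter(&Uniform::new(0u32, 100).unwrap())
        .take(5)
        .collect();
    // gen_bool(p) was renamed to random_bool(p).
    let mut rng = StdRng::from_seed([0u8; 32]);
    let heads = rng.random_bool(0.5);
    println!("{values:?} {heads}");
}
```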

View File

@@ -1,18 +1,28 @@
use std::io;
use common::{OwnedBytes, VInt};
use common::VInt;
use crate::codec::standard::postings::skip::{BlockInfo, SkipReader};
use crate::codec::standard::postings::FreqReadingOption;
use crate::postings::compression::{BlockDecoder, VIntDecoder as _, COMPRESSION_BLOCK_SIZE};
use crate::directory::{FileSlice, OwnedBytes};
use crate::fieldnorm::FieldNormReader;
use crate::postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::{BlockInfo, FreqReadingOption, SkipReader};
use crate::query::Bm25Weight;
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
it.next().map(|first| it.fold(first, Score::max))
}
/// `BlockSegmentPostings` is a cursor iterating over blocks
/// of documents.
///
/// # Warning
///
/// While it is useful for some very specific high-performance
/// use cases, you should prefer using `SegmentPostings` for most usage.
#[derive(Clone)]
pub(crate) struct BlockSegmentPostings {
pub struct BlockSegmentPostings {
pub(crate) doc_decoder: BlockDecoder,
block_loaded: bool,
freq_decoder: BlockDecoder,
@@ -78,7 +88,7 @@ fn split_into_skips_and_postings(
}
impl BlockSegmentPostings {
/// Opens a `StandardPostingsReader`.
/// Opens a `BlockSegmentPostings`.
/// `doc_freq` is the number of documents in the posting list.
/// `record_option` represents the amount of data available according to the schema.
/// `requested_option` is the amount of data requested by the user.
@@ -86,10 +96,11 @@ impl BlockSegmentPostings {
/// term frequency blocks.
pub(crate) fn open(
doc_freq: u32,
bytes: OwnedBytes,
data: FileSlice,
mut record_option: IndexRecordOption,
requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> {
let bytes = data.read_bytes()?;
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, bytes)?;
let skip_reader = match skip_data_opt {
Some(skip_data) => {
@@ -127,9 +138,76 @@ impl BlockSegmentPostings {
block_segment_postings.load_block();
Ok(block_segment_postings)
}
}
impl BlockSegmentPostings {
/// Returns the block_max_score for the current block.
/// It does not require the block to be loaded. For instance, it is ok to call this method
/// after having called `.shallow_advance(..)`.
///
/// See `TermScorer::block_max_score(..)` for more information.
pub fn block_max_score(
&mut self,
fieldnorm_reader: &FieldNormReader,
bm25_weight: &Bm25Weight,
) -> Score {
if let Some(score) = self.block_max_score_cache {
return score;
}
if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
// if we are on a full block, the skip reader should have the block max information
// for us
self.block_max_score_cache = Some(skip_reader_max_score);
return skip_reader_max_score;
}
// this is the last block of the segment posting list.
// If it is actually loaded, we can compute block max manually.
if self.block_is_loaded() {
let docs = self.doc_decoder.output_array().iter().cloned();
let freqs = self.freq_decoder.output_array().iter().cloned();
let bm25_scores = docs.zip(freqs).map(|(doc, term_freq)| {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
bm25_weight.score(fieldnorm_id, term_freq)
});
let block_max_score = max_score(bm25_scores).unwrap_or(0.0);
self.block_max_score_cache = Some(block_max_score);
return block_max_score;
}
// We do not have access to any good block max value. We return bm25_weight.max_score()
// as it is a valid upper bound.
//
// We do not cache it, however, so that it gets computed once the block is loaded.
bm25_weight.max_score()
}
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.freq_reading_option
}
// Resets the block segment postings to another position
// in the postings file.
//
// This is useful for enumerating through a list of terms,
// and consuming the associated posting lists while avoiding
// reallocating a `BlockSegmentPostings`.
//
// # Warning
//
// This does not reset the positions list.
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) -> io::Result<()> {
let (skip_data_opt, postings_data) =
split_into_skips_and_postings(doc_freq, postings_data)?;
self.data = postings_data;
self.block_max_score_cache = None;
self.block_loaded = false;
if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq);
} else {
self.skip_reader.reset(OwnedBytes::empty(), doc_freq);
}
self.doc_freq = doc_freq;
self.load_block();
Ok(())
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
///
@@ -145,7 +223,7 @@ impl BlockSegmentPostings {
/// returned by `.docs()` is empty.
#[inline]
pub fn docs(&self) -> &[DocId] {
debug_assert!(self.block_loaded);
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_array()
}
@@ -158,24 +236,35 @@ impl BlockSegmentPostings {
/// Return the array of `term freq` in the block.
#[inline]
pub fn freqs(&self) -> &[u32] {
debug_assert!(self.block_loaded);
debug_assert!(self.block_is_loaded());
self.freq_decoder.output_array()
}
/// Return the frequency at index `idx` of the block.
#[inline]
pub fn freq(&self, idx: usize) -> u32 {
debug_assert!(self.block_loaded);
debug_assert!(self.block_is_loaded());
self.freq_decoder.output(idx)
}
/// Returns the length of the current block.
///
/// All blocks have a length of `NUM_DOCS_PER_BLOCK`,
/// except the last block, which may have any length
/// between 1 and `NUM_DOCS_PER_BLOCK - 1`.
#[inline]
pub fn block_len(&self) -> usize {
debug_assert!(self.block_is_loaded());
self.doc_decoder.output_len
}
/// Positions the cursor on a block that may contain `target_doc`.
///
/// If all docs are smaller than the target, the loaded block may be empty,
/// or be the last, incomplete VInt block.
pub fn seek(&mut self, target_doc: DocId) -> usize {
// Move to the block that might contain our document.
self.seek_block_without_loading(target_doc);
self.seek_block(target_doc);
self.load_block();
// At this point we are on the block that might contain our document.
@@ -192,79 +281,32 @@ impl BlockSegmentPostings {
doc
}
pub fn position_offset(&self) -> u64 {
pub(crate) fn position_offset(&self) -> u64 {
self.skip_reader.position_offset()
}
/// Advance to the next block.
pub fn advance(&mut self) {
self.skip_reader.advance();
self.block_loaded = false;
self.block_max_score_cache = None;
self.load_block();
}
/// Returns the block_max_score for the current block.
/// It does not require the block to be loaded. For instance, it is ok to call this method
/// after having called `.shallow_advance(..)`.
///
/// See `TermScorer::block_max_score(..)` for more information.
pub fn block_max_score(&mut self, bm25_weight: &Bm25Weight) -> Score {
if let Some(score) = self.block_max_score_cache {
return score;
}
if let Some(skip_reader_max_score) = self.skip_reader.block_max_score(bm25_weight) {
// if we are on a full block, the skip reader should have the block max information
// for us
self.block_max_score_cache = Some(skip_reader_max_score);
return skip_reader_max_score;
}
// We do not have access to any good block max value.
// It happens if this is the last block.
// We return bm25_weight.max_score() as it is a valid upper bound.
//
// We do not cache it, however, so that it gets computed once the block is loaded.
bm25_weight.max_score()
}
}
impl BlockSegmentPostings {
/// Returns an empty segment postings object
pub fn empty() -> BlockSegmentPostings {
BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
block_loaded: true,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option: FreqReadingOption::NoFreq,
block_max_score_cache: None,
doc_freq: 0,
data: OwnedBytes::empty(),
skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
}
}
pub(crate) fn skip_reader(&self) -> &SkipReader {
&self.skip_reader
}
/// Dangerous API! This call seeks the next block on the skip list,
/// but does not `.load_block()` afterwards.
///
/// `.load_block()` needs to be called manually afterwards.
/// If all docs are smaller than the target, the loaded block may be empty,
/// or be the last, incomplete VInt block.
pub(crate) fn seek_block_without_loading(&mut self, target_doc: DocId) {
pub(crate) fn seek_block(&mut self, target_doc: DocId) {
if self.skip_reader.seek(target_doc) {
self.block_max_score_cache = None;
self.block_loaded = false;
}
}
pub(crate) fn block_is_loaded(&self) -> bool {
self.block_loaded
}
pub(crate) fn load_block(&mut self) {
if self.block_loaded {
let offset = self.skip_reader.byte_offset();
if self.block_is_loaded() {
return;
}
let offset = self.skip_reader.byte_offset();
match self.skip_reader.block_info() {
BlockInfo::BitPacked {
doc_num_bits,
@@ -309,40 +351,68 @@ impl BlockSegmentPostings {
}
self.block_loaded = true;
}
/// Advance to the next block.
pub fn advance(&mut self) {
self.skip_reader.advance();
self.block_loaded = false;
self.block_max_score_cache = None;
self.load_block();
}
/// Returns an empty segment postings object
pub fn empty() -> BlockSegmentPostings {
BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
block_loaded: true,
freq_decoder: BlockDecoder::with_val(1),
freq_reading_option: FreqReadingOption::NoFreq,
block_max_score_cache: None,
doc_freq: 0,
data: OwnedBytes::empty(),
skip_reader: SkipReader::new(OwnedBytes::empty(), 0, IndexRecordOption::Basic),
}
}
pub(crate) fn skip_reader(&self) -> &SkipReader {
&self.skip_reader
}
}
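
(As a usage sketch, the block-by-block consumption pattern — the same loop `AutomatonWeight` uses further down in this diff — looks like the following; obtaining the cursor via `read_block_postings_from_terminfo` is assumed:)

```rust
// Sketch: drain a BlockSegmentPostings cursor block by block.
// `block_postings` is assumed to come from
// `inverted_index.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?`.
fn count_docs(mut block_postings: tantivy::postings::BlockSegmentPostings) -> usize {
    let mut count = 0;
    loop {
        let docs = block_postings.docs();
        // Once the cursor is exhausted, `docs()` returns an empty slice.
        if docs.is_empty() {
            break;
        }
        count += docs.len();
        block_postings.advance();
    }
    count
}
```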
#[cfg(test)]
mod tests {
use common::OwnedBytes;
use common::HasLen;
use super::BlockSegmentPostings;
use crate::codec::postings::PostingsSerializer;
use crate::codec::standard::postings::segment_postings::SegmentPostings;
use crate::codec::standard::postings::StandardPostingsSerializer;
use crate::docset::{DocSet, TERMINATED};
use crate::index::Index;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::schema::IndexRecordOption;
use crate::postings::postings::Postings;
use crate::postings::SegmentPostings;
use crate::schema::{IndexRecordOption, Schema, Term, INDEXED};
use crate::DocId;
#[cfg(test)]
fn build_block_postings(docs: &[u32]) -> BlockSegmentPostings {
let doc_freq = docs.len() as u32;
let mut postings_serializer =
StandardPostingsSerializer::new(1.0f32, IndexRecordOption::Basic, None);
postings_serializer.new_term(docs.len() as u32, false);
for doc in docs {
postings_serializer.write_doc(*doc, 1u32);
}
let mut buffer: Vec<u8> = Vec::new();
postings_serializer
.close_term(doc_freq, &mut buffer)
.unwrap();
BlockSegmentPostings::open(
doc_freq,
OwnedBytes::new(buffer),
IndexRecordOption::Basic,
IndexRecordOption::Basic,
)
.unwrap()
#[test]
fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty();
assert_eq!(postings.doc(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.doc_freq(), 0);
assert_eq!(postings.len(), 0);
}
#[test]
fn test_empty_postings_doc_returns_terminated() {
let mut postings = SegmentPostings::empty();
assert_eq!(postings.doc(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
}
#[test]
fn test_empty_postings_doc_term_freq_returns_0() {
let postings = SegmentPostings::empty();
assert_eq!(postings.term_freq(), 1);
}
#[test]
@@ -357,7 +427,7 @@ mod tests {
#[test]
fn test_block_segment_postings() -> crate::Result<()> {
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>())?;
let mut offset: u32 = 0u32;
// checking that the `doc_freq` is correct
assert_eq!(block_segments.doc_freq(), 100_000);
@@ -382,7 +452,7 @@ mod tests {
doc_ids.push(129);
doc_ids.push(130);
{
let block_segments = build_block_postings(&doc_ids);
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
assert_eq!(docset.seek(128), 129);
assert_eq!(docset.doc(), 129);
@@ -391,7 +461,7 @@ mod tests {
assert_eq!(docset.advance(), TERMINATED);
}
{
let block_segments = build_block_postings(&doc_ids);
let block_segments = build_block_postings(&doc_ids).unwrap();
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
assert_eq!(docset.seek(129), 129);
assert_eq!(docset.doc(), 129);
@@ -400,7 +470,7 @@ mod tests {
assert_eq!(docset.advance(), TERMINATED);
}
{
let block_segments = build_block_postings(&doc_ids);
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
assert_eq!(docset.doc(), 0);
assert_eq!(docset.seek(131), TERMINATED);
@@ -409,13 +479,38 @@ mod tests {
Ok(())
}
fn build_block_postings(docs: &[DocId]) -> crate::Result<BlockSegmentPostings> {
let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
let mut last_doc = 0u32;
for &doc in docs {
for _ in last_doc..doc {
index_writer.add_document(doc!(int_field=>1u64))?;
}
index_writer.add_document(doc!(int_field=>0u64))?;
last_doc = doc + 1;
}
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(int_field).unwrap();
let term = Term::from_field_u64(int_field, 0u64);
let term_info = inverted_index.get_term_info(&term)?.unwrap();
let block_postings = inverted_index
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
Ok(block_postings)
}
#[test]
fn test_block_segment_postings_seek() -> crate::Result<()> {
let mut docs = Vec::new();
let mut docs = vec![0];
for i in 0..1300 {
docs.push((i * i / 100) + i);
}
let mut block_postings = build_block_postings(&docs[..]);
let mut block_postings = build_block_postings(&docs[..])?;
for i in &[0, 424, 10000] {
block_postings.seek(*i);
let docs = block_postings.docs();
@@ -426,4 +521,40 @@ mod tests {
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
Ok(())
}
#[test]
fn test_reset_block_segment_postings() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
// create two posting lists, one containing even numbers,
// the other containing odd numbers.
for i in 0..6 {
let doc = doc!(int_field=> (i % 2) as u64);
index_writer.add_document(doc)?;
}
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);
let mut block_segments;
{
let term = Term::from_field_u64(int_field, 0u64);
let inverted_index = segment_reader.inverted_index(int_field)?;
let term_info = inverted_index.get_term_info(&term)?.unwrap();
block_segments = inverted_index
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
}
assert_eq!(block_segments.docs(), &[0, 2, 4]);
{
let term = Term::from_field_u64(int_field, 1u64);
let inverted_index = segment_reader.inverted_index(int_field)?;
let term_info = inverted_index.get_term_info(&term)?.unwrap();
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
}
assert_eq!(block_segments.docs(), &[1, 3, 5]);
Ok(())
}
}

View File

@@ -397,7 +397,10 @@ mod bench {
let mut seed: [u8; 32] = [0; 32];
seed[31] = seed_val;
let mut rng = StdRng::from_seed(seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
(0u32..)
.filter(|_| rng.random_bool(ratio))
.take(n)
.collect()
}
pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> {

View File

@@ -3,7 +3,6 @@ use std::io;
use common::json_path_writer::JSON_END_OF_PATH;
use stacker::Addr;
use crate::codec::Codec;
use crate::indexer::indexing_term::IndexingTerm;
use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::postings_writer::SpecializedPostingsWriter;
@@ -23,6 +22,12 @@ pub(crate) struct JsonPostingsWriter<Rec: Recorder> {
non_str_posting_writer: SpecializedPostingsWriter<DocIdRecorder>,
}
impl<Rec: Recorder> From<JsonPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
fn from(json_postings_writer: JsonPostingsWriter<Rec>) -> Box<dyn PostingsWriter> {
Box::new(json_postings_writer)
}
}
impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
#[inline]
fn subscribe(
@@ -53,12 +58,12 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
}
/// The actual serialization format is handled by the `PostingsSerializer`.
fn serialize<C: Codec>(
fn serialize(
&self,
ordered_term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
ordered_id_to_path: &[&str],
ctx: &IndexingContext,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
let mut term_buffer = JsonTermSerializer(Vec::with_capacity(48));
let mut buffer_lender = BufferLender::default();

View File

@@ -1,5 +1,5 @@
use crate::docset::{DocSet, TERMINATED};
use crate::postings::{DocFreq, Postings};
use crate::postings::{Postings, SegmentPostings};
use crate::DocId;
/// `LoadedPostings` is a `DocSet` and `Postings` implementation.
@@ -25,16 +25,16 @@ impl LoadedPostings {
/// Creates a new `LoadedPostings` from a `SegmentPostings`.
///
/// It will also preload positions, if positions are available in the SegmentPostings.
pub fn load(postings: &mut Box<dyn Postings>) -> LoadedPostings {
let num_docs: usize = u32::from(postings.doc_freq()) as usize;
pub fn load(segment_postings: &mut SegmentPostings) -> LoadedPostings {
let num_docs = segment_postings.doc_freq() as usize;
let mut doc_ids = Vec::with_capacity(num_docs);
let mut positions = Vec::with_capacity(num_docs);
let mut position_offsets = Vec::with_capacity(num_docs);
while postings.doc() != TERMINATED {
while segment_postings.doc() != TERMINATED {
position_offsets.push(positions.len() as u32);
doc_ids.push(postings.doc());
postings.append_positions_with_offset(0, &mut positions);
postings.advance();
doc_ids.push(segment_postings.doc());
segment_postings.append_positions_with_offset(0, &mut positions);
segment_postings.advance();
}
position_offsets.push(positions.len() as u32);
LoadedPostings {
@@ -101,14 +101,6 @@ impl Postings for LoadedPostings {
output.push(*pos + offset);
}
}
fn has_freq(&self) -> bool {
true
}
fn doc_freq(&self) -> DocFreq {
DocFreq::Exact(self.doc_ids.len() as u32)
}
}
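
(A crate-internal sketch of how this type is used after the change — `LoadedPostings` stays `pub(crate)`:)

```rust
// Inside tantivy: eagerly materialize a posting list.
use crate::postings::{LoadedPostings, SegmentPostings};

fn materialize(mut postings: SegmentPostings) -> LoadedPostings {
    // `load` drains the cursor to TERMINATED, copying doc ids and,
    // when available, positions into plain vectors.
    LoadedPostings::load(&mut postings)
}
```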
#[cfg(test)]

View File

@@ -4,6 +4,7 @@ mod block_search;
pub(crate) use self::block_search::branchless_binary_search;
mod block_segment_postings;
pub(crate) mod compression;
mod indexing_context;
mod json_postings_writer;
@@ -12,22 +13,32 @@ mod per_field_postings_writer;
mod postings;
mod postings_writer;
mod recorder;
mod segment_postings;
mod serializer;
mod skip;
mod term_info;
pub(crate) use loaded_postings::LoadedPostings;
pub use postings::DocFreq;
pub(crate) use stacker::compute_table_memory_size;
pub use self::block_segment_postings::BlockSegmentPostings;
pub(crate) use self::indexing_context::IndexingContext;
pub(crate) use self::per_field_postings_writer::PerFieldPostingsWriter;
pub use self::postings::Postings;
pub(crate) use self::postings_writer::{
serialize_postings, IndexingPosition, PostingsWriter, PostingsWriterEnum,
};
pub(crate) use self::postings_writer::{serialize_postings, IndexingPosition, PostingsWriter};
pub use self::segment_postings::SegmentPostings;
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
pub(crate) use self::skip::{BlockInfo, SkipReader};
pub use self::term_info::TermInfo;
#[expect(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
pub(crate) enum FreqReadingOption {
NoFreq,
SkipFreq,
ReadFreq,
}
#[cfg(test)]
pub(crate) mod tests {
use std::mem;
@@ -38,7 +49,6 @@ pub(crate) mod tests {
use crate::index::{Index, SegmentComponent, SegmentReader};
use crate::indexer::operation::AddOperation;
use crate::indexer::SegmentWriter;
use crate::postings::DocFreq;
use crate::query::Scorer;
use crate::schema::{
Field, IndexRecordOption, Schema, Term, TextFieldIndexing, TextOptions, INDEXED, TEXT,
@@ -269,11 +279,11 @@ pub(crate) mod tests {
}
{
let term_a = Term::from_field_text(text_field, "a");
let mut postings_a: Box<dyn Postings> = segment_reader
let mut postings_a = segment_reader
.inverted_index(term_a.field())?
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)?
.unwrap();
assert_eq!(postings_a.doc_freq(), DocFreq::Exact(1000));
assert_eq!(postings_a.len(), 1000);
assert_eq!(postings_a.doc(), 0);
assert_eq!(postings_a.term_freq(), 6);
postings_a.positions(&mut positions);
@@ -296,7 +306,7 @@ pub(crate) mod tests {
.inverted_index(term_e.field())?
.read_postings(&term_e, IndexRecordOption::WithFreqsAndPositions)?
.unwrap();
assert_eq!(postings_e.doc_freq(), DocFreq::Exact(1000 - 2));
assert_eq!(postings_e.len(), 1000 - 2);
for i in 2u32..1000u32 {
assert_eq!(postings_e.term_freq(), i);
postings_e.positions(&mut positions);
@@ -594,13 +604,13 @@ mod bench {
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
for _ in 0..posting_list_size {
let mut doc = TantivyDocument::default();
if rng.gen_bool(1f64 / 15f64) {
if rng.random_bool(1f64 / 15f64) {
doc.add_text(text_field, "a");
}
if rng.gen_bool(1f64 / 10f64) {
if rng.random_bool(1f64 / 10f64) {
doc.add_text(text_field, "b");
}
if rng.gen_bool(1f64 / 5f64) {
if rng.random_bool(1f64 / 5f64) {
doc.add_text(text_field, "c");
}
doc.add_text(text_field, "d");

View File

@@ -1,15 +1,16 @@
use crate::postings::json_postings_writer::JsonPostingsWriter;
use crate::postings::postings_writer::{PostingsWriterEnum, SpecializedPostingsWriter};
use crate::postings::postings_writer::SpecializedPostingsWriter;
use crate::postings::recorder::{DocIdRecorder, TermFrequencyRecorder, TfAndPositionRecorder};
use crate::postings::PostingsWriter;
use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema};
pub(crate) struct PerFieldPostingsWriter {
per_field_postings_writers: Vec<PostingsWriterEnum>,
per_field_postings_writers: Vec<Box<dyn PostingsWriter>>,
}
impl PerFieldPostingsWriter {
pub fn for_schema(schema: &Schema) -> Self {
let per_field_postings_writers: Vec<PostingsWriterEnum> = schema
let per_field_postings_writers = schema
.fields()
.map(|(_, field_entry)| posting_writer_from_field_entry(field_entry))
.collect();
@@ -18,16 +19,16 @@ impl PerFieldPostingsWriter {
}
}
pub(crate) fn get_for_field(&self, field: Field) -> &PostingsWriterEnum {
&self.per_field_postings_writers[field.field_id() as usize]
pub(crate) fn get_for_field(&self, field: Field) -> &dyn PostingsWriter {
self.per_field_postings_writers[field.field_id() as usize].as_ref()
}
pub(crate) fn get_for_field_mut(&mut self, field: Field) -> &mut PostingsWriterEnum {
&mut self.per_field_postings_writers[field.field_id() as usize]
pub(crate) fn get_for_field_mut(&mut self, field: Field) -> &mut dyn PostingsWriter {
self.per_field_postings_writers[field.field_id() as usize].as_mut()
}
}
fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> PostingsWriterEnum {
fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter> {
match *field_entry.field_type() {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
@@ -50,7 +51,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> PostingsWriterEn
| FieldType::Date(_)
| FieldType::Bytes(_)
| FieldType::IpAddr(_)
| FieldType::Facet(_) => <SpecializedPostingsWriter<DocIdRecorder>>::default().into(),
| FieldType::Facet(_) => Box::<SpecializedPostingsWriter<DocIdRecorder>>::default(),
FieldType::JsonObject(ref json_object_options) => {
if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
match text_indexing_option.index_option() {

View File

@@ -1,25 +1,5 @@
use crate::docset::DocSet;
/// Result of the doc_freq method.
///
/// Postings can inform us that the document frequency is approximate.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DocFreq {
/// The document frequency is approximate.
Approximate(u32),
/// The document frequency is exact.
Exact(u32),
}
impl From<DocFreq> for u32 {
fn from(doc_freq: DocFreq) -> Self {
match doc_freq {
DocFreq::Approximate(approximate_doc_freq) => approximate_doc_freq,
DocFreq::Exact(doc_freq) => doc_freq,
}
}
}
/// Postings (also called inverted list)
///
/// For a given term, it is the list of doc ids of the doc
@@ -34,9 +14,6 @@ pub trait Postings: DocSet + 'static {
/// The number of times the term appears in the document.
fn term_freq(&self) -> u32;
/// Returns the number of documents containing the term in the segment.
fn doc_freq(&self) -> DocFreq;
/// Returns the positions, offset by a given value.
/// It is not necessary to clear the `output` before calling this method.
/// The output vector will be resized to the `term_freq`.
@@ -54,16 +31,6 @@ pub trait Postings: DocSet + 'static {
fn positions(&mut self, output: &mut Vec<u32>) {
self.positions_with_offset(0u32, output);
}
/// Returns true if the term_frequency is available.
///
/// This is a tricky question, because on JSON fields, it is possible
/// for a text term to have term freq, whereas a number term in the field has none.
///
/// This function returns whether the actual term has term frequencies or not.
/// In the above JSON field example, `has_freq` should return true for the
/// former and false for the latter.
fn has_freq(&self) -> bool;
}
impl Postings for Box<dyn Postings> {
@@ -74,12 +41,4 @@ impl Postings for Box<dyn Postings> {
fn append_positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
(**self).append_positions_with_offset(offset, output);
}
fn has_freq(&self) -> bool {
(**self).has_freq()
}
fn doc_freq(&self) -> DocFreq {
(**self).doc_freq()
}
}
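
(A sketch of the trait in use, mirroring the `read_postings(..., WithFreqsAndPositions)` usage in the tests earlier in this diff; field and term are illustrative:)

```rust
use tantivy::postings::Postings;
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::{DocSet, Index, Term, TERMINATED};

fn print_positions(index: &Index, field: Field, text: &str) -> tantivy::Result<()> {
    let searcher = index.reader()?.searcher();
    let term = Term::from_field_text(field, text);
    let mut positions = Vec::new();
    for segment_reader in searcher.segment_readers() {
        if let Some(mut postings) = segment_reader
            .inverted_index(field)?
            .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
        {
            while postings.doc() != TERMINATED {
                // No need to clear `positions`: it is resized to term_freq.
                postings.positions(&mut positions);
                println!("doc {} -> {:?}", postings.doc(), positions);
                postings.advance();
            }
        }
    }
    Ok(())
}
```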

View File

@@ -4,14 +4,10 @@ use std::ops::Range;
use stacker::Addr;
use crate::codec::Codec;
use crate::fieldnorm::FieldNormReaders;
use crate::indexer::indexing_term::IndexingTerm;
use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::json_postings_writer::JsonPostingsWriter;
use crate::postings::recorder::{
BufferLender, DocIdRecorder, Recorder, TermFrequencyRecorder, TfAndPositionRecorder,
};
use crate::postings::recorder::{BufferLender, Recorder};
use crate::postings::{
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
};
@@ -49,12 +45,12 @@ fn make_field_partition(
/// Serialize the inverted index.
/// It pushes all term, one field at a time, towards the
/// postings serializer.
pub(crate) fn serialize_postings<C: Codec>(
pub(crate) fn serialize_postings(
ctx: IndexingContext,
schema: Schema,
per_field_postings_writers: &PerFieldPostingsWriter,
fieldnorm_readers: FieldNormReaders,
serializer: &mut InvertedIndexSerializer<C>,
serializer: &mut InvertedIndexSerializer,
) -> crate::Result<()> {
// Replace unordered ids by ordered ids to be able to sort
let unordered_id_to_ordered_id: Vec<OrderedPathId> =
@@ -104,141 +100,6 @@ pub(crate) struct IndexingPosition {
pub end_position: u32,
}
pub enum PostingsWriterEnum {
DocId(SpecializedPostingsWriter<DocIdRecorder>),
DocIdTf(SpecializedPostingsWriter<TermFrequencyRecorder>),
DocTfAndPosition(SpecializedPostingsWriter<TfAndPositionRecorder>),
JsonDocId(JsonPostingsWriter<DocIdRecorder>),
JsonDocIdTf(JsonPostingsWriter<TermFrequencyRecorder>),
JsonDocTfAndPosition(JsonPostingsWriter<TfAndPositionRecorder>),
}
impl From<SpecializedPostingsWriter<DocIdRecorder>> for PostingsWriterEnum {
fn from(doc_id_recorder_writer: SpecializedPostingsWriter<DocIdRecorder>) -> Self {
PostingsWriterEnum::DocId(doc_id_recorder_writer)
}
}
impl From<SpecializedPostingsWriter<TermFrequencyRecorder>> for PostingsWriterEnum {
fn from(doc_id_tf_recorder_writer: SpecializedPostingsWriter<TermFrequencyRecorder>) -> Self {
PostingsWriterEnum::DocIdTf(doc_id_tf_recorder_writer)
}
}
impl From<SpecializedPostingsWriter<TfAndPositionRecorder>> for PostingsWriterEnum {
fn from(
doc_id_tf_and_positions_recorder_writer: SpecializedPostingsWriter<TfAndPositionRecorder>,
) -> Self {
PostingsWriterEnum::DocTfAndPosition(doc_id_tf_and_positions_recorder_writer)
}
}
impl From<JsonPostingsWriter<DocIdRecorder>> for PostingsWriterEnum {
fn from(doc_id_recorder_writer: JsonPostingsWriter<DocIdRecorder>) -> Self {
PostingsWriterEnum::JsonDocId(doc_id_recorder_writer)
}
}
impl From<JsonPostingsWriter<TermFrequencyRecorder>> for PostingsWriterEnum {
fn from(doc_id_tf_recorder_writer: JsonPostingsWriter<TermFrequencyRecorder>) -> Self {
PostingsWriterEnum::JsonDocIdTf(doc_id_tf_recorder_writer)
}
}
impl From<JsonPostingsWriter<TfAndPositionRecorder>> for PostingsWriterEnum {
fn from(
doc_id_tf_and_positions_recorder_writer: JsonPostingsWriter<TfAndPositionRecorder>,
) -> Self {
PostingsWriterEnum::JsonDocTfAndPosition(doc_id_tf_and_positions_recorder_writer)
}
}
impl PostingsWriter for PostingsWriterEnum {
fn subscribe(&mut self, doc: DocId, pos: u32, term: &IndexingTerm, ctx: &mut IndexingContext) {
match self {
PostingsWriterEnum::DocId(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::DocIdTf(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::DocTfAndPosition(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::JsonDocId(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::JsonDocIdTf(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::JsonDocTfAndPosition(writer) => {
writer.subscribe(doc, pos, term, ctx)
}
}
}
fn serialize<C: Codec>(
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
ordered_id_to_path: &[&str],
ctx: &IndexingContext,
serializer: &mut FieldSerializer<C>,
) -> io::Result<()> {
match self {
PostingsWriterEnum::DocId(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::DocIdTf(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::DocTfAndPosition(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::JsonDocId(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::JsonDocIdTf(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::JsonDocTfAndPosition(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
}
}
/// Tokenize a text and subscribe all of its tokens.
fn index_text(
&mut self,
doc_id: DocId,
token_stream: &mut dyn TokenStream,
term_buffer: &mut IndexingTerm,
ctx: &mut IndexingContext,
indexing_position: &mut IndexingPosition,
) {
match self {
PostingsWriterEnum::DocId(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::DocIdTf(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::DocTfAndPosition(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::JsonDocId(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::JsonDocIdTf(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::JsonDocTfAndPosition(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
}
}
fn total_num_tokens(&self) -> u64 {
match self {
PostingsWriterEnum::DocId(writer) => writer.total_num_tokens(),
PostingsWriterEnum::DocIdTf(writer) => writer.total_num_tokens(),
PostingsWriterEnum::DocTfAndPosition(writer) => writer.total_num_tokens(),
PostingsWriterEnum::JsonDocId(writer) => writer.total_num_tokens(),
PostingsWriterEnum::JsonDocIdTf(writer) => writer.total_num_tokens(),
PostingsWriterEnum::JsonDocTfAndPosition(writer) => writer.total_num_tokens(),
}
}
}
/// The `PostingsWriter` is in charge of receiving documents
/// and building a `Segment` in anonymous memory.
///
@@ -255,12 +116,12 @@ pub(crate) trait PostingsWriter: Send + Sync {
/// Serializes the postings on disk.
/// The actual serialization format is handled by the `PostingsSerializer`.
fn serialize<C: Codec>(
fn serialize(
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
ordered_id_to_path: &[&str],
ctx: &IndexingContext,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer,
) -> io::Result<()>;
/// Tokenize a text and subscribe all of its tokens.
@@ -310,14 +171,22 @@ pub(crate) struct SpecializedPostingsWriter<Rec: Recorder> {
_recorder_type: PhantomData<Rec>,
}
impl<Rec: Recorder> From<SpecializedPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
fn from(
specialized_postings_writer: SpecializedPostingsWriter<Rec>,
) -> Box<dyn PostingsWriter> {
Box::new(specialized_postings_writer)
}
}
impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
#[inline]
pub(crate) fn serialize_one_term<C: Codec>(
pub(crate) fn serialize_one_term(
term: &[u8],
addr: Addr,
buffer_lender: &mut BufferLender,
ctx: &IndexingContext,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
let recorder: Rec = ctx.term_index.read(addr);
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
@@ -358,12 +227,12 @@ impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
});
}
fn serialize<C: Codec>(
fn serialize(
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
_ordered_id_to_path: &[&str],
ctx: &IndexingContext,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
let mut buffer_lender = BufferLender::default();
for (_field, _path_id, term, addr) in term_addrs {

View File

@@ -1,7 +1,6 @@
use common::read_u32_vint;
use stacker::{ExpUnrolledLinkedList, MemoryArena};
use crate::codec::Codec;
use crate::postings::FieldSerializer;
use crate::DocId;
@@ -68,10 +67,10 @@ pub(crate) trait Recorder: Copy + Default + Send + Sync + 'static {
/// Close the document. It will help record the term frequency.
fn close_doc(&mut self, arena: &mut MemoryArena);
/// Pushes the postings information to the serializer.
fn serialize<C: Codec>(
fn serialize(
&self,
arena: &MemoryArena,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
);
/// Returns the number of document containing this term.
@@ -111,10 +110,10 @@ impl Recorder for DocIdRecorder {
#[inline]
fn close_doc(&mut self, _arena: &mut MemoryArena) {}
fn serialize<C: Codec>(
fn serialize(
&self,
arena: &MemoryArena,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
) {
let buffer = buffer_lender.lend_u8();
@@ -179,10 +178,10 @@ impl Recorder for TermFrequencyRecorder {
self.current_tf = 0;
}
fn serialize<C: Codec>(
fn serialize(
&self,
arena: &MemoryArena,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
) {
let buffer = buffer_lender.lend_u8();
@@ -236,10 +235,10 @@ impl Recorder for TfAndPositionRecorder {
self.stack.writer(arena).write_u32_vint(POSITION_END);
}
fn serialize<C: Codec>(
fn serialize(
&self,
arena: &MemoryArena,
serializer: &mut FieldSerializer<C>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
) {
let (buffer_u8, buffer_positions) = buffer_lender.lend_all();

View File

@@ -1,13 +1,11 @@
use common::{BitSet, HasLen};
use common::HasLen;
use super::BlockSegmentPostings;
use crate::codec::postings::PostingsWithBlockMax;
use crate::docset::DocSet;
use crate::fastfield::AliveBitSet;
use crate::positions::PositionReader;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::postings::{DocFreq, Postings};
use crate::query::Bm25Weight;
use crate::{DocId, Score};
use crate::postings::{BlockSegmentPostings, Postings};
use crate::{DocId, TERMINATED};
/// `SegmentPostings` represents the inverted list or postings associated with
/// a term in a `Segment`.
@@ -31,6 +29,31 @@ impl SegmentPostings {
}
}
/// Compute the number of non-deleted documents.
///
/// This method will clone and scan through the posting lists.
/// (this is a rather expensive operation).
pub fn doc_freq_given_deletes(&self, alive_bitset: &AliveBitSet) -> u32 {
let mut docset = self.clone();
let mut doc_freq = 0;
loop {
let doc = docset.doc();
if doc == TERMINATED {
return doc_freq;
}
if alive_bitset.is_alive(doc) {
doc_freq += 1u32;
}
docset.advance();
}
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
pub fn doc_freq(&self) -> u32 {
self.block_cursor.doc_freq()
}
/// Creates a segment postings object with the given documents
/// and no frequency encoded.
///
@@ -41,19 +64,13 @@ impl SegmentPostings {
/// buffer with the serialized data.
#[cfg(test)]
pub fn create_from_docs(docs: &[u32]) -> SegmentPostings {
use common::OwnedBytes;
use crate::directory::FileSlice;
use crate::postings::serializer::PostingsSerializer;
use crate::schema::IndexRecordOption;
let mut buffer = Vec::new();
{
use crate::codec::postings::PostingsSerializer;
let mut postings_serializer =
crate::codec::standard::postings::StandardPostingsSerializer::new(
0.0,
IndexRecordOption::Basic,
None,
);
PostingsSerializer::new(0.0, IndexRecordOption::Basic, None);
postings_serializer.new_term(docs.len() as u32, false);
for &doc in docs {
postings_serializer.write_doc(doc, 1u32);
@@ -64,7 +81,7 @@ impl SegmentPostings {
}
let block_segment_postings = BlockSegmentPostings::open(
docs.len() as u32,
OwnedBytes::new(buffer),
FileSlice::from(buffer),
IndexRecordOption::Basic,
IndexRecordOption::Basic,
)
@@ -78,11 +95,9 @@ impl SegmentPostings {
doc_and_tfs: &[(u32, u32)],
fieldnorms: Option<&[u32]>,
) -> SegmentPostings {
use common::OwnedBytes;
use crate::codec::postings::PostingsSerializer as _;
use crate::codec::standard::postings::StandardPostingsSerializer;
use crate::directory::FileSlice;
use crate::fieldnorm::FieldNormReader;
use crate::postings::serializer::PostingsSerializer;
use crate::schema::IndexRecordOption;
use crate::Score;
let mut buffer: Vec<u8> = Vec::new();
@@ -99,7 +114,7 @@ impl SegmentPostings {
total_num_tokens as Score / fieldnorms.len() as Score
})
.unwrap_or(0.0);
let mut postings_serializer = StandardPostingsSerializer::new(
let mut postings_serializer = PostingsSerializer::new(
average_field_norm,
IndexRecordOption::WithFreqs,
fieldnorm_reader,
@@ -113,7 +128,7 @@ impl SegmentPostings {
.unwrap();
let block_segment_postings = BlockSegmentPostings::open(
doc_and_tfs.len() as u32,
OwnedBytes::new(buffer),
FileSlice::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
)
@@ -143,6 +158,7 @@ impl DocSet for SegmentPostings {
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> DocId {
debug_assert!(self.block_cursor.block_is_loaded());
if self.cur == COMPRESSION_BLOCK_SIZE - 1 {
self.cur = 0;
self.block_cursor.advance();
@@ -175,19 +191,6 @@ impl DocSet for SegmentPostings {
fn size_hint(&self) -> u32 {
self.len() as u32
}
fn fill_bitset(&mut self, bitset: &mut BitSet) {
loop {
let docs = self.block_cursor.docs();
if docs.is_empty() {
break;
}
for &doc in docs {
bitset.insert(doc);
}
self.block_cursor.advance();
}
}
}
impl HasLen for SegmentPostings {
@@ -203,7 +206,7 @@ impl Postings for SegmentPostings {
///
/// # Panics
///
/// Will panics if called without having cagled advance before.
/// Will panic if called without having called advance before.
fn term_freq(&self) -> u32 {
debug_assert!(
// Here we do not use the len of `freqs()`
@@ -218,13 +221,6 @@ impl Postings for SegmentPostings {
self.block_cursor.freq(self.cur)
}
/// Returns the overall number of documents in the block postings.
/// It does not take into account whether documents are deleted or not.
#[inline(always)]
fn doc_freq(&self) -> DocFreq {
DocFreq::Exact(self.block_cursor.doc_freq())
}
fn append_positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
let term_freq = self.term_freq();
let prev_len = output.len();
@@ -248,25 +244,6 @@ impl Postings for SegmentPostings {
}
}
}
fn has_freq(&self) -> bool {
!self.block_cursor.freqs().is_empty()
}
}
impl PostingsWithBlockMax for SegmentPostings {
fn seek_block_max(
&mut self,
target_doc: crate::DocId,
similarity_weight: &Bm25Weight,
) -> Score {
self.block_cursor.seek_block_without_loading(target_doc);
self.block_cursor.block_max_score(similarity_weight)
}
fn last_doc_in_block(&self) -> crate::DocId {
self.block_cursor.skip_reader().last_doc_in_block()
}
}
#[cfg(test)]
@@ -276,15 +253,14 @@ mod tests {
use super::SegmentPostings;
use crate::docset::{DocSet, TERMINATED};
use crate::postings::Postings;
use crate::fastfield::AliveBitSet;
use crate::postings::postings::Postings;
#[test]
fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty();
assert_eq!(postings.doc(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.advance(), TERMINATED);
assert_eq!(postings.doc_freq(), crate::postings::DocFreq::Exact(0));
assert_eq!(postings.len(), 0);
}
@@ -300,4 +276,15 @@ mod tests {
let postings = SegmentPostings::empty();
assert_eq!(postings.term_freq(), 1);
}
#[test]
fn test_doc_freq() {
let docs = SegmentPostings::create_from_docs(&[0, 2, 10]);
assert_eq!(docs.doc_freq(), 3);
let alive_bitset = AliveBitSet::for_test_from_deleted_docs(&[2], 12);
assert_eq!(docs.doc_freq_given_deletes(&alive_bitset), 2);
let all_deleted =
AliveBitSet::for_test_from_deleted_docs(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 12);
assert_eq!(docs.doc_freq_given_deletes(&all_deleted), 0);
}
}

View File

@@ -1,14 +1,16 @@
use std::cmp::Ordering;
use std::io::{self, Write};
use common::{BinarySerializable, CountingWriter};
use common::{BinarySerializable, CountingWriter, VInt};
use super::TermInfo;
use crate::codec::postings::PostingsSerializer;
use crate::codec::Codec;
use crate::directory::{CompositeWrite, WritePtr};
use crate::fieldnorm::FieldNormReader;
use crate::index::Segment;
use crate::positions::PositionSerializer;
use crate::postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
use crate::postings::skip::SkipSerializer;
use crate::query::Bm25Weight;
use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema};
use crate::termdict::TermDictionaryBuilder;
use crate::{DocId, Score};
@@ -44,27 +46,22 @@ use crate::{DocId, Score};
///
/// A description of the serialization format is
/// [available here](https://fulmicoton.gitbooks.io/tantivy-doc/content/inverted-index.html).
pub struct InvertedIndexSerializer<C: Codec> {
pub struct InvertedIndexSerializer {
terms_write: CompositeWrite<WritePtr>,
postings_write: CompositeWrite<WritePtr>,
positions_write: CompositeWrite<WritePtr>,
schema: Schema,
codec: C,
}
use crate::codec::postings::PostingsCodec;
impl<C: Codec> InvertedIndexSerializer<C> {
impl InvertedIndexSerializer {
/// Open a new `InvertedIndexSerializer` for the given segment
pub fn open(segment: &mut Segment<C>) -> crate::Result<InvertedIndexSerializer<C>> {
pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
use crate::index::SegmentComponent::{Positions, Postings, Terms};
let codec = segment.index().codec().clone();
let inv_index_serializer = InvertedIndexSerializer {
terms_write: CompositeWrite::wrap(segment.open_write(Terms)?),
postings_write: CompositeWrite::wrap(segment.open_write(Postings)?),
positions_write: CompositeWrite::wrap(segment.open_write(Positions)?),
schema: segment.schema(),
codec,
};
Ok(inv_index_serializer)
}
@@ -78,7 +75,7 @@ impl<C: Codec> InvertedIndexSerializer<C> {
field: Field,
total_num_tokens: u64,
fieldnorm_reader: Option<FieldNormReader>,
) -> io::Result<FieldSerializer<'_, C>> {
) -> io::Result<FieldSerializer<'_>> {
let field_entry: &FieldEntry = self.schema.get_field_entry(field);
let term_dictionary_write = self.terms_write.for_field(field);
let postings_write = self.postings_write.for_field(field);
@@ -91,7 +88,6 @@ impl<C: Codec> InvertedIndexSerializer<C> {
postings_write,
positions_write,
fieldnorm_reader,
&self.codec,
)
}
@@ -106,9 +102,9 @@ impl<C: Codec> InvertedIndexSerializer<C> {
/// The field serializer is in charge of
/// the serialization of a specific field.
pub struct FieldSerializer<'a, C: Codec> {
pub struct FieldSerializer<'a> {
term_dictionary_builder: TermDictionaryBuilder<&'a mut CountingWriter<WritePtr>>,
postings_serializer: <C::PostingsCodec as PostingsCodec>::PostingsSerializer,
postings_serializer: PostingsSerializer,
positions_serializer_opt: Option<PositionSerializer<&'a mut CountingWriter<WritePtr>>>,
current_term_info: TermInfo,
term_open: bool,
@@ -116,7 +112,7 @@ pub struct FieldSerializer<'a, C: Codec> {
postings_start_offset: u64,
}
impl<'a, C: Codec> FieldSerializer<'a, C> {
impl<'a> FieldSerializer<'a> {
fn create(
field_type: &FieldType,
total_num_tokens: u64,
@@ -124,8 +120,7 @@ impl<'a, C: Codec> FieldSerializer<'a, C> {
postings_write: &'a mut CountingWriter<WritePtr>,
positions_write: &'a mut CountingWriter<WritePtr>,
fieldnorm_reader: Option<FieldNormReader>,
codec: &C,
) -> io::Result<FieldSerializer<'a, C>> {
) -> io::Result<FieldSerializer<'a>> {
total_num_tokens.serialize(postings_write)?;
let index_record_option = field_type
.index_record_option()
@@ -135,11 +130,8 @@ impl<'a, C: Codec> FieldSerializer<'a, C> {
.as_ref()
.map(|ff_reader| total_num_tokens as Score / ff_reader.num_docs() as Score)
.unwrap_or(0.0);
let postings_serializer = codec.postings_codec().new_serializer(
average_fieldnorm,
index_record_option,
fieldnorm_reader,
);
let postings_serializer =
PostingsSerializer::new(average_fieldnorm, index_record_option, fieldnorm_reader);
let positions_serializer_opt = if index_record_option.has_positions() {
Some(PositionSerializer::new(positions_write))
} else {
@@ -192,6 +184,7 @@ impl<'a, C: Codec> FieldSerializer<'a, C> {
"Called new_term, while the previous term was not closed."
);
self.term_open = true;
self.postings_serializer.clear();
self.current_term_info = self.current_term_info();
self.term_dictionary_builder.insert_key(term)?;
self.postings_serializer
@@ -255,3 +248,223 @@ impl<'a, C: Codec> FieldSerializer<'a, C> {
Ok(())
}
}
struct Block {
doc_ids: [DocId; COMPRESSION_BLOCK_SIZE],
term_freqs: [u32; COMPRESSION_BLOCK_SIZE],
len: usize,
}
impl Block {
fn new() -> Self {
Block {
doc_ids: [0u32; COMPRESSION_BLOCK_SIZE],
term_freqs: [0u32; COMPRESSION_BLOCK_SIZE],
len: 0,
}
}
fn doc_ids(&self) -> &[DocId] {
&self.doc_ids[..self.len]
}
fn term_freqs(&self) -> &[u32] {
&self.term_freqs[..self.len]
}
fn clear(&mut self) {
self.len = 0;
}
fn append_doc(&mut self, doc: DocId, term_freq: u32) {
let len = self.len;
self.doc_ids[len] = doc;
self.term_freqs[len] = term_freq;
self.len = len + 1;
}
fn is_full(&self) -> bool {
self.len == COMPRESSION_BLOCK_SIZE
}
fn is_empty(&self) -> bool {
self.len == 0
}
fn last_doc(&self) -> DocId {
assert_eq!(self.len, COMPRESSION_BLOCK_SIZE);
self.doc_ids[COMPRESSION_BLOCK_SIZE - 1]
}
}
pub struct PostingsSerializer {
last_doc_id_encoded: u32,
block_encoder: BlockEncoder,
block: Box<Block>,
postings_write: Vec<u8>,
skip_write: SkipSerializer,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
bm25_weight: Option<Bm25Weight>,
avg_fieldnorm: Score, /* Average number of terms in the field for that segment.
* This value is used to compute the block wand information. */
term_has_freq: bool,
}
impl PostingsSerializer {
pub fn new(
avg_fieldnorm: Score,
mode: IndexRecordOption,
fieldnorm_reader: Option<FieldNormReader>,
) -> PostingsSerializer {
PostingsSerializer {
block_encoder: BlockEncoder::new(),
block: Box::new(Block::new()),
postings_write: Vec::new(),
skip_write: SkipSerializer::new(),
last_doc_id_encoded: 0u32,
mode,
fieldnorm_reader,
bm25_weight: None,
avg_fieldnorm,
term_has_freq: false,
}
}
pub fn new_term(&mut self, term_doc_freq: u32, record_term_freq: bool) {
self.bm25_weight = None;
self.term_has_freq = self.mode.has_freq() && record_term_freq;
if !self.term_has_freq {
return;
}
let num_docs_in_segment: u64 =
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
fieldnorm_reader.num_docs() as u64
} else {
return;
};
if num_docs_in_segment == 0 {
return;
}
self.bm25_weight = Some(Bm25Weight::for_one_term_without_explain(
term_doc_freq as u64,
num_docs_in_segment,
self.avg_fieldnorm,
));
}
fn write_block(&mut self) {
{
// encode the doc ids
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
self.last_doc_id_encoded = self.block.last_doc();
self.skip_write
.write_doc(self.last_doc_id_encoded, num_bits);
// last el block 0, offset block 1,
self.postings_write.extend(block_encoded);
}
if self.term_has_freq {
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_unsorted(self.block.term_freqs(), true);
self.postings_write.extend(block_encoded);
self.skip_write.write_term_freq(num_bits);
if self.mode.has_positions() {
// We serialize the sum of term freqs within the skip information
// in order to navigate through positions.
let sum_freq = self.block.term_freqs().iter().cloned().sum();
self.skip_write.write_total_term_freq(sum_freq);
}
let mut blockwand_params = (0u8, 0u32);
if let Some(bm25_weight) = self.bm25_weight.as_ref() {
if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
let docs = self.block.doc_ids().iter().cloned();
let term_freqs = self.block.term_freqs().iter().cloned();
let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
blockwand_params = fieldnorms
.zip(term_freqs)
.max_by(
|(left_fieldnorm_id, left_term_freq),
(right_fieldnorm_id, right_term_freq)| {
let left_score =
bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
let right_score =
bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
left_score
.partial_cmp(&right_score)
.unwrap_or(Ordering::Equal)
},
)
.unwrap();
}
}
let (fieldnorm_id, term_freq) = blockwand_params;
self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
}
self.block.clear();
}
pub fn write_doc(&mut self, doc_id: DocId, term_freq: u32) {
self.block.append_doc(doc_id, term_freq);
if self.block.is_full() {
self.write_block();
}
}
pub fn close_term(
&mut self,
doc_freq: u32,
output_write: &mut impl std::io::Write,
) -> io::Result<()> {
if !self.block.is_empty() {
// we have doc ids waiting to be written
// this happens when the number of doc ids is
// not a perfect multiple of our block size.
//
// In that case, the remaining part is encoded
// using variable int encoding.
{
let block_encoded = self
.block_encoder
.compress_vint_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
self.postings_write.write_all(block_encoded)?;
}
// ... Idem for term frequencies
if self.term_has_freq {
let block_encoded = self
.block_encoder
.compress_vint_unsorted(self.block.term_freqs());
self.postings_write.write_all(block_encoded)?;
}
self.block.clear();
}
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
let skip_data = self.skip_write.data();
VInt(skip_data.len() as u64).serialize(output_write)?;
output_write.write_all(skip_data)?;
}
output_write.write_all(&self.postings_write[..])?;
self.skip_write.clear();
self.postings_write.clear();
self.bm25_weight = None;
Ok(())
}
fn clear(&mut self) {
self.block.clear();
self.last_doc_id_encoded = 0;
}
}
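
(A crate-internal sketch of the serializer's lifecycle — the same `new_term` / `write_doc` / `close_term` sequence the `create_from_docs` test helper earlier in this diff relies on:)

```rust
// Inside tantivy: serialize a doc-ids-only posting list for one term.
use crate::postings::serializer::PostingsSerializer;
use crate::schema::IndexRecordOption;

fn serialize_term(docs: &[u32]) -> std::io::Result<Vec<u8>> {
    // No fieldnorms, so no bm25/block-wand metadata is produced.
    let mut serializer = PostingsSerializer::new(0.0, IndexRecordOption::Basic, None);
    // `record_term_freq = false`: doc ids only.
    serializer.new_term(docs.len() as u32, false);
    for &doc in docs {
        serializer.write_doc(doc, 1u32);
    }
    let mut buffer = Vec::new();
    // Flushes the trailing partial block as VInts; if the list spans at
    // least one full 128-doc block, skip data is written first.
    serializer.close_term(docs.len() as u32, &mut buffer)?;
    Ok(buffer)
}
```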

View File

@@ -142,6 +142,23 @@ impl SkipReader {
skip_reader
}
pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) {
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0
} else {
TERMINATED
};
self.last_doc_in_previous_block = 0u32;
self.owned_read = data;
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.byte_offset = 0;
self.remaining_docs = doc_freq;
self.position_offset = 0u64;
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
self.read_block_info();
}
}
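
The branch on `doc_freq` mirrors a layout decision made at serialization time in `close_term` above: skip data is only written for terms with at least one full block. A short sketch of the two layouts, assuming `COMPRESSION_BLOCK_SIZE` is 128 as in tantivy:

```
const COMPRESSION_BLOCK_SIZE: u32 = 128; // assumed block size

/// The two posting-list layouts `reset` distinguishes.
#[derive(Debug, PartialEq)]
enum Layout {
    /// At least one full bitpacked block: skip data is present and read eagerly.
    BitpackedWithSkipData,
    /// Fewer than 128 docs: a single vint block, no skip data at all.
    VIntOnly,
}

fn layout_for(doc_freq: u32) -> Layout {
    if doc_freq >= COMPRESSION_BLOCK_SIZE {
        Layout::BitpackedWithSkipData
    } else {
        Layout::VIntOnly
    }
}

fn main() {
    assert_eq!(layout_for(127), Layout::VIntOnly);
    assert_eq!(layout_for(128), Layout::BitpackedWithSkipData);
}
```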
// Returns the block max score for this block if available.
//
// The block max score is available for all full bitpacked blocks,

View File

@@ -2,7 +2,7 @@ use crate::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN, TERMINATED};
use crate::index::SegmentReader;
use crate::query::boost_query::BoostScorer;
use crate::query::explanation::does_not_match;
use crate::query::{box_scorer, EnableScoring, Explanation, Query, Scorer, Weight};
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
use crate::{DocId, Score};
/// Query that matches all of the documents.
@@ -24,9 +24,9 @@ impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer::new(reader.max_doc());
if boost != 1.0 {
Ok(box_scorer(BoostScorer::new(all_scorer, boost)))
Ok(Box::new(BoostScorer::new(all_scorer, boost)))
} else {
Ok(box_scorer(all_scorer))
Ok(Box::new(all_scorer))
}
}

View File

@@ -10,7 +10,7 @@ use crate::postings::TermInfo;
use crate::query::{BitSetDocSet, ConstScorer, Explanation, Scorer, Weight};
use crate::schema::{Field, IndexRecordOption};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::{DocId, DocSet, Score, TantivyError};
use crate::{DocId, Score, TantivyError};
/// A weight struct for Fuzzy Term and Regex Queries
pub struct AutomatonWeight<A> {
@@ -92,9 +92,18 @@ where
let mut term_stream = self.automaton_stream(term_dict)?;
while term_stream.advance() {
let term_info = term_stream.value();
let mut block_segment_postings =
inverted_index.read_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
block_segment_postings.fill_bitset(&mut doc_bitset);
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
for &doc in docs {
doc_bitset.insert(doc);
}
block_segment_postings.advance();
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
let const_scorer = ConstScorer::new(doc_bitset, boost);

View File

@@ -24,13 +24,6 @@ impl BitSetDocSet {
self.cursor_bucket = bucket_addr;
self.cursor_tinybitset = self.docs.tinyset(bucket_addr);
}
/// Returns the number of documents in the bitset.
///
/// This call is not free: it will bitcount the number of bits in the bitset.
pub fn doc_freq(&self) -> u32 {
self.docs.len() as u32
}
}
impl From<BitSet> for BitSetDocSet {

View File

@@ -1,6 +1,5 @@
use std::ops::{Deref, DerefMut};
use crate::codec::postings::PostingsWithBlockMax;
use crate::query::term_query::TermScorer;
use crate::query::Scorer;
use crate::{DocId, DocSet, Score, TERMINATED};
@@ -14,8 +13,8 @@ use crate::{DocId, DocSet, Score, TERMINATED};
/// We always have `before_pivot_len` < `pivot_len`.
///
/// `None` is returned if we establish that no document can exceed the threshold.
fn find_pivot_doc<TPostings: PostingsWithBlockMax>(
term_scorers: &[TermScorerWithMaxScore<TPostings>],
fn find_pivot_doc(
term_scorers: &[TermScorerWithMaxScore],
threshold: Score,
) -> Option<(usize, usize, DocId)> {
let mut max_score = 0.0;
@@ -47,8 +46,8 @@ fn find_pivot_doc<TPostings: PostingsWithBlockMax>(
/// the next doc candidate defined by the min of `last_doc_in_block + 1` for
/// scorer in scorers[..pivot_len] and `scorer.doc()` for scorer in scorers[pivot_len..].
/// Note: before and after calling this method, scorers need to be sorted by their `.doc()`.
fn block_max_was_too_low_advance_one_scorer<TPostings: PostingsWithBlockMax>(
scorers: &mut [TermScorerWithMaxScore<TPostings>],
fn block_max_was_too_low_advance_one_scorer(
scorers: &mut [TermScorerWithMaxScore],
pivot_len: usize,
) {
debug_assert!(is_sorted(scorers.iter().map(|scorer| scorer.doc())));
@@ -83,10 +82,7 @@ fn block_max_was_too_low_advance_one_scorer<TPostings: PostingsWithBlockMax>(
// Given a list of term_scorers and an `ord`, and assuming that `term_scorers` is sorted
// except for `term_scorers[ord]`, which may have advanced past its rank,
// bubble up `term_scorers[ord]` in order to restore the ordering.
fn restore_ordering<TPostings: PostingsWithBlockMax>(
term_scorers: &mut [TermScorerWithMaxScore<TPostings>],
ord: usize,
) {
fn restore_ordering(term_scorers: &mut [TermScorerWithMaxScore], ord: usize) {
let doc = term_scorers[ord].doc();
for i in ord + 1..term_scorers.len() {
if term_scorers[i].doc() >= doc {
@@ -101,10 +97,9 @@ fn restore_ordering<TPostings: PostingsWithBlockMax>(
// If this works, return true.
// If this fails (ie: one of the term_scorers does not contain `pivot_doc` and the seek goes past
// the pivot), reorder the term_scorers to ensure the list is still sorted and return `false`.
// If a term_scorer reach TERMINATED in the process return false remove the term_scorer and
// return.
fn align_scorers<TPostings: PostingsWithBlockMax>(
term_scorers: &mut Vec<TermScorerWithMaxScore<TPostings>>,
// If a term_scorer reaches TERMINATED in the process, remove it and return `false`.
fn align_scorers(
term_scorers: &mut Vec<TermScorerWithMaxScore>,
pivot_doc: DocId,
before_pivot_len: usize,
) -> bool {
@@ -131,10 +126,7 @@ fn align_scorers<TPostings: PostingsWithBlockMax>(
// Assumes terms_scorers[..pivot_len] are positioned on the same doc (pivot_doc).
// Advance term_scorers[..pivot_len] and out of these removes the terminated scores.
// Restores the ordering of term_scorers.
fn advance_all_scorers_on_pivot<TPostings: PostingsWithBlockMax>(
term_scorers: &mut Vec<TermScorerWithMaxScore<TPostings>>,
pivot_len: usize,
) {
fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>, pivot_len: usize) {
for term_scorer in &mut term_scorers[..pivot_len] {
term_scorer.advance();
}
@@ -153,12 +145,12 @@ fn advance_all_scorers_on_pivot<TPostings: PostingsWithBlockMax>(
/// Implements the WAND (Weak AND) algorithm for dynamic pruning
/// described in the paper "Faster Top-k Document Retrieval Using Block-Max Indexes".
/// Link: <http://engineering.nyu.edu/~suel/papers/bmw.pdf>
pub fn block_wand<TPostings: PostingsWithBlockMax>(
mut scorers: Vec<TermScorer<TPostings>>,
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: Score,
callback: &mut dyn FnMut(u32, Score) -> Score,
) {
let mut scorers: Vec<TermScorerWithMaxScore<TPostings>> = scorers
let mut scorers: Vec<TermScorerWithMaxScore> = scorers
.iter_mut()
.map(TermScorerWithMaxScore::from)
.collect();
@@ -174,7 +166,10 @@ pub fn block_wand<TPostings: PostingsWithBlockMax>(
let block_max_score_upperbound: Score = scorers[..pivot_len]
.iter_mut()
.map(|scorer| scorer.seek_block_max(pivot_doc))
.map(|scorer| {
scorer.seek_block(pivot_doc);
scorer.block_max_score()
})
.sum();
// Beware after shallow advance, skip readers can be in advance compared to
@@ -225,22 +220,21 @@ pub fn block_wand<TPostings: PostingsWithBlockMax>(
/// - On a block, advance until the end and execute `callback` when the doc score is greater
///   than or equal to the `threshold`.
pub fn block_wand_single_scorer(
mut scorer: TermScorer<impl PostingsWithBlockMax>,
mut scorer: TermScorer,
mut threshold: Score,
callback: &mut dyn FnMut(u32, Score) -> Score,
) {
let mut doc = scorer.doc();
let mut block_max_score = scorer.seek_block_max(doc);
loop {
// We position the scorer on a block that can reach
// the threshold.
while block_max_score < threshold {
while scorer.block_max_score() < threshold {
let last_doc_in_block = scorer.last_doc_in_block();
if last_doc_in_block == TERMINATED {
return;
}
doc = last_doc_in_block + 1;
block_max_score = scorer.seek_block_max(doc);
scorer.seek_block(doc);
}
// Seek will effectively load that block.
doc = scorer.seek(doc);
@@ -262,33 +256,31 @@ pub fn block_wand_single_scorer(
}
}
doc += 1;
block_max_score = scorer.seek_block_max(doc);
scorer.seek_block(doc);
}
}
struct TermScorerWithMaxScore<'a, TPostings: PostingsWithBlockMax> {
scorer: &'a mut TermScorer<TPostings>,
struct TermScorerWithMaxScore<'a> {
scorer: &'a mut TermScorer,
max_score: Score,
}
impl<'a, TPostings: PostingsWithBlockMax> From<&'a mut TermScorer<TPostings>>
for TermScorerWithMaxScore<'a, TPostings>
{
fn from(scorer: &'a mut TermScorer<TPostings>) -> Self {
impl<'a> From<&'a mut TermScorer> for TermScorerWithMaxScore<'a> {
fn from(scorer: &'a mut TermScorer) -> Self {
let max_score = scorer.max_score();
TermScorerWithMaxScore { scorer, max_score }
}
}
impl<TPostings: PostingsWithBlockMax> Deref for TermScorerWithMaxScore<'_, TPostings> {
type Target = TermScorer<TPostings>;
impl Deref for TermScorerWithMaxScore<'_> {
type Target = TermScorer;
fn deref(&self) -> &Self::Target {
self.scorer
}
}
impl<TPostings: PostingsWithBlockMax> DerefMut for TermScorerWithMaxScore<'_, TPostings> {
impl DerefMut for TermScorerWithMaxScore<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.scorer
}
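
As a reading aid for the signatures above, here is a simplified, self-contained sketch of the pivot selection performed by `find_pivot_doc`. The real function also returns `before_pivot_len` and `pivot_len`; this version only returns the pivot doc, and assumes the scorers are sorted by their current doc:

```
/// Simplified pivot selection for block-max WAND: accumulate each term's
/// global max score in doc order; the first doc at which the accumulated
/// upper bound exceeds the threshold is the only candidate worth scoring.
fn find_pivot(scorers_sorted_by_doc: &[(u32, f32)], threshold: f32) -> Option<u32> {
    let mut upper_bound = 0.0f32;
    for &(doc, max_score) in scorers_sorted_by_doc {
        upper_bound += max_score;
        if upper_bound > threshold {
            return Some(doc);
        }
    }
    // No combination of terms can exceed the threshold.
    None
}

fn main() {
    // Three term scorers positioned on docs 3, 7, 9 with max score 1.0 each.
    assert_eq!(find_pivot(&[(3, 1.0), (7, 1.0), (9, 1.0)], 1.5), Some(7));
    assert_eq!(find_pivot(&[(3, 1.0), (7, 1.0), (9, 1.0)], 5.0), None);
}
```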

View File

@@ -2,16 +2,23 @@ use std::collections::HashMap;
use crate::docset::COLLECT_BLOCK_BUFFER_LEN;
use crate::index::SegmentReader;
use crate::postings::FreqReadingOption;
use crate::query::disjunction::Disjunction;
use crate::query::explanation::does_not_match;
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
use crate::query::weight::{for_each_docset_buffered, for_each_scorer};
use crate::query::term_query::TermScorer;
use crate::query::weight::{for_each_docset_buffered, for_each_pruning_scorer, for_each_scorer};
use crate::query::{
box_scorer, intersect_scorers, AllScorer, BufferedUnionScorer, EmptyScorer, Exclude,
Explanation, Occur, RequiredOptionalScorer, Scorer, Weight,
intersect_scorers, AllScorer, BufferedUnionScorer, EmptyScorer, Exclude, Explanation, Occur,
RequiredOptionalScorer, Scorer, Weight,
};
use crate::{DocId, Score};
enum SpecializedScorer {
TermUnion(Vec<TermScorer>),
Other(Box<dyn Scorer>),
}
fn scorer_disjunction<TScoreCombiner>(
scorers: Vec<Box<dyn Scorer>>,
score_combiner: TScoreCombiner,
@@ -25,7 +32,7 @@ where
if scorers.len() == 1 {
return scorers.into_iter().next().unwrap(); // Safe unwrap.
}
box_scorer(Disjunction::new(
Box::new(Disjunction::new(
scorers,
score_combiner,
minimum_match_required,
@@ -37,18 +44,56 @@ fn scorer_union<TScoreCombiner>(
scorers: Vec<Box<dyn Scorer>>,
score_combiner_fn: impl Fn() -> TScoreCombiner,
num_docs: u32,
) -> Box<dyn Scorer>
) -> SpecializedScorer
where
TScoreCombiner: ScoreCombiner,
{
match scorers.len() {
0 => box_scorer(EmptyScorer),
1 => scorers.into_iter().next().unwrap(),
_ => box_scorer(BufferedUnionScorer::build(
scorers,
score_combiner_fn,
num_docs,
)),
assert!(!scorers.is_empty());
if scorers.len() == 1 {
return SpecializedScorer::Other(scorers.into_iter().next().unwrap()); //< we checked the size beforehand
}
{
let is_all_term_queries = scorers.iter().all(|scorer| scorer.is::<TermScorer>());
if is_all_term_queries {
let scorers: Vec<TermScorer> = scorers
.into_iter()
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap()))
.collect();
if scorers
.iter()
.all(|scorer| scorer.freq_reading_option() == FreqReadingOption::ReadFreq)
{
// Block wand is only available if we read frequencies.
return SpecializedScorer::TermUnion(scorers);
} else {
return SpecializedScorer::Other(Box::new(BufferedUnionScorer::build(
scorers,
score_combiner_fn,
num_docs,
)));
}
}
}
SpecializedScorer::Other(Box::new(BufferedUnionScorer::build(
scorers,
score_combiner_fn,
num_docs,
)))
}
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(
scorer: SpecializedScorer,
score_combiner_fn: impl Fn() -> TScoreCombiner,
num_docs: u32,
) -> Box<dyn Scorer> {
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let union_scorer =
BufferedUnionScorer::build(term_scorers, score_combiner_fn, num_docs);
Box::new(union_scorer)
}
SpecializedScorer::Other(scorer) => scorer,
}
}
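
The `SpecializedScorer` split hinges on an all-or-nothing downcast: only when every boxed `dyn Scorer` is actually a `TermScorer` can the union take the BlockWAND path. A generic sketch of the pattern, using `std::any::Any` in place of the crate's downcast support:

```
use std::any::Any;

/// If every boxed trait object is the same concrete type `T`, unbox them all;
/// otherwise hand the originals back so the caller can take the generic path.
fn downcast_all<T: Any>(items: Vec<Box<dyn Any>>) -> Result<Vec<T>, Vec<Box<dyn Any>>> {
    if items.iter().all(|item| item.is::<T>()) {
        Ok(items
            .into_iter()
            .map(|item| *item.downcast::<T>().unwrap()) // checked above
            .collect())
    } else {
        Err(items)
    }
}

fn main() {
    let boxed: Vec<Box<dyn Any>> = vec![Box::new(1u32), Box::new(2u32)];
    assert_eq!(downcast_all::<u32>(boxed).unwrap(), vec![1, 2]);
}
```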
@@ -65,7 +110,7 @@ fn effective_must_scorer(
if must_scorers.is_empty() {
if removed_all_scorer_count > 0 {
// Had AllScorer(s) only - all docs match
Some(box_scorer(AllScorer::new(max_doc)))
Some(Box::new(AllScorer::new(max_doc)))
} else {
// No MUST constraint at all
None
@@ -83,26 +128,28 @@ fn effective_must_scorer(
/// When `scoring_enabled` is false, we can just return AllScorer alone since
/// we don't need score contributions from the should_scorer.
fn effective_should_scorer_for_union<TScoreCombiner: ScoreCombiner>(
should_scorer: Box<dyn Scorer>,
should_scorer: SpecializedScorer,
removed_all_scorer_count: usize,
max_doc: DocId,
num_docs: u32,
score_combiner_fn: impl Fn() -> TScoreCombiner,
scoring_enabled: bool,
) -> Box<dyn Scorer> {
) -> SpecializedScorer {
if removed_all_scorer_count > 0 {
if scoring_enabled {
// Need to union to get score contributions from both
let all_scorers: Vec<Box<dyn Scorer>> =
vec![should_scorer, box_scorer(AllScorer::new(max_doc))];
box_scorer(BufferedUnionScorer::build(
let all_scorers: Vec<Box<dyn Scorer>> = vec![
into_box_scorer(should_scorer, &score_combiner_fn, num_docs),
Box::new(AllScorer::new(max_doc)),
];
SpecializedScorer::Other(Box::new(BufferedUnionScorer::build(
all_scorers,
score_combiner_fn,
num_docs,
))
)))
} else {
// Scoring disabled - AllScorer alone is sufficient
box_scorer(AllScorer::new(max_doc))
SpecializedScorer::Other(Box::new(AllScorer::new(max_doc)))
}
} else {
should_scorer
@@ -113,9 +160,9 @@ enum ShouldScorersCombinationMethod {
// Should scorers are irrelevant.
Ignored,
// Only contributes to final score.
Optional(Box<dyn Scorer>),
Optional(SpecializedScorer),
// Regardless of score, the should scorers may impact whether a document is matching or not.
Required(Box<dyn Scorer>),
Required(SpecializedScorer),
}
/// Weight associated to the `BoolQuery`.
@@ -177,7 +224,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
reader: &SegmentReader,
boost: Score,
score_combiner_fn: impl Fn() -> TComplexScoreCombiner,
) -> crate::Result<Box<dyn Scorer>> {
) -> crate::Result<SpecializedScorer> {
let num_docs = reader.num_docs();
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
@@ -187,7 +234,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
let must_special_scorer_counts = remove_and_count_all_and_empty_scorers(&mut must_scorers);
if must_special_scorer_counts.num_empty_scorers > 0 {
return Ok(box_scorer(EmptyScorer));
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
let mut should_scorers = per_occur_scorers.remove(&Occur::Should).unwrap_or_default();
@@ -202,7 +249,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
if exclude_special_scorer_counts.num_all_scorers > 0 {
// We exclude all documents at one point.
return Ok(box_scorer(EmptyScorer));
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
let effective_minimum_number_should_match = self
@@ -214,7 +261,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
if effective_minimum_number_should_match > num_of_should_scorers {
// We don't have enough scorers to satisfy the minimum number of should matches.
// The request will match no documents.
return Ok(box_scorer(EmptyScorer));
return Ok(SpecializedScorer::Other(Box::new(EmptyScorer)));
}
match effective_minimum_number_should_match {
0 if num_of_should_scorers == 0 => ShouldScorersCombinationMethod::Ignored,
@@ -234,10 +281,12 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
must_scorers.append(&mut should_scorers);
ShouldScorersCombinationMethod::Ignored
}
_ => ShouldScorersCombinationMethod::Required(scorer_disjunction(
should_scorers,
score_combiner_fn(),
effective_minimum_number_should_match,
_ => ShouldScorersCombinationMethod::Required(SpecializedScorer::Other(
scorer_disjunction(
should_scorers,
score_combiner_fn(),
effective_minimum_number_should_match,
),
)),
}
};
@@ -245,9 +294,13 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
let exclude_scorer_opt: Option<Box<dyn Scorer>> = if exclude_scorers.is_empty() {
None
} else {
let exclude_scorers_union: Box<dyn Scorer> =
let exclude_specialized_scorer: SpecializedScorer =
scorer_union(exclude_scorers, DoNothingCombiner::default, num_docs);
Some(exclude_scorers_union)
Some(into_box_scorer(
exclude_specialized_scorer,
DoNothingCombiner::default,
num_docs,
))
};
let include_scorer = match (should_scorers, must_scorers) {
@@ -262,8 +315,8 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
reader.max_doc(),
num_docs,
)
.unwrap_or_else(|| box_scorer(EmptyScorer));
boxed_scorer
.unwrap_or_else(|| Box::new(EmptyScorer));
SpecializedScorer::Other(boxed_scorer)
}
(ShouldScorersCombinationMethod::Optional(should_scorer), must_scorers) => {
// Optional SHOULD: contributes to scoring but not required for matching.
@@ -288,12 +341,16 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
Some(must_scorer) => {
// Has MUST constraint: SHOULD only affects scoring.
if self.scoring_enabled {
box_scorer(RequiredOptionalScorer::<_, _, TScoreCombiner>::new(
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
_,
_,
TScoreCombiner,
>::new(
must_scorer,
should_scorer,
))
into_box_scorer(should_scorer, &score_combiner_fn, num_docs),
)))
} else {
must_scorer
SpecializedScorer::Other(must_scorer)
}
}
}
@@ -313,13 +370,23 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
}
Some(must_scorer) => {
// Has MUST constraint: intersect MUST with SHOULD.
intersect_scorers(vec![must_scorer, should_scorer], num_docs)
let should_boxed =
into_box_scorer(should_scorer, &score_combiner_fn, num_docs);
SpecializedScorer::Other(intersect_scorers(
vec![must_scorer, should_boxed],
num_docs,
))
}
}
}
};
if let Some(exclude_scorer) = exclude_scorer_opt {
Ok(box_scorer(Exclude::new(include_scorer, exclude_scorer)))
let include_scorer_boxed =
into_box_scorer(include_scorer, &score_combiner_fn, num_docs);
Ok(SpecializedScorer::Other(Box::new(Exclude::new(
include_scorer_boxed,
exclude_scorer,
))))
} else {
Ok(include_scorer)
}
@@ -352,6 +419,7 @@ fn remove_and_count_all_and_empty_scorers(
impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombiner> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let num_docs = reader.num_docs();
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
} else if self.weights.len() == 1 {
@@ -363,8 +431,14 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
}
} else if self.scoring_enabled {
self.complex_scorer(reader, boost, &self.score_combiner_fn)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, &self.score_combiner_fn, num_docs)
})
} else {
self.complex_scorer(reader, boost, DoNothingCombiner::default)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, DoNothingCombiner::default, num_docs)
})
}
}
@@ -393,8 +467,20 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
for_each_scorer(scorer.as_mut(), callback);
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = BufferedUnionScorer::build(
term_scorers,
&self.score_combiner_fn,
reader.num_docs(),
);
for_each_scorer(&mut union_scorer, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_scorer(scorer.as_mut(), callback);
}
}
Ok(())
}
@@ -403,9 +489,22 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
) -> crate::Result<()> {
let mut scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];
for_each_docset_buffered(scorer.as_mut(), &mut buffer, callback);
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = BufferedUnionScorer::build(
term_scorers,
&self.score_combiner_fn,
reader.num_docs(),
);
for_each_docset_buffered(&mut union_scorer, &mut buffer, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_docset_buffered(scorer.as_mut(), &mut buffer, callback);
}
}
Ok(())
}
@@ -426,7 +525,14 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
reader.codec.for_each_pruning(threshold, scorer, callback);
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);
}
SpecializedScorer::Other(mut scorer) => {
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
}
}
Ok(())
}
}

View File

@@ -1,6 +1,8 @@
mod block_wand;
mod boolean_query;
mod boolean_weight;
pub(crate) use self::block_wand::{block_wand, block_wand_single_scorer};
pub use self::boolean_query::BooleanQuery;
pub use self::boolean_weight::BooleanWeight;

View File

@@ -1,6 +1,6 @@
use std::fmt;
use crate::docset::COLLECT_BLOCK_BUFFER_LEN;
use crate::docset::{SeekDangerResult, COLLECT_BLOCK_BUFFER_LEN};
use crate::fastfield::AliveBitSet;
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, SegmentReader, Term};
@@ -104,8 +104,8 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
self.underlying.seek_into_the_danger_zone(target)
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
self.underlying.seek_danger(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {

View File

@@ -1,7 +1,7 @@
use std::fmt;
use crate::docset::COLLECT_BLOCK_BUFFER_LEN;
use crate::query::{box_scorer, EnableScoring, Explanation, Query, Scorer, Weight};
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, Term};
/// `ConstScoreQuery` is a wrapper over a query to provide a constant score.
@@ -65,10 +65,7 @@ impl ConstWeight {
impl Weight for ConstWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost)?;
Ok(box_scorer(ConstScorer::new(
inner_scorer,
boost * self.score,
)))
Ok(Box::new(ConstScorer::new(inner_scorer, boost * self.score)))
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {

View File

@@ -1,6 +1,7 @@
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use crate::docset::SeekDangerResult;
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ScoreCombiner, Scorer};
use crate::{DocId, DocSet, Score, TERMINATED};
@@ -67,10 +68,12 @@ impl<T: Scorer> DocSet for ScorerWrapper<T> {
self.current_doc = doc_id;
doc_id
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
let found = self.scorer.seek_into_the_danger_zone(target);
self.current_doc = self.scorer.doc();
found
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
let result = self.scorer.seek_danger(target);
if result == SeekDangerResult::Found {
self.current_doc = target;
}
result
}
fn doc(&self) -> DocId {

View File

@@ -2,7 +2,7 @@ use super::Scorer;
use crate::docset::TERMINATED;
use crate::index::SegmentReader;
use crate::query::explanation::does_not_match;
use crate::query::{box_scorer, EnableScoring, Explanation, Query, Weight};
use crate::query::{EnableScoring, Explanation, Query, Weight};
use crate::{DocId, DocSet, Score, Searcher};
/// `EmptyQuery` is a dummy `Query` in which no document matches.
@@ -27,7 +27,7 @@ impl Query for EmptyQuery {
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
Ok(box_scorer(EmptyScorer))
Ok(Box::new(EmptyScorer))
}
fn explain(&self, _reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {

View File

@@ -3,7 +3,7 @@ use core::fmt::Debug;
use columnar::{ColumnIndex, DynamicColumn};
use common::BitSet;
use super::{box_scorer, ConstScorer, EmptyScorer};
use super::{ConstScorer, EmptyScorer};
use crate::docset::{DocSet, TERMINATED};
use crate::index::SegmentReader;
use crate::query::all_query::AllScorer;
@@ -117,7 +117,7 @@ impl Weight for ExistsWeight {
}
}
if non_empty_columns.is_empty() {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
}
// If any column is full, all docs match.
@@ -128,9 +128,9 @@ impl Weight for ExistsWeight {
{
let all_scorer = AllScorer::new(max_doc);
if boost != 1.0f32 {
return Ok(box_scorer(BoostScorer::new(all_scorer, boost)));
return Ok(Box::new(BoostScorer::new(all_scorer, boost)));
} else {
return Ok(box_scorer(all_scorer));
return Ok(Box::new(all_scorer));
}
}
@@ -138,7 +138,7 @@ impl Weight for ExistsWeight {
// NOTE: A lower number may be better for very sparse columns
if non_empty_columns.len() < 4 {
let docset = ExistsDocSet::new(non_empty_columns, reader.max_doc());
return Ok(box_scorer(ConstScorer::new(docset, boost)));
return Ok(Box::new(ConstScorer::new(docset, boost)));
}
// If we have many dynamic columns, precompute a bitset of matching docs
@@ -162,7 +162,7 @@ impl Weight for ExistsWeight {
}
}
let docset = BitSetDocSet::from(doc_bitset);
Ok(box_scorer(ConstScorer::new(docset, boost)))
Ok(Box::new(ConstScorer::new(docset, boost)))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {

View File

@@ -1,7 +1,7 @@
use super::size_hint::estimate_intersection;
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::query::term_query::TermScorer;
use crate::query::{box_scorer, EmptyScorer, Scorer};
use crate::query::{EmptyScorer, Scorer};
use crate::{DocId, Score};
/// Returns the intersection scorer.
@@ -20,7 +20,7 @@ pub fn intersect_scorers(
num_docs_segment: u32,
) -> Box<dyn Scorer> {
if scorers.is_empty() {
return box_scorer(EmptyScorer);
return Box::new(EmptyScorer);
}
if scorers.len() == 1 {
return scorers.pop().unwrap();
@@ -29,7 +29,7 @@ pub fn intersect_scorers(
scorers.sort_by_key(|scorer| scorer.cost());
let doc = go_to_first_doc(&mut scorers[..]);
if doc == TERMINATED {
return box_scorer(EmptyScorer);
return Box::new(EmptyScorer);
}
// We know that we have at least 2 elements.
let left = scorers.remove(0);
@@ -38,14 +38,14 @@ pub fn intersect_scorers(
.iter()
.all(|&scorer| scorer.is::<TermScorer>());
if all_term_scorers {
return box_scorer(Intersection {
return Box::new(Intersection {
left: *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
right: *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
others: scorers,
num_docs: num_docs_segment,
});
}
box_scorer(Intersection {
Box::new(Intersection {
left,
right,
others: scorers,
@@ -108,46 +108,63 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
#[inline]
fn advance(&mut self) -> DocId {
let (left, right) = (&mut self.left, &mut self.right);
let mut candidate = left.advance();
if candidate == TERMINATED {
return TERMINATED;
}
loop {
// In the first part we look for a document in the intersection
// of the two rarest `DocSet` in the intersection.
// Invariant:
// - candidate is always <= the next document in the intersection.
// - candidate strictly increases at every iteration of the loop.
let mut candidate = 0;
loop {
if right.seek_into_the_danger_zone(candidate) {
break;
}
let right_doc = right.doc();
// TODO: Think about which value would make sense here
// It depends on the DocSet implementation, when a seek would outweigh an advance.
if right_doc > candidate.wrapping_add(100) {
candidate = left.seek(right_doc);
} else {
candidate = left.advance();
}
if candidate == TERMINATED {
return TERMINATED;
}
}
// Termination: candidate strictly increases.
'outer: while candidate < TERMINATED {
// As we enter the loop, we should always have candidate < next_doc.
debug_assert_eq!(left.doc(), right.doc());
// test the remaining scorers
if self
.others
.iter_mut()
.all(|docset| docset.seek_into_the_danger_zone(candidate))
// This step always increases candidate.
//
// TODO: Think about which value would make sense here.
// It depends on the DocSet implementation and on when a seek outweighs an advance.
candidate = if candidate > left.doc().wrapping_add(100) {
left.seek(candidate)
} else {
left.advance()
};
// Left is positioned on `candidate`.
debug_assert_eq!(left.doc(), candidate);
if let SeekDangerResult::SeekLowerBound(seek_lower_bound) = right.seek_danger(candidate)
{
debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
// The max is technically useless but it makes the invariant
// easier to proofread.
debug_assert!(seek_lower_bound >= candidate);
candidate = seek_lower_bound;
continue;
}
candidate = left.advance();
// Left and right are positioned on `candidate`.
debug_assert_eq!(right.doc(), candidate);
for other in &mut self.others {
if let SeekDangerResult::SeekLowerBound(seek_lower_bound) =
other.seek_danger(candidate)
{
// One of the scorers does not match; restart at the top of the loop.
debug_assert!(seek_lower_bound >= candidate);
candidate = seek_lower_bound;
continue 'outer;
}
}
// At this point all scorers are in a valid state, aligned on the next document in the
// intersection.
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
}
// We make sure our docset is in a valid state.
// In particular, we want .doc() to return TERMINATED.
left.seek(TERMINATED);
TERMINATED
}
fn seek(&mut self, target: DocId) -> DocId {
@@ -166,13 +183,19 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
///
/// Some implementations may choose to advance past the target if beneficial for performance.
/// Returns `SeekDangerResult::Found` if the target is in the docset, and
/// `SeekDangerResult::SeekLowerBound(_)` with a lower bound for the next candidate otherwise.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
self.left.seek_into_the_danger_zone(target)
&& self.right.seek_into_the_danger_zone(target)
&& self
.others
.iter_mut()
.all(|docset| docset.seek_into_the_danger_zone(target))
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
if let SeekDangerResult::SeekLowerBound(new_target) = self.left.seek_danger(target) {
return SeekDangerResult::SeekLowerBound(new_target);
}
if let SeekDangerResult::SeekLowerBound(new_target) = self.right.seek_danger(target) {
return SeekDangerResult::SeekLowerBound(new_target);
}
for docset in &mut self.others {
if let SeekDangerResult::SeekLowerBound(new_target) = docset.seek_danger(target) {
return SeekDangerResult::SeekLowerBound(new_target);
}
}
SeekDangerResult::Found
}
#[inline]
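
With the boolean contract gone, callers of `seek_danger` must handle the lower-bound case explicitly. Below is a minimal sketch of a driving loop under the `SeekDangerResult` contract shown in this diff (`Found` / `SeekLowerBound(DocId)`); the `max(target + 1)` bump exists only to guarantee progress in the sketch:

```
use crate::docset::{DocSet, SeekDangerResult};
use crate::{DocId, TERMINATED};

/// Returns the first match at or after `target`, never treating
/// TERMINATED as a member of the docset. Assumes `target < TERMINATED`.
fn next_match(docset: &mut impl DocSet, mut target: DocId) -> DocId {
    loop {
        match docset.seek_danger(target) {
            SeekDangerResult::Found => return target,
            SeekDangerResult::SeekLowerBound(TERMINATED) => return TERMINATED,
            // `lb` is a lower bound for the next possible match.
            SeekDangerResult::SeekLowerBound(lb) => target = lb.max(target + 1),
        }
    }
}
```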
@@ -304,6 +327,58 @@ mod tests {
assert_eq!(intersection.doc(), TERMINATED);
}
#[test]
fn test_intersection_abc() {
let a = VecDocSet::from(vec![2, 3, 6]);
let b = VecDocSet::from(vec![1, 3, 5]);
let c = VecDocSet::from(vec![1, 3, 5]);
let mut intersection = Intersection::new(vec![c, b, a], 10);
let mut docs = Vec::new();
use crate::DocSet;
while intersection.doc() != TERMINATED {
docs.push(intersection.doc());
intersection.advance();
}
assert_eq!(&docs, &[3]);
}
#[test]
fn test_intersection_termination() {
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{BufferedUnionScorer, ConstScorer, VecDocSet};
let a1 = ConstScorer::new(VecDocSet::from(vec![0u32, 10000]), 1.0);
let a2 = ConstScorer::new(VecDocSet::from(vec![0u32, 10000]), 1.0);
let mut b_scorers = vec![];
for _ in 0..2 {
// Union matches 0 and 10000.
b_scorers.push(ConstScorer::new(VecDocSet::from(vec![0, 10000]), 1.0));
}
// That's the union of two scores matching 0, and 10_000.
let union = BufferedUnionScorer::build(b_scorers, DoNothingCombiner::default, 30000);
// Mismatching scorer: matches 0 and 20000. We then append more docs at the end to ensure it
// is last.
let mut m_docs = vec![0, 20000];
for i in 30000..30100 {
m_docs.push(i);
}
let m = ConstScorer::new(VecDocSet::from(m_docs), 1.0);
// Costs: A1=2, A2=2, Union=4, M=102.
// Sorted: A1, A2, Union, M.
// Left=A1, Right=A2, Others=[Union, M].
let mut intersection = crate::query::intersect_scorers(
vec![Box::new(a1), Box::new(a2), Box::new(union), Box::new(m)],
40000,
);
while intersection.doc() != TERMINATED {
intersection.advance();
}
}
// Strategy to generate sorted and deduplicated vectors of u32 document IDs
fn sorted_deduped_vec(max_val: u32, max_size: usize) -> impl Strategy<Value = Vec<u32>> {
prop::collection::vec(0..max_val, 0..max_size).prop_map(|mut vec| {
@@ -335,6 +410,5 @@ mod tests {
}
assert_eq!(intersection.doc(), TERMINATED);
}
}
}

View File

@@ -24,7 +24,7 @@ mod reqopt_scorer;
mod scorer;
mod set_query;
mod size_hint;
pub(crate) mod term_query;
mod term_query;
mod union;
mod weight;
@@ -54,14 +54,13 @@ pub use self::more_like_this::{MoreLikeThisQuery, MoreLikeThisQueryBuilder};
pub use self::phrase_prefix_query::PhrasePrefixQuery;
pub use self::phrase_query::regex_phrase_query::{wildcard_query_to_regex_str, RegexPhraseQuery};
pub use self::phrase_query::PhraseQuery;
pub(crate) use self::phrase_query::PhraseScorer;
pub use self::query::{EnableScoring, Query, QueryClone};
pub use self::query_parser::{QueryParser, QueryParserError};
pub use self::range_query::*;
pub use self::regex_query::RegexQuery;
pub use self::reqopt_scorer::RequiredOptionalScorer;
pub use self::score_combiner::{DisjunctionMaxCombiner, ScoreCombiner, SumCombiner};
pub use self::scorer::{box_scorer, Scorer};
pub use self::scorer::Scorer;
pub use self::set_query::TermSetQuery;
pub use self::term_query::TermQuery;
pub use self::union::BufferedUnionScorer;

View File

@@ -1,8 +1,8 @@
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::bm25::Bm25Weight;
use crate::query::phrase_query::{intersection_exists, PhraseScorer};
use crate::query::phrase_query::{intersection_count, PhraseScorer};
use crate::query::Scorer;
use crate::{DocId, Score};
@@ -100,6 +100,7 @@ pub struct PhrasePrefixScorer<TPostings: Postings> {
phrase_scorer: PhraseKind<TPostings>,
suffixes: Vec<TPostings>,
suffix_offset: u32,
phrase_count: u32,
suffix_position_buffer: Vec<u32>,
}
@@ -143,6 +144,7 @@ impl<TPostings: Postings> PhrasePrefixScorer<TPostings> {
phrase_scorer,
suffixes,
suffix_offset: (max_offset - suffix_pos) as u32,
phrase_count: 0,
suffix_position_buffer: Vec::with_capacity(100),
};
if phrase_prefix_scorer.doc() != TERMINATED && !phrase_prefix_scorer.matches_prefix() {
@@ -151,7 +153,12 @@ impl<TPostings: Postings> PhrasePrefixScorer<TPostings> {
phrase_prefix_scorer
}
pub fn phrase_count(&self) -> u32 {
self.phrase_count
}
fn matches_prefix(&mut self) -> bool {
let mut count = 0;
let current_doc = self.doc();
let pos_matching = self.phrase_scorer.get_intersection();
for suffix in &mut self.suffixes {
@@ -161,12 +168,11 @@ impl<TPostings: Postings> PhrasePrefixScorer<TPostings> {
let doc = suffix.seek(current_doc);
if doc == current_doc {
suffix.positions_with_offset(self.suffix_offset, &mut self.suffix_position_buffer);
if intersection_exists(pos_matching, &self.suffix_position_buffer) {
return true;
}
count += intersection_count(pos_matching, &self.suffix_position_buffer);
}
}
false
self.phrase_count = count as u32;
count != 0
}
}
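
`matches_prefix` now accumulates `intersection_count` across suffixes instead of short-circuiting on `intersection_exists`, which is what makes `phrase_count`, and with it scoring and `explain`, possible. The counting primitive is the classic two-pointer walk over sorted position lists; a self-contained sketch:

```
use std::cmp::Ordering;

/// Counts the common elements of two sorted `u32` slices.
fn intersection_count(left: &[u32], right: &[u32]) -> usize {
    let (mut li, mut ri, mut count) = (0, 0, 0);
    while li < left.len() && ri < right.len() {
        match left[li].cmp(&right[ri]) {
            Ordering::Less => li += 1,
            Ordering::Greater => ri += 1,
            Ordering::Equal => {
                count += 1;
                li += 1;
                ri += 1;
            }
        }
    }
    count
}

fn main() {
    // Positions where the phrase body matched vs. positions of one suffix.
    assert_eq!(intersection_count(&[2, 5, 9], &[1, 5, 9, 12]), 2);
}
```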
@@ -188,11 +194,16 @@ impl<TPostings: Postings> DocSet for PhrasePrefixScorer<TPostings> {
self.advance()
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
if self.phrase_scorer.seek_into_the_danger_zone(target) {
self.matches_prefix()
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
let seek_res = self.phrase_scorer.seek_danger(target);
if seek_res != SeekDangerResult::Found {
return seek_res;
}
// The intersection matched. Now let's see if we match the prefix.
if self.matches_prefix() {
SeekDangerResult::Found
} else {
false
SeekDangerResult::SeekLowerBound(target + 1)
}
}

View File

@@ -1,11 +1,12 @@
use super::{prefix_end, PhrasePrefixScorer};
use crate::fieldnorm::FieldNormReader;
use crate::index::SegmentReader;
use crate::postings::Postings;
use crate::postings::SegmentPostings;
use crate::query::bm25::Bm25Weight;
use crate::query::{box_scorer, EmptyScorer, Explanation, Scorer, Weight};
use crate::query::explanation::does_not_match;
use crate::query::{EmptyScorer, Explanation, Scorer, Weight};
use crate::schema::{IndexRecordOption, Term};
use crate::{DocId, Score};
use crate::{DocId, DocSet, Score};
pub struct PhrasePrefixWeight {
phrase_terms: Vec<(usize, Term)>,
@@ -45,13 +46,13 @@ impl PhrasePrefixWeight {
&self,
reader: &SegmentReader,
boost: Score,
) -> crate::Result<Option<Box<dyn Scorer>>> {
) -> crate::Result<Option<PhrasePrefixScorer<SegmentPostings>>> {
let similarity_weight_opt = self
.similarity_weight_opt
.as_ref()
.map(|similarity_weight| similarity_weight.boost_by(boost));
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let mut term_postings_list: Vec<(usize, Box<dyn Postings>)> = Vec::new();
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader
.inverted_index(term.field())?
@@ -102,44 +103,42 @@ impl PhrasePrefixWeight {
}
}
// TODO make this specialized.
Ok(Some(box_scorer(PhrasePrefixScorer::new(
Ok(Some(PhrasePrefixScorer::new(
term_postings_list,
similarity_weight_opt,
fieldnorm_reader,
suffixes,
self.prefix.0,
))))
)))
}
}
impl Weight for PhrasePrefixWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(scorer)
Ok(Box::new(scorer))
} else {
Ok(box_scorer(EmptyScorer))
Ok(Box::new(EmptyScorer))
}
}
fn explain(&self, _reader: &SegmentReader, _doc: DocId) -> crate::Result<Explanation> {
todo!();
// let scorer_opt = self.phrase_scorer(reader, 1.0)?;
// if scorer_opt.is_none() {
// return Err(does_not_match(doc));
// }
// let mut scorer = scorer_opt.unwrap();
// if scorer.seek(doc) != doc {
// return Err(does_not_match(doc));
// }
// let fieldnorm_reader = self.fieldnorm_reader(reader)?;
// let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
// let phrase_count = scorer.phrase_count();
// let mut explanation = Explanation::new("Phrase Prefix Scorer", scorer.score());
// if let Some(similarity_weight) = self.similarity_weight_opt.as_ref() {
// explanation.add_detail(similarity_weight.explain(fieldnorm_id, phrase_count));
// }
// Ok(explanation)
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
let mut scorer = scorer_opt.unwrap();
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
let phrase_count = scorer.phrase_count();
let mut explanation = Explanation::new("Phrase Prefix Scorer", scorer.score());
if let Some(similarity_weight) = self.similarity_weight_opt.as_ref() {
explanation.add_detail(similarity_weight.explain(fieldnorm_id, phrase_count));
}
Ok(explanation)
}
}
@@ -147,8 +146,6 @@ impl Weight for PhrasePrefixWeight {
mod tests {
use crate::docset::TERMINATED;
use crate::index::Index;
use crate::postings::Postings;
use crate::query::phrase_prefix_query::PhrasePrefixScorer;
use crate::query::{EnableScoring, PhrasePrefixQuery, Query};
use crate::schema::{Schema, TEXT};
use crate::{DocSet, IndexWriter, Term};
@@ -189,14 +186,14 @@ mod tests {
.phrase_prefix_query_weight(enable_scoring)
.unwrap()
.unwrap();
let mut phrase_scorer_boxed = phrase_weight
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.unwrap();
let phrase_scorer: &mut PhrasePrefixScorer<Box<dyn Postings>> =
phrase_scorer_boxed.as_any_mut().downcast_mut().unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
assert_eq!(phrase_scorer.advance(), 2);
assert_eq!(phrase_scorer.doc(), 2);
assert_eq!(phrase_scorer.phrase_count(), 1);
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
}
@@ -216,15 +213,14 @@ mod tests {
.phrase_prefix_query_weight(enable_scoring)
.unwrap()
.unwrap();
let mut phrase_scorer_boxed = phrase_weight
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.unwrap();
let phrase_scorer = phrase_scorer_boxed
.downcast_mut::<PhrasePrefixScorer<Box<dyn Postings>>>()
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
assert_eq!(phrase_scorer.advance(), 2);
assert_eq!(phrase_scorer.doc(), 2);
assert_eq!(phrase_scorer.phrase_count(), 1);
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
}

View File

@@ -5,7 +5,7 @@ pub mod regex_phrase_query;
mod regex_phrase_weight;
pub use self::phrase_query::PhraseQuery;
pub(crate) use self::phrase_scorer::intersection_exists;
pub(crate) use self::phrase_scorer::intersection_count;
pub use self::phrase_scorer::PhraseScorer;
pub use self::phrase_weight::PhraseWeight;

View File

@@ -126,7 +126,7 @@ impl PhraseQuery {
};
let mut weight = PhraseWeight::new(self.phrase_terms.clone(), bm25_weight_opt);
if self.slop > 0 {
weight.set_slop(self.slop);
weight.slop(self.slop);
}
Ok(weight)
}

View File

@@ -1,11 +1,10 @@
use std::cmp::Ordering;
use crate::codec::standard::postings::StandardPostings;
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::bm25::Bm25Weight;
use crate::query::{Explanation, Intersection, Scorer};
use crate::query::{Intersection, Scorer};
use crate::{DocId, Score};
struct PostingsWithOffset<TPostings> {
@@ -44,7 +43,7 @@ impl<TPostings: Postings> DocSet for PostingsWithOffset<TPostings> {
}
}
pub struct PhraseScorer<TPostings: Postings = StandardPostings> {
pub struct PhraseScorer<TPostings: Postings> {
intersection_docset: Intersection<PostingsWithOffset<TPostings>, PostingsWithOffset<TPostings>>,
num_terms: usize,
left_positions: Vec<u32>,
@@ -59,7 +58,7 @@ pub struct PhraseScorer<TPostings: Postings = StandardPostings> {
}
/// Returns true if and only if the two sorted arrays contain a common element
pub(crate) fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
let mut left_index = 0;
let mut right_index = 0;
while left_index < left.len() && right_index < right.len() {
@@ -80,7 +79,7 @@ pub(crate) fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
false
}
fn intersection_count(left: &[u32], right: &[u32]) -> usize {
pub(crate) fn intersection_count(left: &[u32], right: &[u32]) -> usize {
let mut left_index = 0;
let mut right_index = 0;
let mut count = 0;
@@ -403,7 +402,6 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
scorer
}
/// Returns the number of phrases identified in the current matching doc.
pub fn phrase_count(&self) -> u32 {
self.phrase_count
}
@@ -532,12 +530,18 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
self.advance()
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
debug_assert!(target >= self.doc());
if self.intersection_docset.seek_into_the_danger_zone(target) && self.phrase_match() {
return true;
let seek_res = self.intersection_docset.seek_danger(target);
if seek_res != SeekDangerResult::Found {
return seek_res;
}
// The intersection matched. Now let's see if we match the phrase.
if self.phrase_match() {
SeekDangerResult::Found
} else {
SeekDangerResult::SeekLowerBound(target + 1)
}
false
}
fn doc(&self) -> DocId {
@@ -575,17 +579,6 @@ impl<TPostings: Postings> Scorer for PhraseScorer<TPostings> {
1.0f32
}
}
fn explain(&mut self) -> Explanation {
let doc = self.doc();
let phrase_count = self.phrase_count();
let fieldnorm_id = self.fieldnorm_reader.fieldnorm_id(doc);
let mut explanation = Explanation::new("Phrase Scorer", self.score());
if let Some(similarity_weight) = self.similarity_weight_opt.as_ref() {
explanation.add_detail(similarity_weight.explain(fieldnorm_id, phrase_count));
}
explanation
}
}
#[cfg(test)]

View File

@@ -1,10 +1,11 @@
use super::PhraseScorer;
use crate::fieldnorm::FieldNormReader;
use crate::index::SegmentReader;
use crate::postings::TermInfo;
use crate::postings::SegmentPostings;
use crate::query::bm25::Bm25Weight;
use crate::query::explanation::does_not_match;
use crate::query::{box_scorer, EmptyScorer, Explanation, Scorer, Weight};
use crate::schema::Term;
use crate::query::{EmptyScorer, Explanation, Scorer, Weight};
use crate::schema::{IndexRecordOption, Term};
use crate::{DocId, DocSet, Score};
pub struct PhraseWeight {
@@ -20,10 +21,11 @@ impl PhraseWeight {
phrase_terms: Vec<(usize, Term)>,
similarity_weight_opt: Option<Bm25Weight>,
) -> PhraseWeight {
let slop = 0;
PhraseWeight {
phrase_terms,
similarity_weight_opt,
slop: 0,
slop,
}
}
@@ -41,53 +43,32 @@ impl PhraseWeight {
&self,
reader: &SegmentReader,
boost: Score,
) -> crate::Result<Option<Box<dyn Scorer>>> {
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight_opt = self
.similarity_weight_opt
.as_ref()
.map(|similarity_weight| similarity_weight.boost_by(boost));
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
if self.phrase_terms.is_empty() {
return Ok(None);
}
let field = self.phrase_terms[0].1.field();
if !self
.phrase_terms
.iter()
.all(|(_offset, term)| term.field() == field)
{
return Err(crate::TantivyError::InvalidArgument(
"All terms in a phrase query must belong to the same field".to_string(),
));
}
let inverted_index_reader = reader.inverted_index(field)?;
let mut term_infos: Vec<(usize, TermInfo)> = Vec::with_capacity(self.phrase_terms.len());
// TODO make it specialized
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
let Some(term_info) = inverted_index_reader.get_term_info(term)? else {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
return Ok(None);
};
term_infos.push((offset, term_info));
}
}
let scorer = reader.codec.new_phrase_scorer_type_erased(
&term_infos[..],
Ok(Some(PhraseScorer::new(
term_postings_list,
similarity_weight_opt,
fieldnorm_reader,
self.slop,
&inverted_index_reader,
)?;
Ok(Some(scorer))
)))
}
/// Sets the slop for the given PhraseWeight.
pub fn set_slop(&mut self, slop: u32) {
pub fn slop(&mut self, slop: u32) {
self.slop = slop;
}
}
@@ -95,9 +76,9 @@ impl PhraseWeight {
impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(scorer)
Ok(Box::new(scorer))
} else {
Ok(box_scorer(EmptyScorer))
Ok(Box::new(EmptyScorer))
}
}
@@ -110,7 +91,14 @@ impl Weight for PhraseWeight {
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
Ok(scorer.explain())
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
let phrase_count = scorer.phrase_count();
let mut explanation = Explanation::new("Phrase Scorer", scorer.score());
if let Some(similarity_weight) = self.similarity_weight_opt.as_ref() {
explanation.add_detail(similarity_weight.explain(fieldnorm_id, phrase_count));
}
Ok(explanation)
}
}
@@ -118,8 +106,7 @@ impl Weight for PhraseWeight {
mod tests {
use super::super::tests::create_index;
use crate::docset::TERMINATED;
use crate::query::phrase_query::PhraseScorer;
use crate::query::{EnableScoring, PhraseQuery, Scorer};
use crate::query::{EnableScoring, PhraseQuery};
use crate::{DocSet, Term};
#[test]
@@ -134,11 +121,9 @@ mod tests {
]);
let enable_scoring = EnableScoring::enabled_from_searcher(&searcher);
let phrase_weight = phrase_query.phrase_weight(enable_scoring).unwrap();
let phrase_scorer_boxed: Box<dyn Scorer> = phrase_weight
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.unwrap();
let mut phrase_scorer: Box<PhraseScorer> =
phrase_scorer_boxed.downcast::<PhraseScorer>().ok().unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
assert_eq!(phrase_scorer.advance(), 2);

View File

@@ -6,13 +6,11 @@ use tantivy_fst::Regex;
use super::PhraseScorer;
use crate::fieldnorm::FieldNormReader;
use crate::index::SegmentReader;
use crate::postings::{LoadedPostings, Postings, TermInfo};
use crate::postings::{LoadedPostings, Postings, SegmentPostings, TermInfo};
use crate::query::bm25::Bm25Weight;
use crate::query::explanation::does_not_match;
use crate::query::union::{BitSetPostingUnion, SimpleUnion};
use crate::query::{
box_scorer, AutomatonWeight, BitSetDocSet, EmptyScorer, Explanation, Scorer, Weight,
};
use crate::query::{AutomatonWeight, BitSetDocSet, EmptyScorer, Explanation, Scorer, Weight};
use crate::schema::{Field, IndexRecordOption};
use crate::{DocId, DocSet, InvertedIndexReader, Score};
@@ -105,9 +103,18 @@ impl RegexPhraseWeight {
term_info: &TermInfo,
doc_bitset: &mut BitSet,
) -> crate::Result<()> {
let mut segment_postings =
inverted_index.read_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
segment_postings.fill_bitset(doc_bitset);
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
for &doc in docs {
doc_bitset.insert(doc);
}
block_segment_postings.advance();
}
Ok(())
}
@@ -181,7 +188,7 @@ impl RegexPhraseWeight {
// - Bucket 1: Terms appearing in 0.1% to 1% of documents
// - Bucket 2: Terms appearing in 1% to 10% of documents
// - Bucket 3: Terms appearing in more than 10% of documents
let mut buckets: Vec<(BitSet, Vec<Box<dyn Postings>>)> = (0..4)
let mut buckets: Vec<(BitSet, Vec<SegmentPostings>)> = (0..4)
.map(|_| (BitSet::with_max_value(max_doc), Vec::new()))
.collect();
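
The bucket boundaries listed in the comment above reduce to simple doc-frequency ratios. A sketch with assumed thresholds (the actual code also special-cases terms below `SPARSE_TERM_DOC_THRESHOLD`):

```
/// Assigns a term to one of the four buckets described above, based on the
/// share of documents it appears in. Thresholds are assumptions matching
/// the comment, not necessarily the exact implementation.
fn bucket_for(doc_freq: u32, max_doc: u32) -> usize {
    let ratio = f64::from(doc_freq) / f64::from(max_doc);
    if ratio < 0.001 {
        0 // fewer than 0.1% of documents
    } else if ratio < 0.01 {
        1 // 0.1% to 1%
    } else if ratio < 0.1 {
        2 // 1% to 10%
    } else {
        3 // more than 10%
    }
}

fn main() {
    assert_eq!(bucket_for(5, 100_000), 0);
    assert_eq!(bucket_for(500, 100_000), 1);
    assert_eq!(bucket_for(50_000, 100_000), 3);
}
```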
@@ -190,7 +197,7 @@ impl RegexPhraseWeight {
for term_info in term_infos {
let mut term_posting = inverted_index
.read_postings_from_terminfo(term_info, IndexRecordOption::WithFreqsAndPositions)?;
let num_docs = u32::from(term_posting.doc_freq());
let num_docs = term_posting.doc_freq();
if num_docs < SPARSE_TERM_DOC_THRESHOLD {
let current_bucket = &mut sparse_buckets[0];
@@ -264,9 +271,9 @@ impl RegexPhraseWeight {
impl Weight for RegexPhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(box_scorer(scorer))
Ok(Box::new(scorer))
} else {
Ok(box_scorer(EmptyScorer))
Ok(Box::new(EmptyScorer))
}
}
@@ -304,7 +311,7 @@ mod tests {
#![proptest_config(ProptestConfig::with_cases(50))]
#[test]
fn test_phrase_regex_with_random_strings(mut random_strings in proptest::collection::vec("[c-z ]{0,10}", 1..100), num_occurrences in 1..150_usize) {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();
// Insert "aaa ccc" the specified number of times into the list
for _ in 0..num_occurrences {

View File

@@ -8,12 +8,10 @@ use super::range_query_fastfield::FastFieldRangeWeight;
use crate::index::SegmentReader;
use crate::query::explanation::does_not_match;
use crate::query::range_query::is_type_valid_for_fastfield_range_query;
use crate::query::{
box_scorer, BitSetDocSet, ConstScorer, EnableScoring, Explanation, Query, Scorer, Weight,
};
use crate::query::{BitSetDocSet, ConstScorer, EnableScoring, Explanation, Query, Scorer, Weight};
use crate::schema::{Field, IndexRecordOption, Term, Type};
use crate::termdict::{TermDictionary, TermStreamer};
use crate::{DocId, DocSet, Score};
use crate::{DocId, Score};
/// `RangeQuery` matches all documents that have at least one term within a defined range.
///
@@ -230,12 +228,21 @@ impl Weight for InvertedIndexRangeWeight {
}
processed_count += 1;
let term_info = term_range.value();
let mut postings =
inverted_index.read_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
postings.fill_bitset(&mut doc_bitset);
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
break;
}
for &doc in block_segment_postings.docs() {
doc_bitset.insert(doc);
}
block_segment_postings.advance();
}
}
let doc_bitset = BitSetDocSet::from(doc_bitset);
Ok(box_scorer(ConstScorer::new(doc_bitset, boost)))
Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
@@ -422,7 +429,7 @@ mod tests {
docs.push(doc);
}
docs.shuffle(&mut rand::thread_rng());
docs.shuffle(&mut rand::rng());
let mut docs_it = docs.into_iter();
for doc in (&mut docs_it).take(50) {
index_writer.add_document(doc)?;

View File

@@ -13,8 +13,7 @@ use common::bounds::{BoundsRange, TransformBound};
use super::fast_field_range_doc_set::RangeDocSet;
use crate::query::{
box_scorer, AllScorer, ConstScorer, EmptyScorer, EnableScoring, Explanation, Query, Scorer,
Weight,
AllScorer, ConstScorer, EmptyScorer, EnableScoring, Explanation, Query, Scorer, Weight,
};
use crate::schema::{Type, ValueBytes};
use crate::{DocId, DocSet, Score, SegmentReader, TantivyError, Term};
@@ -56,7 +55,7 @@ impl Weight for FastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
// Check if both bounds are Bound::Unbounded
if self.bounds.is_unbounded() {
return Ok(box_scorer(AllScorer::new(reader.max_doc())));
return Ok(Box::new(AllScorer::new(reader.max_doc())));
}
let term = self
@@ -96,7 +95,7 @@ impl Weight for FastFieldRangeWeight {
let Some(str_dict_column): Option<StrColumn> =
reader.fast_fields().str(&field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
let dict = str_dict_column.dictionary();
@@ -108,7 +107,7 @@ impl Weight for FastFieldRangeWeight {
let Some((column, _col_type)) = fast_field_reader
.u64_lenient_for_type(Some(&[ColumnType::Str]), &field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(column, boost, BoundsRange::new(lower_bound, upper_bound))
}
@@ -120,7 +119,7 @@ impl Weight for FastFieldRangeWeight {
let Some((column, _col_type)) = fast_field_reader
.u64_lenient_for_type(Some(&[ColumnType::DateTime]), &field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
let bounds = bounds.map_bound(|term| term.as_date().unwrap().to_u64());
search_on_u64_ff(
@@ -147,7 +146,7 @@ impl Weight for FastFieldRangeWeight {
let Some(ip_addr_column): Option<Column<Ipv6Addr>> =
reader.fast_fields().column_opt(&field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
let value_range = bound_range_inclusive_ip(
&bounds.lower_bound,
@@ -156,11 +155,11 @@ impl Weight for FastFieldRangeWeight {
ip_addr_column.max_value(),
);
let docset = RangeDocSet::new(value_range, ip_addr_column);
Ok(box_scorer(ConstScorer::new(docset, boost)))
Ok(Box::new(ConstScorer::new(docset, boost)))
} else if field_type.is_str() {
let Some(str_dict_column): Option<StrColumn> = reader.fast_fields().str(&field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
let dict = str_dict_column.dictionary();
@@ -172,7 +171,7 @@ impl Weight for FastFieldRangeWeight {
let Some((column, _col_type)) =
fast_field_reader.u64_lenient_for_type(None, &field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(column, boost, BoundsRange::new(lower_bound, upper_bound))
} else {
@@ -210,7 +209,7 @@ impl Weight for FastFieldRangeWeight {
&field_name,
)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(
column,
@@ -251,7 +250,7 @@ fn search_on_json_numerical_field(
let Some((column, col_type)) =
fast_field_reader.u64_lenient_for_type(allowed_column_types, field_name)?
else {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
};
let actual_column_type: NumericalType = col_type
.numerical_type()
@@ -409,18 +408,18 @@ fn search_on_u64_ff(
)
.unwrap_or(1..=0); // empty range
if value_range.is_empty() {
return Ok(box_scorer(EmptyScorer));
return Ok(Box::new(EmptyScorer));
}
if col_min_value >= *value_range.start() && col_max_value <= *value_range.end() {
// all values in the column are within the range.
if column.index.get_cardinality() == Cardinality::Full {
if boost != 1.0f32 {
return Ok(box_scorer(ConstScorer::new(
return Ok(Box::new(ConstScorer::new(
AllScorer::new(column.num_docs()),
boost,
)));
} else {
return Ok(box_scorer(AllScorer::new(column.num_docs())));
return Ok(Box::new(AllScorer::new(column.num_docs())));
}
} else {
// TODO Make it a field presence request for that specific column
@@ -428,7 +427,7 @@ fn search_on_u64_ff(
}
let docset = RangeDocSet::new(value_range, column);
Ok(box_scorer(ConstScorer::new(docset, boost)))
Ok(Box::new(ConstScorer::new(docset, boost)))
}
/// Returns true if the type maps to a u64 fast field
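The `unwrap_or(1..=0)` fallback above relies on the standard-library convention that an inverted `RangeInclusive` is empty, which is what triggers the early `EmptyScorer` return. A standalone sketch of the idiom (plain Rust, no tantivy types):

```
// An inverted inclusive range is the canonical "empty range" sentinel:
// `is_empty()` returns true, so the scorer construction bails out early.
let empty: std::ops::RangeInclusive<u64> = 1..=0;
assert!(empty.is_empty());

// A regular range keeps its bounds and is non-empty.
let bounded = 5u64..=10u64;
assert!(!bounded.is_empty());
assert_eq!((*bounded.start(), *bounded.end()), (5, 10));
```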
@@ -492,7 +491,7 @@ mod tests {
use common::DateTime;
use proptest::prelude::*;
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;
use rand::SeedableRng;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

View File

@@ -1,6 +1,6 @@
use std::marker::PhantomData;
use crate::docset::DocSet;
use crate::docset::{DocSet, SeekDangerResult};
use crate::query::score_combiner::ScoreCombiner;
use crate::query::Scorer;
use crate::{DocId, Score};
@@ -56,9 +56,9 @@ where
self.req_scorer.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
self.score_cache = None;
self.req_scorer.seek_into_the_danger_zone(target)
self.req_scorer.seek_danger(target)
}
fn doc(&self) -> DocId {

View File

@@ -1,11 +1,9 @@
use std::mem::{transmute_copy, ManuallyDrop};
use std::ops::DerefMut;
use downcast_rs::impl_downcast;
use crate::docset::DocSet;
use crate::query::Explanation;
use crate::{DocId, Score, TERMINATED};
use crate::Score;
/// Scored set of documents matching a query within a specific segment.
///
@@ -15,44 +13,6 @@ pub trait Scorer: downcast_rs::Downcast + DocSet + 'static {
///
/// This method will perform a bit of computation and is not cached.
fn score(&mut self) -> Score;
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the TopDocs collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
fn for_each_pruning(
&mut self,
threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
for_each_pruning_scorer_default_impl(self, threshold, callback);
}
/// Returns an explanation for the score of the current document.
fn explain(&mut self) -> Explanation {
let score = self.score();
let name = std::any::type_name_of_val(self);
Explanation::new(name, score)
}
}
/// Boxes a scorer. Prefer this to Box::new as it avoids double boxing
/// when TScorer is already a Box<dyn Scorer>.
pub fn box_scorer<TScorer: Scorer>(scorer: TScorer) -> Box<dyn Scorer> {
if std::any::TypeId::of::<TScorer>() == std::any::TypeId::of::<Box<dyn Scorer>>() {
unsafe {
let forget_me = ManuallyDrop::new(scorer);
transmute_copy::<TScorer, Box<dyn Scorer>>(&forget_me)
}
} else {
Box::new(scorer)
}
}
impl_downcast!(Scorer);
@@ -62,37 +22,4 @@ impl Scorer for Box<dyn Scorer> {
fn score(&mut self) -> Score {
self.deref_mut().score()
}
fn for_each_pruning(
&mut self,
threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
self.deref_mut().for_each_pruning(threshold, callback);
}
}
/// Calls `callback` with all of the `(doc, score)` for which score
/// is exceeding a given threshold.
///
/// This method is useful for the [`TopDocs`](crate::collector::TopDocs) collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
pub(crate) fn for_each_pruning_scorer_default_impl<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
while doc != TERMINATED {
let score = scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
doc = scorer.advance();
}
}

View File

@@ -4,9 +4,9 @@ mod term_weight;
pub use self::term_query::TermQuery;
pub use self::term_scorer::TermScorer;
#[cfg(test)]
mod tests {
use crate::collector::TopDocs;
use crate::docset::DocSet;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;

View File

@@ -1,27 +1,23 @@
use crate::codec::postings::{PostingsCodec, PostingsWithBlockMax};
use crate::codec::{Codec, StandardCodec};
use crate::docset::DocSet;
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::postings::{FreqReadingOption, Postings, SegmentPostings};
use crate::query::bm25::Bm25Weight;
use crate::query::{Explanation, Scorer};
use crate::{DocId, Score};
#[derive(Clone)]
pub struct TermScorer<
TPostings: Postings = <<StandardCodec as Codec>::PostingsCodec as PostingsCodec>::Postings,
> {
postings: TPostings,
pub struct TermScorer {
postings: SegmentPostings,
fieldnorm_reader: FieldNormReader,
similarity_weight: Bm25Weight,
}
impl<TPostings: Postings> TermScorer<TPostings> {
impl TermScorer {
pub fn new(
postings: TPostings,
postings: SegmentPostings,
fieldnorm_reader: FieldNormReader,
similarity_weight: Bm25Weight,
) -> TermScorer<TPostings> {
) -> TermScorer {
TermScorer {
postings,
fieldnorm_reader,
@@ -29,35 +25,10 @@ impl<TPostings: Postings> TermScorer<TPostings> {
}
}
pub fn term_freq(&self) -> u32 {
self.postings.term_freq()
pub(crate) fn seek_block(&mut self, target_doc: DocId) {
self.postings.block_cursor.seek_block(target_doc);
}
pub fn fieldnorm_id(&self) -> u8 {
self.fieldnorm_reader.fieldnorm_id(self.doc())
}
pub fn max_score(&self) -> Score {
self.similarity_weight.max_score()
}
}
impl<TPostingsWithBlockMax: PostingsWithBlockMax> TermScorer<TPostingsWithBlockMax> {
pub(crate) fn last_doc_in_block(&self) -> DocId {
self.postings.last_doc_in_block()
}
/// Advances the term scorer to the block containing target_doc and returns
/// an upperbound for the score all of the documents in the block.
/// (BlockMax). This score is not guaranteed to be the
/// effective maximum score of the block.
pub(crate) fn seek_block_max(&mut self, target_doc: DocId) -> Score {
self.postings
.seek_block_max(target_doc, &self.similarity_weight)
}
}
impl TermScorer {
#[cfg(test)]
pub fn create_for_test(
doc_and_tfs: &[(DocId, u32)],
@@ -73,15 +44,60 @@ impl TermScorer {
.unwrap_or(0u32)
< fieldnorms.len() as u32
);
type SegmentPostings = <<StandardCodec as Codec>::PostingsCodec as PostingsCodec>::Postings;
let segment_postings: SegmentPostings =
let segment_postings =
SegmentPostings::create_from_docs_and_tfs(doc_and_tfs, Some(fieldnorms));
let fieldnorm_reader = FieldNormReader::for_test(fieldnorms);
TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight)
}
/// See `FreqReadingOption`.
pub(crate) fn freq_reading_option(&self) -> FreqReadingOption {
self.postings.block_cursor.freq_reading_option()
}
/// Returns the maximum score for the current block.
///
/// In some rare cases, the result may not be exact. In such a case a lower value is
/// returned (and may lead us to return a lesser document).
///
/// At index time, we store the (fieldnorm_id, term frequency) pair that maximizes the
/// score assuming the average fieldnorm computed on this segment.
///
/// Though extremely rare, it is theoretically possible that the actual average fieldnorm
/// is different enough from the current segment's average fieldnorm that the maximum over a
/// specific block is achieved on a different document.
///
/// (The result is, on the other hand, guaranteed to be correct if there is only one segment.)
pub fn block_max_score(&mut self) -> Score {
self.postings
.block_cursor
.block_max_score(&self.fieldnorm_reader, &self.similarity_weight)
}
pub fn term_freq(&self) -> u32 {
self.postings.term_freq()
}
pub fn fieldnorm_id(&self) -> u8 {
self.fieldnorm_reader.fieldnorm_id(self.doc())
}
pub fn explain(&self) -> Explanation {
let fieldnorm_id = self.fieldnorm_id();
let term_freq = self.term_freq();
self.similarity_weight.explain(fieldnorm_id, term_freq)
}
pub fn max_score(&self) -> Score {
self.similarity_weight.max_score()
}
pub fn last_doc_in_block(&self) -> DocId {
self.postings.block_cursor.skip_reader().last_doc_in_block()
}
}
impl<TPostings: Postings> DocSet for TermScorer<TPostings> {
impl DocSet for TermScorer {
#[inline]
fn advance(&mut self) -> DocId {
self.postings.advance()
@@ -102,29 +118,29 @@ impl<TPostings: Postings> DocSet for TermScorer<TPostings> {
}
}
impl<TPostings: Postings> Scorer for TermScorer<TPostings> {
impl Scorer for TermScorer {
#[inline]
fn score(&mut self) -> Score {
let fieldnorm_id = self.fieldnorm_id();
let term_freq = self.term_freq();
self.similarity_weight.score(fieldnorm_id, term_freq)
}
fn explain(&mut self) -> Explanation {
let fieldnorm_id = self.fieldnorm_id();
let term_freq = self.term_freq();
self.similarity_weight.explain(fieldnorm_id, term_freq)
}
}
#[cfg(test)]
mod tests {
use proptest::prelude::*;
use crate::index::SegmentId;
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::merge_policy::NoMergePolicy;
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::query::term_query::TermScorer;
use crate::query::{Bm25Weight, Scorer};
use crate::{assert_nearly_equals, DocId, DocSet, Score, TERMINATED};
use crate::query::{Bm25Weight, EnableScoring, Scorer, TermQuery};
use crate::schema::{IndexRecordOption, Schema, TEXT};
use crate::{
assert_nearly_equals, DocId, DocSet, Index, IndexWriter, Score, Searcher, Term, TERMINATED,
};
#[test]
fn test_term_scorer_max_score() -> crate::Result<()> {
@@ -138,7 +154,7 @@ mod tests {
crate::assert_nearly_equals!(max_scorer, 1.3990127);
assert_eq!(term_scorer.doc(), 2);
assert_eq!(term_scorer.term_freq(), 3);
assert_nearly_equals!(term_scorer.seek_block_max(2), 1.3676447);
assert_nearly_equals!(term_scorer.block_max_score(), 1.3676447);
assert_nearly_equals!(term_scorer.score(), 1.0892314);
assert_eq!(term_scorer.advance(), 3);
assert_eq!(term_scorer.doc(), 3);
@@ -153,9 +169,9 @@ mod tests {
}
#[test]
fn test_term_scorer_shallow_advance() {
fn test_term_scorer_shallow_advance() -> crate::Result<()> {
let bm25_weight = Bm25Weight::for_one_term(300, 1024, 10.0);
let mut doc_and_tfs = Vec::new();
let mut doc_and_tfs = vec![];
for i in 0u32..300u32 {
let doc = i * 10;
doc_and_tfs.push((doc, 1u32 + doc % 3u32));
@@ -163,10 +179,11 @@ mod tests {
let fieldnorms: Vec<u32> = std::iter::repeat_n(10u32, 3_000).collect();
let mut term_scorer = TermScorer::create_for_test(&doc_and_tfs, &fieldnorms, bm25_weight);
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek_block_max(1289);
term_scorer.seek_block(1289);
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek(1289);
assert_eq!(term_scorer.doc(), 1290);
Ok(())
}
proptest! {
@@ -200,7 +217,7 @@ mod tests {
let docs: Vec<DocId> = (0..term_doc_freq).map(|doc| doc as DocId).collect();
for block in docs.chunks(COMPRESSION_BLOCK_SIZE) {
let block_max_score: Score = term_scorer.seek_block_max(0);
let block_max_score: Score = term_scorer.block_max_score();
let mut block_max_score_computed: Score = 0.0;
for &doc in block {
assert_eq!(term_scorer.doc(), doc);
@@ -212,110 +229,112 @@ mod tests {
}
}
// #[test]
// fn test_block_wand() {
// let mut doc_tfs: Vec<(u32, u32)> = vec![];
// for doc in 0u32..128u32 {
// doc_tfs.push((doc, 1u32));
// }
// for doc in 128u32..256u32 {
// doc_tfs.push((doc, if doc == 200 { 2u32 } else { 1u32 }));
// }
// doc_tfs.push((256, 1u32));
// doc_tfs.push((257, 3u32));
// doc_tfs.push((258, 1u32));
#[test]
fn test_block_wand() {
let mut doc_tfs: Vec<(u32, u32)> = vec![];
for doc in 0u32..128u32 {
doc_tfs.push((doc, 1u32));
}
for doc in 128u32..256u32 {
doc_tfs.push((doc, if doc == 200 { 2u32 } else { 1u32 }));
}
doc_tfs.push((256, 1u32));
doc_tfs.push((257, 3u32));
doc_tfs.push((258, 1u32));
// let fieldnorms: Vec<u32> = std::iter::repeat_n(20u32, 300).collect();
// let bm25_weight = Bm25Weight::for_one_term(10, 129, 20.0);
// let mut docs = TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
// assert_nearly_equals!(docs.seek_block(0), 2.5161593);
// assert_nearly_equals!(docs.seek_block(135), 3.4597192);
// // the block is not loaded yet.
// assert_nearly_equals!(docs.seek_block(256), 5.2971773);
// assert_eq!(256, docs.seek(256));
// assert_nearly_equals!(docs.seek_block(256), 3.9539647);
// }
let fieldnorms: Vec<u32> = std::iter::repeat_n(20u32, 300).collect();
let bm25_weight = Bm25Weight::for_one_term(10, 129, 20.0);
let mut docs = TermScorer::create_for_test(&doc_tfs[..], &fieldnorms[..], bm25_weight);
assert_nearly_equals!(docs.block_max_score(), 2.5161593);
docs.seek_block(135);
assert_nearly_equals!(docs.block_max_score(), 3.4597192);
docs.seek_block(256);
// the block is not loaded yet.
assert_nearly_equals!(docs.block_max_score(), 5.2971773);
assert_eq!(256, docs.seek(256));
assert_nearly_equals!(docs.block_max_score(), 3.9539647);
}
// fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
// let term_weight =
// term_query.specialized_weight(EnableScoring::enabled_from_searcher(searcher))?;
// for reader in searcher.segment_readers() {
// let mut block_max_scores = vec![];
// let mut block_max_scores_b = vec![];
// let mut docs = vec![];
// {
// let mut term_scorer = term_weight.term_scorer_for_test(reader, 1.0)?.unwrap();
// while term_scorer.doc() != TERMINATED {
// let mut score = term_scorer.score();
// docs.push(term_scorer.doc());
// for _ in 0..128 {
// score = score.max(term_scorer.score());
// if term_scorer.advance() == TERMINATED {
// break;
// }
// }
// block_max_scores.push(score);
// }
// }
// {
// let mut term_scorer = term_weight.term_scorer_for_test(reader, 1.0)?.unwrap();
// for d in docs {
// let block_max_score = term_scorer.seek_block(d);
// block_max_scores_b.push(block_max_score);
// }
// }
// for (l, r) in block_max_scores
// .iter()
// .cloned()
// .zip(block_max_scores_b.iter().cloned())
// {
// assert_nearly_equals!(l, r);
// }
// }
// Ok(())
// }
fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
let term_weight =
term_query.specialized_weight(EnableScoring::enabled_from_searcher(searcher))?;
for reader in searcher.segment_readers() {
let mut block_max_scores = vec![];
let mut block_max_scores_b = vec![];
let mut docs = vec![];
{
let mut term_scorer = term_weight.term_scorer_for_test(reader, 1.0)?.unwrap();
while term_scorer.doc() != TERMINATED {
let mut score = term_scorer.score();
docs.push(term_scorer.doc());
for _ in 0..128 {
score = score.max(term_scorer.score());
if term_scorer.advance() == TERMINATED {
break;
}
}
block_max_scores.push(score);
}
}
{
let mut term_scorer = term_weight.term_scorer_for_test(reader, 1.0)?.unwrap();
for d in docs {
term_scorer.seek_block(d);
block_max_scores_b.push(term_scorer.block_max_score());
}
}
for (l, r) in block_max_scores
.iter()
.cloned()
.zip(block_max_scores_b.iter().cloned())
{
assert_nearly_equals!(l, r);
}
}
Ok(())
}
// #[ignore]
// #[test]
// fn test_block_wand_long_test() -> crate::Result<()> {
// let mut schema_builder = Schema::builder();
// let text_field = schema_builder.add_text_field("text", TEXT);
// let schema = schema_builder.build();
// let index = Index::create_in_ram(schema);
// let mut writer: IndexWriter =
// index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
// use rand::Rng;
// let mut rng = rand::thread_rng();
// writer.set_merge_policy(Box::new(NoMergePolicy));
// for _ in 0..3_000 {
// let term_freq = rng.gen_range(1..10000);
// let words: Vec<&str> = std::iter::repeat_n("bbbb", term_freq).collect();
// let text = words.join(" ");
// writer.add_document(doc!(text_field=>text))?;
// }
// writer.commit()?;
// let term_query = TermQuery::new(
// Term::from_field_text(text_field, "bbbb"),
// IndexRecordOption::WithFreqs,
// );
// let segment_ids: Vec<SegmentId>;
// let reader = index.reader()?;
// {
// let searcher = reader.searcher();
// segment_ids = searcher
// .segment_readers()
// .iter()
// .map(|segment| segment.segment_id())
// .collect();
// test_block_wand_aux(&term_query, &searcher)?;
// }
// writer.merge(&segment_ids[..]).wait().unwrap();
// {
// reader.reload()?;
// let searcher = reader.searcher();
// assert_eq!(searcher.segment_readers().len(), 1);
// test_block_wand_aux(&term_query, &searcher)?;
// }
// Ok(())
// }
#[ignore]
#[test]
fn test_block_wand_long_test() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut writer: IndexWriter =
index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
use rand::Rng;
let mut rng = rand::rng();
writer.set_merge_policy(Box::new(NoMergePolicy));
for _ in 0..3_000 {
let term_freq = rng.random_range(1..10000);
let words: Vec<&str> = std::iter::repeat_n("bbbb", term_freq).collect();
let text = words.join(" ");
writer.add_document(doc!(text_field=>text))?;
}
writer.commit()?;
let term_query = TermQuery::new(
Term::from_field_text(text_field, "bbbb"),
IndexRecordOption::WithFreqs,
);
let segment_ids: Vec<SegmentId>;
let reader = index.reader()?;
{
let searcher = reader.searcher();
segment_ids = searcher
.segment_readers()
.iter()
.map(|segment| segment.segment_id())
.collect();
test_block_wand_aux(&term_query, &searcher)?;
}
writer.merge(&segment_ids[..]).wait().unwrap();
{
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
test_block_wand_aux(&term_query, &searcher)?;
}
Ok(())
}
}

View File

@@ -1,11 +1,12 @@
use super::term_scorer::TermScorer;
use crate::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN};
use crate::fieldnorm::FieldNormReader;
use crate::index::SegmentReader;
use crate::postings::SegmentPostings;
use crate::query::bm25::Bm25Weight;
use crate::query::explanation::does_not_match;
use crate::query::term_query::TermScorer;
use crate::query::weight::{for_each_docset_buffered, for_each_scorer};
use crate::query::{box_scorer, AllScorer, AllWeight, EmptyScorer, Explanation, Scorer, Weight};
use crate::query::{AllScorer, AllWeight, EmptyScorer, Explanation, Scorer, Weight};
use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TantivyError, Term};
@@ -17,7 +18,7 @@ pub struct TermWeight {
}
enum TermOrEmptyOrAllScorer {
TermScorer(Box<dyn Scorer>),
TermScorer(Box<TermScorer>),
Empty,
AllMatch(AllScorer),
}
@@ -26,8 +27,8 @@ impl TermOrEmptyOrAllScorer {
pub fn into_boxed_scorer(self) -> Box<dyn Scorer> {
match self {
TermOrEmptyOrAllScorer::TermScorer(scorer) => scorer,
TermOrEmptyOrAllScorer::Empty => box_scorer(EmptyScorer),
TermOrEmptyOrAllScorer::AllMatch(scorer) => box_scorer(scorer),
TermOrEmptyOrAllScorer::Empty => Box::new(EmptyScorer),
TermOrEmptyOrAllScorer::AllMatch(scorer) => Box::new(scorer),
}
}
}
@@ -43,7 +44,6 @@ impl Weight for TermWeight {
if term_scorer.doc() > doc || term_scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
let mut term_scorer = term_scorer.downcast::<TermScorer>().ok().unwrap();
let mut explanation = term_scorer.explain();
explanation.add_context(format!("Term={:?}", self.term,));
Ok(explanation)
@@ -124,9 +124,11 @@ impl Weight for TermWeight {
let specialized_scorer = self.specialized_scorer(reader, 1.0)?;
match specialized_scorer {
TermOrEmptyOrAllScorer::TermScorer(term_scorer) => {
reader
.codec
.for_each_pruning(threshold, term_scorer, callback);
crate::query::boolean_query::block_wand_single_scorer(
*term_scorer,
threshold,
callback,
);
}
TermOrEmptyOrAllScorer::Empty => {}
TermOrEmptyOrAllScorer::AllMatch(_) => {
@@ -166,15 +168,12 @@ impl TermWeight {
&self,
reader: &SegmentReader,
boost: Score,
) -> Option<super::TermScorer> {
let scorer = self.specialized_scorer(reader, boost).unwrap();
match scorer {
TermOrEmptyOrAllScorer::TermScorer(scorer) => {
let term_scorer = scorer.downcast::<super::TermScorer>().ok()?;
Some(*term_scorer)
}
) -> crate::Result<Option<TermScorer>> {
let scorer = self.specialized_scorer(reader, boost)?;
Ok(match scorer {
TermOrEmptyOrAllScorer::TermScorer(scorer) => Some(*scorer),
_ => None,
}
})
}
fn specialized_scorer(
@@ -197,16 +196,14 @@ impl TermWeight {
)));
}
let segment_postings: SegmentPostings =
inverted_index.read_postings_from_terminfo(&term_info, self.index_record_option)?;
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let similarity_weight = self.similarity_weight.boost_by(boost);
let term_scorer = inverted_index.new_term_scorer(
&term_info,
self.index_record_option,
fieldnorm_reader,
similarity_weight,
)?;
Ok(TermOrEmptyOrAllScorer::TermScorer(term_scorer))
Ok(TermOrEmptyOrAllScorer::TermScorer(Box::new(
TermScorer::new(segment_postings, fieldnorm_reader, similarity_weight),
)))
}
fn fieldnorm_reader(&self, segment_reader: &SegmentReader) -> crate::Result<FieldNormReader> {

View File

@@ -1,7 +1,7 @@
use std::cell::RefCell;
use crate::docset::DocSet;
use crate::postings::{DocFreq, Postings};
use crate::postings::Postings;
use crate::query::BitSetDocSet;
use crate::DocId;
@@ -16,9 +16,6 @@ pub struct BitSetPostingUnion<TDocSet> {
docsets: RefCell<Vec<TDocSet>>,
/// The already unionized BitSet of the docsets
bitset: BitSetDocSet,
/// The total number of documents in the union (regardless of the position we are in the
/// bitset).
doc_freq: u32,
}
impl<TDocSet: DocSet> BitSetPostingUnion<TDocSet> {
@@ -26,11 +23,9 @@ impl<TDocSet: DocSet> BitSetPostingUnion<TDocSet> {
docsets: Vec<TDocSet>,
bitset: BitSetDocSet,
) -> BitSetPostingUnion<TDocSet> {
let doc_freq = bitset.doc_freq();
BitSetPostingUnion {
docsets: RefCell::new(docsets),
bitset,
doc_freq,
}
}
}
@@ -51,10 +46,6 @@ impl<TDocSet: Postings> Postings for BitSetPostingUnion<TDocSet> {
term_freq
}
fn has_freq(&self) -> bool {
true
}
fn append_positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
let curr_doc = self.bitset.doc();
let mut docsets = self.docsets.borrow_mut();
@@ -73,10 +64,6 @@ impl<TDocSet: Postings> Postings for BitSetPostingUnion<TDocSet> {
output.sort_unstable();
output.dedup();
}
fn doc_freq(&self) -> DocFreq {
DocFreq::Exact(self.doc_freq)
}
}
impl<TDocSet: DocSet> DocSet for BitSetPostingUnion<TDocSet> {

View File

@@ -1,6 +1,6 @@
use common::TinySet;
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
use crate::query::size_hint::estimate_union;
use crate::query::Scorer;
@@ -31,7 +31,7 @@ where P: FnMut(&mut T) -> bool {
/// Creates a `DocSet` that iterate through the union of two or more `DocSet`s.
pub struct BufferedUnionScorer<TScorer, TScoreCombiner = DoNothingCombiner> {
/// Active scorers (docsets that already hit `TERMINATED` have been filtered out).
scorers: Vec<TScorer>,
docsets: Vec<TScorer>,
/// Sliding window presence map for upcoming docs.
///
/// There are `HORIZON_NUM_TINYBITSETS` buckets, each covering
@@ -46,8 +46,6 @@ pub struct BufferedUnionScorer<TScorer, TScoreCombiner = DoNothingCombiner> {
/// hit the same doc within the buffered window.
scores: Box<[TScoreCombiner; HORIZON as usize]>,
/// Start doc ID (inclusive) of the current sliding window.
/// None if the window is not loaded yet. This is true for a freshly created
/// BufferedUnionScorer.
window_start_doc: DocId,
/// Current doc ID of the union.
doc: DocId,
@@ -83,81 +81,51 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
}
impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> BufferedUnionScorer<TScorer, TScoreCombiner> {
/// Returns the underlying scorers in the union.
pub fn into_scorers(self) -> Vec<TScorer> {
self.scorers
}
/// Accessor for the underlying scorers in the union.
pub fn scorers(&self) -> &[TScorer] {
&self.scorers[..]
}
/// num_docs is the number of documents in the segment.
pub(crate) fn build(
docsets: Vec<TScorer>,
score_combiner_fn: impl FnOnce() -> TScoreCombiner,
num_docs: u32,
) -> BufferedUnionScorer<TScorer, TScoreCombiner> {
let score_combiner = score_combiner_fn();
let mut non_empty_docsets: Vec<TScorer> = docsets
let non_empty_docsets: Vec<TScorer> = docsets
.into_iter()
.filter(|docset| docset.doc() != TERMINATED)
.collect();
let first_doc: DocId = non_empty_docsets
.iter()
.map(|docset| docset.doc())
.min()
.unwrap_or(TERMINATED);
let mut score_combiner_cloned = score_combiner;
let mut i = 0;
while i < non_empty_docsets.len() {
let should_remove_docset: bool = {
let non_empty_docset = &mut non_empty_docsets[i];
if non_empty_docset.doc() != first_doc {
false
} else {
score_combiner_cloned.update(non_empty_docset);
non_empty_docsets[i].advance() == TERMINATED
}
};
if should_remove_docset {
non_empty_docsets.swap_remove(i);
} else {
i += 1;
}
}
let first_score: Score = score_combiner_cloned.score();
BufferedUnionScorer {
scorers: non_empty_docsets,
let mut union = BufferedUnionScorer {
docsets: non_empty_docsets,
bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
scores: Box::new([score_combiner; HORIZON as usize]),
scores: Box::new([score_combiner_fn(); HORIZON as usize]),
bucket_idx: HORIZON_NUM_TINYBITSETS,
// That way we will be detected as outside the window,
window_start_doc: u32::MAX - HORIZON,
doc: first_doc,
score: first_score,
window_start_doc: 0,
doc: 0,
score: 0.0,
num_docs,
};
if union.refill() {
union.advance();
} else {
union.doc = TERMINATED;
}
union
}
fn refill(&mut self) -> bool {
let Some(min_doc) = self.scorers.iter().map(DocSet::doc).min() else {
return false;
};
// Reset the sliding window to start at the smallest doc
// across all scorers and prebuffer within the horizon.
self.window_start_doc = min_doc;
self.bucket_idx = 0;
self.doc = min_doc;
refill(
&mut self.scorers,
&mut self.bitsets,
&mut self.scores,
min_doc,
);
true
if let Some(min_doc) = self.docsets.iter().map(DocSet::doc).min() {
// Reset the sliding window to start at the smallest doc
// across all scorers and prebuffer within the horizon.
self.window_start_doc = min_doc;
self.bucket_idx = 0;
self.doc = min_doc;
refill(
&mut self.docsets,
&mut self.bitsets,
&mut self.scores,
min_doc,
);
true
} else {
false
}
}
#[inline]
@@ -179,7 +147,6 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> BufferedUnionScorer<TScorer
fn is_in_horizon(&self, target: DocId) -> bool {
// wrapping_sub, because target may be < window_start_doc
// in particular during initialization.
let gap = target.wrapping_sub(self.window_start_doc);
gap < HORIZON
}
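A toy illustration of the `wrapping_sub` trick above: when `target` is below `window_start_doc`, the subtraction wraps around to a huge `u32`, which simply fails the `gap < HORIZON` test instead of panicking on underflow in debug builds (the horizon value below is illustrative):

```
// Assume a window starting at doc 100 and an illustrative horizon.
const HORIZON: u32 = 64 * 64;
let window_start_doc: u32 = 100;

// A target below the window start wraps to a huge gap...
let gap = 5u32.wrapping_sub(window_start_doc);
assert_eq!(gap, u32::MAX - 94);
// ...and is therefore correctly reported as outside the horizon.
assert!(gap >= HORIZON);

// A target inside the window behaves like plain subtraction.
assert!(150u32.wrapping_sub(window_start_doc) < HORIZON);
```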
@@ -209,10 +176,11 @@ where
if self.doc >= target {
return self.doc;
}
if self.is_in_horizon(target) {
let gap = target - self.window_start_doc;
if gap < HORIZON {
// Our value is within the buffered horizon.
// Skipping to corresponding bucket.
let gap = target.wrapping_sub(self.window_start_doc);
let new_bucket_idx = gap as usize / 64;
for obsolete_tinyset in &mut self.bitsets[self.bucket_idx..new_bucket_idx] {
obsolete_tinyset.clear();
@@ -231,14 +199,16 @@ where
doc
} else {
// clear the buffered info.
self.bitsets.fill(TinySet::empty());
for obsolete_tinyset in self.bitsets.iter_mut() {
*obsolete_tinyset = TinySet::empty();
}
for score_combiner in self.scores.iter_mut() {
score_combiner.clear();
}
// The target is outside of the buffered horizon.
// advance all docsets to a doc >= to the target.
unordered_drain_filter(&mut self.scorers, |docset| {
unordered_drain_filter(&mut self.docsets, |docset| {
if docset.doc() < target {
docset.seek(target);
}
@@ -255,25 +225,47 @@ where
}
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
if target >= TERMINATED {
return SeekDangerResult::SeekLowerBound(TERMINATED);
}
if self.is_in_horizon(target) {
// Our value is within the buffered horizon and the docset may already have been
// processed and removed, so we need to use seek, which uses the regular advance.
self.seek(target) == target
} else {
// The docsets are not in the buffered range, so we can use seek_into_the_danger_zone
// of the underlying docsets
let is_hit = self
.scorers
.iter_mut()
.any(|docset| docset.seek_into_the_danger_zone(target));
let seek_doc = self.seek(target);
if seek_doc == target {
return SeekDangerResult::Found;
} else {
return SeekDangerResult::SeekLowerBound(seek_doc);
};
}
// The API requires the DocSet to be in a valid state when `seek_into_the_danger_zone`
// returns true.
if is_hit {
self.seek(target);
// The docsets are not in the buffered range, so we can use `seek_danger`
// on the underlying docsets.
let mut is_hit = false;
let mut min_new_target = TERMINATED;
for docset in self.docsets.iter_mut() {
match docset.seek_danger(target) {
SeekDangerResult::Found => {
is_hit = true;
break;
}
SeekDangerResult::SeekLowerBound(new_target) => {
min_new_target = min_new_target.min(new_target);
}
}
is_hit
}
// The API requires the DocSet to be in a valid state when `seek_danger`
// returns `Found`.
if is_hit {
// The doc is found. Let's make sure we position the union on the target
// to bring it back to a valid state.
self.seek(target);
SeekDangerResult::Found
} else {
SeekDangerResult::SeekLowerBound(min_new_target)
}
}
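For reference, the same `Found` / `SeekLowerBound` contract can be expressed as a blanket-style helper over any `DocSet`, including the `TERMINATED` guard that this union implementation applies first. A hypothetical sketch, not the crate's actual blanket implementation:

```
// Hypothetical helper mirroring the contract above: `Found` means the
// docset is positioned exactly on `target`; otherwise the caller gets a
// lower bound on the docset's new position.
fn seek_danger_default<D: DocSet>(docset: &mut D, target: DocId) -> SeekDangerResult {
    // TERMINATED is a sentinel, never a member of the docset, so it can
    // never be "found".
    if target >= TERMINATED {
        return SeekDangerResult::SeekLowerBound(TERMINATED);
    }
    let doc = if docset.doc() < target {
        docset.seek(target)
    } else {
        docset.doc()
    };
    if doc == target {
        SeekDangerResult::Found
    } else {
        SeekDangerResult::SeekLowerBound(doc)
    }
}
```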
@@ -283,11 +275,11 @@ where
}
fn size_hint(&self) -> u32 {
estimate_union(self.scorers.iter().map(DocSet::size_hint), self.num_docs)
estimate_union(self.docsets.iter().map(DocSet::size_hint), self.num_docs)
}
fn cost(&self) -> u64 {
self.scorers.iter().map(|docset| docset.cost()).sum()
self.docsets.iter().map(|docset| docset.cost()).sum()
}
// TODO Also implement `count` with deletes efficiently.

View File

@@ -14,7 +14,7 @@ mod tests {
use common::BitSet;
use super::{SimpleUnion, *};
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::postings::tests::test_skip_against_unoptimized;
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::union::bitset_union::BitSetPostingUnion;
@@ -254,6 +254,27 @@ mod tests {
vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
);
}
#[test]
fn test_buffered_union_seek_into_danger_zone_terminated() {
let scorer1 = ConstScorer::new(VecDocSet::from(vec![1, 2]), 1.0);
let scorer2 = ConstScorer::new(VecDocSet::from(vec![2, 3]), 1.0);
let mut union_scorer =
BufferedUnionScorer::build(vec![scorer1, scorer2], DoNothingCombiner::default, 100);
// Advance to end
while union_scorer.doc() != TERMINATED {
union_scorer.advance();
}
assert_eq!(union_scorer.doc(), TERMINATED);
assert_eq!(
union_scorer.seek_danger(TERMINATED),
SeekDangerResult::SeekLowerBound(TERMINATED)
);
}
}
#[cfg(all(test, feature = "unstable"))]

View File

@@ -1,5 +1,5 @@
use crate::docset::{DocSet, TERMINATED};
use crate::postings::{DocFreq, Postings};
use crate::postings::Postings;
use crate::DocId;
/// A `SimpleUnion` is a `DocSet` that is the union of multiple `DocSet`.
@@ -56,22 +56,6 @@ impl<TDocSet: Postings> Postings for SimpleUnion<TDocSet> {
term_freq
}
fn has_freq(&self) -> bool {
true
}
/// We do not know the actual document frequency, so we return
/// the maximum document frequency of the docsets.
fn doc_freq(&self) -> DocFreq {
let approximate_doc_freq = self
.docsets
.iter()
.map(|docset| u32::from(docset.doc_freq()))
.max()
.unwrap_or(0u32);
DocFreq::Approximate(approximate_doc_freq)
}
fn append_positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
for docset in &mut self.docsets {
let doc = docset.doc();

View File

@@ -17,6 +17,9 @@ pub struct VecDocSet {
impl From<Vec<DocId>> for VecDocSet {
fn from(doc_ids: Vec<DocId>) -> VecDocSet {
// We do not use `slice::is_sorted`, as we require doc ids to be strictly
// increasing (duplicates are not allowed).
assert!(doc_ids.windows(2).all(|w| w[0] < w[1]));
VecDocSet { doc_ids, cursor: 0 }
}
}
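The `windows(2)` assertion above requires strictly increasing doc ids, so duplicates are rejected as well as unsorted input; a quick standalone check:

```
// Strictly increasing: accepted.
assert!([1u32, 2, 5].windows(2).all(|w| w[0] < w[1]));
// Duplicate doc id: rejected (this is why `<` is used rather than `<=`).
assert!(![1u32, 2, 2].windows(2).all(|w| w[0] < w[1]));
// Out of order: rejected.
assert!(![3u32, 1, 2].windows(2).all(|w| w[0] < w[1]));
```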

View File

@@ -34,6 +34,31 @@ pub(crate) fn for_each_docset_buffered<T: DocSet + ?Sized>(
}
}
/// Calls `callback` with all of the `(doc, score)` pairs whose score
/// exceeds a given threshold.
///
/// This method is useful for the [`TopDocs`](crate::collector::TopDocs) collector.
/// For all docsets, the blanket implementation has the benefit
/// of prefiltering (doc, score) pairs, avoiding the
/// virtual dispatch cost.
///
/// More importantly, it makes it possible for scorers to implement
/// important optimizations (e.g. BlockWAND for unions).
pub(crate) fn for_each_pruning_scorer<TScorer: Scorer + ?Sized>(
scorer: &mut TScorer,
mut threshold: Score,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) {
let mut doc = scorer.doc();
while doc != TERMINATED {
let score = scorer.score();
if score > threshold {
threshold = callback(doc, score);
}
doc = scorer.advance();
}
}
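A hypothetical usage sketch for the helper above, in the spirit of a top-k collector: the callback's return value becomes the new pruning threshold (assuming some `scorer` implementing `Scorer` is in scope):

```
// Collect hits and keep raising the threshold to the best score seen,
// so the callback only fires for strictly improving documents.
let mut best_score = 0.0f32;
let mut hits: Vec<(DocId, Score)> = Vec::new();
for_each_pruning_scorer(&mut scorer, best_score, &mut |doc, score| {
    hits.push((doc, score));
    best_score = best_score.max(score);
    best_score // the returned value is the new threshold
});
```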
/// A Weight is the specialization of a `Query`
/// for a given set of segments.
///
@@ -95,7 +120,6 @@ pub trait Weight: Send + Sync + 'static {
///
/// More importantly, it makes it possible for scorers to implement
/// important optimization (e.g. BlockWAND for union).
// TODO remove and move to scorer?
fn for_each_pruning(
&self,
threshold: Score,
@@ -103,7 +127,7 @@ pub trait Weight: Send + Sync + 'static {
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
scorer.for_each_pruning(threshold, callback);
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
}

View File

@@ -7,7 +7,6 @@ use arc_swap::ArcSwap;
pub use warming::Warmer;
use self::warming::WarmingState;
use crate::codec::Codec;
use crate::core::searcher::{SearcherGeneration, SearcherInner};
use crate::directory::{Directory, WatchCallback, WatchHandle, META_LOCK};
use crate::store::DOCSTORE_CACHE_CAPACITY;
@@ -39,17 +38,17 @@ pub enum ReloadPolicy {
/// - number of warming threads, for parallelizing warming work
/// - The cache size of the underlying doc store readers.
#[derive(Clone)]
pub struct IndexReaderBuilder<C: Codec = crate::codec::StandardCodec> {
pub struct IndexReaderBuilder {
reload_policy: ReloadPolicy,
index: Index<C>,
index: Index,
warmers: Vec<Weak<dyn Warmer>>,
num_warming_threads: usize,
doc_store_cache_num_blocks: usize,
}
impl<C: Codec> IndexReaderBuilder<C> {
impl IndexReaderBuilder {
#[must_use]
pub(crate) fn new(index: Index<C>) -> IndexReaderBuilder<C> {
pub(crate) fn new(index: Index) -> IndexReaderBuilder {
IndexReaderBuilder {
reload_policy: ReloadPolicy::OnCommitWithDelay,
index,
@@ -64,7 +63,7 @@ impl<C: Codec> IndexReaderBuilder<C> {
/// Building the reader is a non-trivial operation that requires
/// to open different segment readers. It may take hundreds of milliseconds
/// of time and it may return an error.
pub fn try_into(self) -> crate::Result<IndexReader<C>> {
pub fn try_into(self) -> crate::Result<IndexReader> {
let searcher_generation_inventory = Inventory::default();
let warming_state = WarmingState::new(
self.num_warming_threads,
@@ -107,7 +106,7 @@ impl<C: Codec> IndexReaderBuilder<C> {
///
/// See [`ReloadPolicy`] for more details.
#[must_use]
pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder<C> {
pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
self.reload_policy = reload_policy;
self
}
@@ -119,14 +118,14 @@ impl<C: Codec> IndexReaderBuilder<C> {
pub fn doc_store_cache_num_blocks(
mut self,
doc_store_cache_num_blocks: usize,
) -> IndexReaderBuilder<C> {
) -> IndexReaderBuilder {
self.doc_store_cache_num_blocks = doc_store_cache_num_blocks;
self
}
/// Set the [`Warmer`]s that are invoked when reloading searchable segments.
#[must_use]
pub fn warmers(mut self, warmers: Vec<Weak<dyn Warmer>>) -> IndexReaderBuilder<C> {
pub fn warmers(mut self, warmers: Vec<Weak<dyn Warmer>>) -> IndexReaderBuilder {
self.warmers = warmers;
self
}
@@ -136,33 +135,33 @@ impl<C: Codec> IndexReaderBuilder<C> {
/// This allows parallelizing warming work when there are multiple [`Warmer`] registered with
/// the [`IndexReader`].
#[must_use]
pub fn num_warming_threads(mut self, num_warming_threads: usize) -> IndexReaderBuilder<C> {
pub fn num_warming_threads(mut self, num_warming_threads: usize) -> IndexReaderBuilder {
self.num_warming_threads = num_warming_threads;
self
}
}
impl<C: Codec> TryInto<IndexReader<C>> for IndexReaderBuilder<C> {
impl TryInto<IndexReader> for IndexReaderBuilder {
type Error = crate::TantivyError;
fn try_into(self) -> crate::Result<IndexReader<C>> {
fn try_into(self) -> crate::Result<IndexReader> {
IndexReaderBuilder::try_into(self)
}
}
struct InnerIndexReader<C: Codec> {
struct InnerIndexReader {
doc_store_cache_num_blocks: usize,
index: Index<C>,
index: Index,
warming_state: WarmingState,
searcher: arc_swap::ArcSwap<SearcherInner>,
searcher_generation_counter: Arc<AtomicU64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl<C: Codec> InnerIndexReader<C> {
impl InnerIndexReader {
fn new(
doc_store_cache_num_blocks: usize,
index: Index<C>,
index: Index,
warming_state: WarmingState,
// The searcher_generation_inventory is not used as source, but as target to track the
// loaded segments.
@@ -190,7 +189,7 @@ impl<C: Codec> InnerIndexReader<C> {
///
/// This function acquires a lock to prevent GC from removing files
/// as we are opening our index.
fn open_segment_readers(index: &Index<C>) -> crate::Result<Vec<SegmentReader>> {
fn open_segment_readers(index: &Index) -> crate::Result<Vec<SegmentReader>> {
// Prevents segment files from getting deleted while we are in the process of opening them
let _meta_lock = index.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = index.searchable_segments()?;
@@ -213,7 +212,7 @@ impl<C: Codec> InnerIndexReader<C> {
}
fn create_searcher(
index: &Index<C>,
index: &Index,
doc_store_cache_num_blocks: usize,
warming_state: &WarmingState,
searcher_generation_counter: &Arc<AtomicU64>,
@@ -227,10 +226,9 @@ impl<C: Codec> InnerIndexReader<C> {
);
let schema = index.schema();
// SearcherInner uses Index<StandardCodec> since the codec doesn't affect reading
let searcher = Arc::new(SearcherInner::new(
schema,
index.with_standard_codec(),
index.clone(),
segment_readers,
searcher_generation,
doc_store_cache_num_blocks,
@@ -266,14 +264,14 @@ impl<C: Codec> InnerIndexReader<C> {
///
/// `IndexReader` just wraps an `Arc`.
#[derive(Clone)]
pub struct IndexReader<C: Codec = crate::codec::StandardCodec> {
inner: Arc<InnerIndexReader<C>>,
pub struct IndexReader {
inner: Arc<InnerIndexReader>,
_watch_handle_opt: Option<WatchHandle>,
}
impl<C: Codec> IndexReader<C> {
impl IndexReader {
#[cfg(test)]
pub(crate) fn index(&self) -> Index<C> {
pub(crate) fn index(&self) -> Index {
self.inner.index.clone()
}
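With the codec type parameter gone, reader construction is monomorphic. A sketch of the typical builder chain using only the methods shown above (assuming an `index: Index` in scope):

```
// Build a reader; every builder method now returns a plain IndexReaderBuilder.
let reader: IndexReader = index
    .reader_builder()
    .reload_policy(ReloadPolicy::OnCommitWithDelay)
    .num_warming_threads(1)
    .doc_store_cache_num_blocks(100)
    .try_into()?;
let searcher = reader.searcher();
```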

View File

@@ -95,7 +95,7 @@ impl<'a> TermMerger<'a> {
#[cfg(all(test, feature = "unstable"))]
mod bench {
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use rand::{rng, Rng};
use test::{self, Bencher};
use super::TermMerger;
@@ -117,9 +117,9 @@ mod bench {
let buffer: Vec<u8> = {
let mut terms = vec![];
for _i in 0..num_terms {
let rand_string: String = thread_rng()
let rand_string: String = rng()
.sample_iter(&Alphanumeric)
.take(thread_rng().gen_range(30..42))
.take(rng().random_range(30..42))
.map(char::from)
.collect();
terms.push(rand_string);
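The rand 0.9 renames applied throughout this diff, condensed into one sketch: `thread_rng()` becomes `rng()`, `gen_range` becomes `random_range`, and `choose` now comes from `IndexedRandom` instead of `SliceRandom`:

```
use rand::seq::IndexedRandom; // was: rand::seq::SliceRandom (for `choose`)
use rand::Rng;

fn demo() -> u32 {
    let mut rng = rand::rng(); // was: rand::thread_rng()
    let n = rng.random_range(30..42); // was: rng.gen_range(30..42)
    let pick = [1u32, 2, 3].choose(&mut rng).copied().unwrap_or(0);
    n + pick
}
```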

View File

@@ -25,7 +25,7 @@ zstd-compression = ["zstd"]
proptest = "1"
criterion = { version = "0.5", default-features = false }
names = "0.14"
rand = "0.8"
rand = "0.9"
[[bench]]
name = "stream_bench"

View File

@@ -10,9 +10,9 @@ use tantivy_sstable::{Dictionary, MonotonicU64SSTable};
const CHARSET: &[u8] = b"abcdefghij";
fn generate_key(rng: &mut impl Rng) -> String {
let len = rng.gen_range(3..12);
let len = rng.random_range(3..12);
std::iter::from_fn(|| {
let idx = rng.gen_range(0..CHARSET.len());
let idx = rng.random_range(0..CHARSET.len());
Some(CHARSET[idx] as char)
})
.take(len)

View File

@@ -23,12 +23,12 @@ name = "hashmap"
path = "example/hashmap.rs"
[dev-dependencies]
rand = "0.8.5"
rand = "0.9"
zipf = "7.0.0"
rustc-hash = "2.1.0"
proptest = "1.2.0"
binggan = { version = "0.14.0" }
rand_distr = "0.4.3"
rand_distr = "0.5"
[features]
compare_hash_only = ["ahash"] # Compare hash only, not the key in the Hashmap

Some files were not shown because too many files have changed in this diff.