Mirror of https://github.com/quickwit-oss/tantivy.git
Synced 2026-01-29 12:20:36 +00:00

Compare commits: postings-w...segmentrea

7 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d1555fe9f8 | |
| | b86caeefe2 | |
| | abf1e64f4d | |
| | 12977bc7c4 | |
| | 0c94eb94c3 | |
| | c92e831dde | |
| | 947c0d5f40 | |
Cargo.toml (12 changed lines)
@@ -27,7 +27,7 @@ regex = { version = "1.5.5", default-features = false, features = [
 aho-corasick = "1.0"
 tantivy-fst = "0.5"
 memmap2 = { version = "0.9.0", optional = true }
-lz4_flex = { version = "0.11", default-features = false, optional = true }
+lz4_flex = { version = "0.12", default-features = false, optional = true }
 zstd = { version = "0.13", optional = true, default-features = false }
 tempfile = { version = "3.12.0", optional = true }
 log = "0.4.16"
@@ -50,7 +50,7 @@ fail = { version = "0.5.0", optional = true }
 time = { version = "0.3.35", features = ["serde-well-known"] }
 smallvec = "1.8.0"
 rayon = "1.5.2"
-lru = "0.12.0"
+lru = "0.16.3"
 fastdivide = "0.4.0"
 itertools = "0.14.0"
 measure_time = "0.9.0"
@@ -76,7 +76,7 @@ winapi = "0.3.9"

 [dev-dependencies]
 binggan = "0.14.2"
-rand = "0.8.5"
+rand = "0.9"
 maplit = "1.0.2"
 matches = "0.1.9"
 pretty_assertions = "1.2.1"
@@ -85,7 +85,7 @@ test-log = "0.2.10"
 futures = "0.3.21"
 paste = "1.0.11"
 more-asserts = "0.3.1"
-rand_distr = "0.4.3"
+rand_distr = "0.5"
 time = { version = "0.3.10", features = ["serde-well-known", "macros"] }
 postcard = { version = "1.0.4", features = [
     "use-std",
@@ -189,3 +189,7 @@ harness = false
 [[bench]]
 name = "bool_queries_with_range"
 harness = false
+
+[[bench]]
+name = "str_search_and_get"
+harness = false
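Most of the source hunks below are mechanical fallout from the rand 0.8 to 0.9 and rand_distr 0.4 to 0.5 bumps above. A minimal sketch of the rename mapping, written against the rand 0.9 API (variable names here are illustrative only, not from the diff):

```rust
use rand::prelude::*;

fn rand_09_rename_sketch() {
    let mut rng = rand::rng(); // was rand::thread_rng()
    let x: u64 = rng.random(); // was rng.gen::<u64>()
    let r = rng.random_range(0u64..100); // was rng.gen_range(0u64..100)
    let b = rng.random_bool(0.1); // was rng.gen_bool(0.1)
    // Module rename: rand::distributions is now rand::distr, and distribution
    // constructors such as rand::distr::Uniform::new_inclusive(1, 100) became
    // fallible, returning a Result that the call sites below unwrap().
    let _ = (x, r, b);
}
```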
@@ -1,8 +1,8 @@
 use binggan::plugins::PeakMemAllocPlugin;
 use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
-use rand::distributions::WeightedIndex;
-use rand::prelude::SliceRandom;
+use rand::distr::weighted::WeightedIndex;
 use rand::rngs::StdRng;
+use rand::seq::IndexedRandom;
 use rand::{Rng, SeedableRng};
 use rand_distr::Distribution;
 use serde_json::json;
@@ -532,7 +532,7 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
     // Prepare 1000 unique terms sampled using a Zipf distribution.
     // Exponent ~1.1 approximates top-20 terms covering around ~20%.
     let terms_1000: Vec<String> = (1..=1000).map(|i| format!("term_{i}")).collect();
-    let zipf_1000 = rand_distr::Zipf::new(1000, 1.1f64).unwrap();
+    let zipf_1000 = rand_distr::Zipf::new(1000.0, 1.1f64).unwrap();

     {
         let mut rng = StdRng::from_seed([1u8; 32]);
@@ -576,8 +576,8 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
         }
         let _val_max = 1_000_000.0;
         for _ in 0..doc_with_value {
-            let val: f64 = rng.gen_range(0.0..1_000_000.0);
-            let json = if rng.gen_bool(0.1) {
+            let val: f64 = rng.random_range(0.0..1_000_000.0);
+            let json = if rng.random_bool(0.1) {
                 // 10% are numeric values
                 json!({ "mixed_type": val })
             } else {
@@ -586,7 +586,7 @@ fn get_test_index_bench(cardinality: Cardinality) -> tantivy::Result<Index> {
         index_writer.add_document(doc!(
             text_field => "cool",
             json_field => json,
-            text_field_all_unique_terms => format!("unique_term_{}", rng.gen::<u64>()),
+            text_field_all_unique_terms => format!("unique_term_{}", rng.random::<u64>()),
             text_field_many_terms => many_terms_data.choose(&mut rng).unwrap().to_string(),
             text_field_few_terms_status => status_field_data[log_level_distribution.sample(&mut rng)].0,
             text_field_1000_terms_zipf => terms_1000[zipf_1000.sample(&mut rng) as usize - 1].as_str(),
@@ -55,29 +55,29 @@ fn build_shared_indices(num_docs: usize, p_a: f32, p_b: f32, p_c: f32) -> (Bench
     {
         let mut writer = index.writer_with_num_threads(1, 500_000_000).unwrap();
         for _ in 0..num_docs {
-            let has_a = rng.gen_bool(p_a as f64);
-            let has_b = rng.gen_bool(p_b as f64);
-            let has_c = rng.gen_bool(p_c as f64);
-            let score = rng.gen_range(0u64..100u64);
-            let score2 = rng.gen_range(0u64..100_000u64);
+            let has_a = rng.random_bool(p_a as f64);
+            let has_b = rng.random_bool(p_b as f64);
+            let has_c = rng.random_bool(p_c as f64);
+            let score = rng.random_range(0u64..100u64);
+            let score2 = rng.random_range(0u64..100_000u64);
             let mut title_tokens: Vec<&str> = Vec::new();
             let mut body_tokens: Vec<&str> = Vec::new();
             if has_a {
-                if rng.gen_bool(0.1) {
+                if rng.random_bool(0.1) {
                     title_tokens.push("a");
                 } else {
                     body_tokens.push("a");
                 }
             }
             if has_b {
-                if rng.gen_bool(0.1) {
+                if rng.random_bool(0.1) {
                     title_tokens.push("b");
                 } else {
                     body_tokens.push("b");
                 }
             }
             if has_c {
-                if rng.gen_bool(0.1) {
+                if rng.random_bool(0.1) {
                     title_tokens.push("c");
                 } else {
                     body_tokens.push("c");
@@ -36,13 +36,13 @@ fn build_shared_indices(num_docs: usize, p_title_a: f32, distribution: &str) ->
         "dense" => {
             for doc_id in 0..num_docs {
                 // Always add title to avoid empty documents
-                let title_token = if rng.gen_bool(p_title_a as f64) {
+                let title_token = if rng.random_bool(p_title_a as f64) {
                     "a"
                 } else {
                     "b"
                 };

-                let num_rand = rng.gen_range(0u64..1000u64);
+                let num_rand = rng.random_range(0u64..1000u64);

                 let num_asc = (doc_id / 10000) as u64;
@@ -60,13 +60,13 @@ fn build_shared_indices(num_docs: usize, p_title_a: f32, distribution: &str) ->
         "sparse" => {
             for doc_id in 0..num_docs {
                 // Always add title to avoid empty documents
-                let title_token = if rng.gen_bool(p_title_a as f64) {
+                let title_token = if rng.random_bool(p_title_a as f64) {
                     "a"
                 } else {
                     "b"
                 };

-                let num_rand = rng.gen_range(0u64..10000000u64);
+                let num_rand = rng.random_range(0u64..10000000u64);

                 let num_asc = doc_id as u64;
@@ -33,7 +33,7 @@ fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
     match distribution {
         "dense" => {
             for doc_id in 0..num_docs {
-                let num_rand = rng.gen_range(0u64..1000u64);
+                let num_rand = rng.random_range(0u64..1000u64);
                 let num_asc = (doc_id / 10000) as u64;

                 writer
@@ -46,7 +46,7 @@ fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
         }
         "sparse" => {
             for doc_id in 0..num_docs {
-                let num_rand = rng.gen_range(0u64..10000000u64);
+                let num_rand = rng.random_range(0u64..10000000u64);
                 let num_asc = doc_id as u64;

                 writer
@@ -97,20 +97,20 @@ fn get_index_0_to_100() -> Index {
     let num_vals = 100_000;
     let docs: Vec<_> = (0..num_vals)
         .map(|_i| {
-            let id_name = if rng.gen_bool(0.01) {
+            let id_name = if rng.random_bool(0.01) {
                 "veryfew".to_string() // 1%
-            } else if rng.gen_bool(0.1) {
+            } else if rng.random_bool(0.1) {
                 "few".to_string() // 9%
             } else {
                 "most".to_string() // 90%
             };
             Doc {
                 id_name,
-                id: rng.gen_range(0..100),
+                id: rng.random_range(0..100),
                 // Multiply by 1000, so that we create most buckets in the compact space
                 // The benches depend on this range to select n-percent of elements with the
                 // methods below.
-                ip: Ipv6Addr::from_u128(rng.gen_range(0..100) * 1000),
+                ip: Ipv6Addr::from_u128(rng.random_range(0..100) * 1000),
             }
         })
         .collect();
benches/str_search_and_get.rs: 421 lines (new file)
@@ -0,0 +1,421 @@
+// This benchmark compares different approaches for retrieving string values:
+//
+// 1. Fast Field Approach: retrieves string values via term_ords() and ord_to_str()
+//
+// 2. Doc Store Approach: retrieves string values via searcher.doc() and field extraction
+//
+// The benchmark includes various data distributions:
+// - Dense Sequential: Sequential document IDs with dense data
+// - Dense Random: Random document IDs with dense data
+// - Sparse Sequential: Sequential document IDs with sparse data
+// - Sparse Random: Random document IDs with sparse data
+use std::ops::Bound;
+
+use binggan::{black_box, BenchGroup, BenchRunner};
+use rand::prelude::*;
+use rand::rngs::StdRng;
+use rand::SeedableRng;
+use tantivy::collector::{Count, DocSetCollector};
+use tantivy::query::RangeQuery;
+use tantivy::schema::document::TantivyDocument;
+use tantivy::schema::{Schema, Value, FAST, STORED, STRING};
+use tantivy::{doc, Index, ReloadPolicy, Searcher, Term};
+
+#[derive(Clone)]
+struct BenchIndex {
+    #[allow(dead_code)]
+    index: Index,
+    searcher: Searcher,
+}
+
+fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
+    // Schema with string fast field and stored field for doc access
+    let mut schema_builder = Schema::builder();
+    let f_str_fast = schema_builder.add_text_field("str_fast", STRING | STORED | FAST);
+    let f_str_stored = schema_builder.add_text_field("str_stored", STRING | STORED);
+    let schema = schema_builder.build();
+    let index = Index::create_in_ram(schema.clone());
+
+    // Populate index with stable RNG for reproducibility.
+    let mut rng = StdRng::from_seed([7u8; 32]);
+
+    {
+        let mut writer = index.writer_with_num_threads(1, 4_000_000_000).unwrap();
+
+        match distribution {
+            "dense_random" => {
+                for _doc_id in 0..num_docs {
+                    let suffix = rng.gen_range(0u64..1000u64);
+                    let str_val = format!("str_{:03}", suffix);
+
+                    writer
+                        .add_document(doc!(
+                            f_str_fast=>str_val.clone(),
+                            f_str_stored=>str_val,
+                        ))
+                        .unwrap();
+                }
+            }
+            "dense_sequential" => {
+                for doc_id in 0..num_docs {
+                    let suffix = doc_id as u64 % 1000;
+                    let str_val = format!("str_{:03}", suffix);
+
+                    writer
+                        .add_document(doc!(
+                            f_str_fast=>str_val.clone(),
+                            f_str_stored=>str_val,
+                        ))
+                        .unwrap();
+                }
+            }
+            "sparse_random" => {
+                for _doc_id in 0..num_docs {
+                    let suffix = rng.gen_range(0u64..1000000u64);
+                    let str_val = format!("str_{:07}", suffix);
+
+                    writer
+                        .add_document(doc!(
+                            f_str_fast=>str_val.clone(),
+                            f_str_stored=>str_val,
+                        ))
+                        .unwrap();
+                }
+            }
+            "sparse_sequential" => {
+                for doc_id in 0..num_docs {
+                    let suffix = doc_id as u64;
+                    let str_val = format!("str_{:07}", suffix);
+
+                    writer
+                        .add_document(doc!(
+                            f_str_fast=>str_val.clone(),
+                            f_str_stored=>str_val,
+                        ))
+                        .unwrap();
+                }
+            }
+            _ => {
+                panic!("Unsupported distribution type");
+            }
+        }
+        writer.commit().unwrap();
+    }
+
+    // Prepare reader/searcher once.
+    let reader = index
+        .reader_builder()
+        .reload_policy(ReloadPolicy::Manual)
+        .try_into()
+        .unwrap();
+    let searcher = reader.searcher();
+
+    BenchIndex { index, searcher }
+}
+
+fn main() {
+    // Prepare corpora with varying scenarios
+    let scenarios = vec![
+        (
+            "dense_random_search_low_range".to_string(),
+            1_000_000,
+            "dense_random",
+            0,
+            9,
+        ),
+        (
+            "dense_random_search_high_range".to_string(),
+            1_000_000,
+            "dense_random",
+            990,
+            999,
+        ),
+        (
+            "dense_sequential_search_low_range".to_string(),
+            1_000_000,
+            "dense_sequential",
+            0,
+            9,
+        ),
+        (
+            "dense_sequential_search_high_range".to_string(),
+            1_000_000,
+            "dense_sequential",
+            990,
+            999,
+        ),
+        (
+            "sparse_random_search_low_range".to_string(),
+            1_000_000,
+            "sparse_random",
+            0,
+            9999,
+        ),
+        (
+            "sparse_random_search_high_range".to_string(),
+            1_000_000,
+            "sparse_random",
+            990_000,
+            999_999,
+        ),
+        (
+            "sparse_sequential_search_low_range".to_string(),
+            1_000_000,
+            "sparse_sequential",
+            0,
+            9999,
+        ),
+        (
+            "sparse_sequential_search_high_range".to_string(),
+            1_000_000,
+            "sparse_sequential",
+            990_000,
+            999_999,
+        ),
+    ];
+
+    let mut runner = BenchRunner::new();
+    for (scenario_id, n, distribution, range_low, range_high) in scenarios {
+        let bench_index = build_shared_indices(n, distribution);
+        let mut group = runner.new_group();
+        group.set_name(scenario_id);
+
+        let field = bench_index.searcher.schema().get_field("str_fast").unwrap();
+
+        let (lower_str, upper_str) =
+            if distribution == "dense_sequential" || distribution == "dense_random" {
+                (
+                    format!("str_{:03}", range_low),
+                    format!("str_{:03}", range_high),
+                )
+            } else {
+                (
+                    format!("str_{:07}", range_low),
+                    format!("str_{:07}", range_high),
+                )
+            };
+
+        let lower_term = Term::from_field_text(field, &lower_str);
+        let upper_term = Term::from_field_text(field, &upper_str);
+
+        let query = RangeQuery::new(Bound::Included(lower_term), Bound::Included(upper_term));
+
+        run_benchmark_tasks(&mut group, &bench_index, query, range_low, range_high);
+
+        group.run();
+    }
+}
+
+/// Run all benchmark tasks for a given range query
+fn run_benchmark_tasks(
+    bench_group: &mut BenchGroup,
+    bench_index: &BenchIndex,
+    query: RangeQuery,
+    range_low: u64,
+    range_high: u64,
+) {
+    // Test count of matching documents
+    add_bench_task_count(
+        bench_group,
+        bench_index,
+        query.clone(),
+        range_low,
+        range_high,
+    );
+
+    // Test fetching all DocIds of matching documents
+    add_bench_task_docset(
+        bench_group,
+        bench_index,
+        query.clone(),
+        range_low,
+        range_high,
+    );
+
+    // Test fetching all string fast field values of matching documents
+    add_bench_task_fetch_all_strings(
+        bench_group,
+        bench_index,
+        query.clone(),
+        range_low,
+        range_high,
+    );
+
+    // Test fetching all string values of matching documents through doc() method
+    add_bench_task_fetch_all_strings_from_doc(
+        bench_group,
+        bench_index,
+        query,
+        range_low,
+        range_high,
+    );
+}
+
+fn add_bench_task_count(
+    bench_group: &mut BenchGroup,
+    bench_index: &BenchIndex,
+    query: RangeQuery,
+    range_low: u64,
+    range_high: u64,
+) {
+    let task_name = format!("string_search_count_[{}-{}]", range_low, range_high);
+
+    let search_task = CountSearchTask {
+        searcher: bench_index.searcher.clone(),
+        query,
+    };
+    bench_group.register(task_name, move |_| black_box(search_task.run()));
+}
+
+fn add_bench_task_docset(
+    bench_group: &mut BenchGroup,
+    bench_index: &BenchIndex,
+    query: RangeQuery,
+    range_low: u64,
+    range_high: u64,
+) {
+    let task_name = format!("string_fetch_all_docset_[{}-{}]", range_low, range_high);
+
+    let search_task = DocSetSearchTask {
+        searcher: bench_index.searcher.clone(),
+        query,
+    };
+    bench_group.register(task_name, move |_| black_box(search_task.run()));
+}
+
+fn add_bench_task_fetch_all_strings(
+    bench_group: &mut BenchGroup,
+    bench_index: &BenchIndex,
+    query: RangeQuery,
+    range_low: u64,
+    range_high: u64,
+) {
+    let task_name = format!(
+        "string_fastfield_fetch_all_strings_[{}-{}]",
+        range_low, range_high
+    );
+
+    let search_task = FetchAllStringsSearchTask {
+        searcher: bench_index.searcher.clone(),
+        query,
+    };
+
+    bench_group.register(task_name, move |_| {
+        let result = black_box(search_task.run());
+        result.len()
+    });
+}
+
+fn add_bench_task_fetch_all_strings_from_doc(
+    bench_group: &mut BenchGroup,
+    bench_index: &BenchIndex,
+    query: RangeQuery,
+    range_low: u64,
+    range_high: u64,
+) {
+    let task_name = format!(
+        "string_doc_fetch_all_strings_[{}-{}]",
+        range_low, range_high
+    );
+
+    let search_task = FetchAllStringsFromDocTask {
+        searcher: bench_index.searcher.clone(),
+        query,
+    };
+
+    bench_group.register(task_name, move |_| {
+        let result = black_box(search_task.run());
+        result.len()
+    });
+}
+
+struct CountSearchTask {
+    searcher: Searcher,
+    query: RangeQuery,
+}
+
+impl CountSearchTask {
+    #[inline(never)]
+    pub fn run(&self) -> usize {
+        self.searcher.search(&self.query, &Count).unwrap()
+    }
+}
+
+struct DocSetSearchTask {
+    searcher: Searcher,
+    query: RangeQuery,
+}
+
+impl DocSetSearchTask {
+    #[inline(never)]
+    pub fn run(&self) -> usize {
+        let result = self.searcher.search(&self.query, &DocSetCollector).unwrap();
+        result.len()
+    }
+}
+
+struct FetchAllStringsSearchTask {
+    searcher: Searcher,
+    query: RangeQuery,
+}
+
+impl FetchAllStringsSearchTask {
+    #[inline(never)]
+    pub fn run(&self) -> Vec<String> {
+        let doc_addresses = self.searcher.search(&self.query, &DocSetCollector).unwrap();
+        let mut docs = doc_addresses.into_iter().collect::<Vec<_>>();
+        docs.sort();
+        let mut strings = Vec::with_capacity(docs.len());
+
+        for doc_address in docs {
+            let segment_reader = &self.searcher.segment_readers()[doc_address.segment_ord as usize];
+            let str_column_opt = segment_reader.fast_fields().str("str_fast");
+
+            if let Ok(Some(str_column)) = str_column_opt {
+                let doc_id = doc_address.doc_id;
+                let term_ord = str_column.term_ords(doc_id).next().unwrap();
+                let mut str_buffer = String::new();
+                if str_column.ord_to_str(term_ord, &mut str_buffer).is_ok() {
+                    strings.push(str_buffer);
+                }
+            }
+        }
+
+        strings
+    }
+}
+
+struct FetchAllStringsFromDocTask {
+    searcher: Searcher,
+    query: RangeQuery,
+}
+
+impl FetchAllStringsFromDocTask {
+    #[inline(never)]
+    pub fn run(&self) -> Vec<String> {
+        let doc_addresses = self.searcher.search(&self.query, &DocSetCollector).unwrap();
+        let mut docs = doc_addresses.into_iter().collect::<Vec<_>>();
+        docs.sort();
+        let mut strings = Vec::with_capacity(docs.len());
+
+        let str_stored_field = self
+            .searcher
+            .schema()
+            .get_field("str_stored")
+            .expect("str_stored field should exist");
+
+        for doc_address in docs {
+            // Get the document from the doc store (row store access)
+            if let Ok(doc) = self.searcher.doc::<TantivyDocument>(doc_address) {
+                // Extract string values from the stored field
+                if let Some(field_value) = doc.get_first(str_stored_field) {
+                    if let Some(text) = field_value.as_value().as_str() {
+                        strings.push(text.to_string());
+                    }
+                }
+            }
+        }
+
+        strings
+    }
+}
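For readers skimming the new benchmark, the fast-field path it measures boils down to one per-document lookup: resolve the string column once per segment, then map doc id to term ordinal to string. A condensed, hypothetical helper using the same tantivy calls as FetchAllStringsSearchTask above (str(), term_ords(), ord_to_str()); this is a sketch, not part of the file:

```rust
use tantivy::{DocAddress, Searcher};

// Hypothetical helper distilled from FetchAllStringsSearchTask above.
fn first_str_value(searcher: &Searcher, addr: DocAddress) -> Option<String> {
    let segment_reader = &searcher.segment_readers()[addr.segment_ord as usize];
    // Resolve the dictionary-encoded string column for this segment.
    let str_column = segment_reader.fast_fields().str("str_fast").ok()??;
    // A document may carry several ordinals; the benchmark reads only the first.
    let term_ord = str_column.term_ords(addr.doc_id).next()?;
    let mut buf = String::new();
    // ord_to_str is the dictionary lookup from ordinal back to the string.
    str_column.ord_to_str(term_ord, &mut buf).ok()?;
    Some(buf)
}
```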
@@ -18,5 +18,5 @@ homepage = "https://github.com/quickwit-oss/tantivy"
 bitpacking = { version = "0.9.2", default-features = false, features = ["bitpacker1x"] }

 [dev-dependencies]
-rand = "0.8"
+rand = "0.9"
 proptest = "1"
@@ -4,8 +4,8 @@ extern crate test;

 #[cfg(test)]
 mod tests {
+    use rand::rng;
     use rand::seq::IteratorRandom;
-    use rand::thread_rng;
     use tantivy_bitpacker::{BitPacker, BitUnpacker, BlockedBitpacker};
     use test::Bencher;

@@ -27,7 +27,7 @@ mod tests {
         let num_els = 1_000_000u32;
         let bit_unpacker = BitUnpacker::new(bit_width);
        let data = create_bitpacked_data(bit_width, num_els);
-        let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut thread_rng(), 100_000);
+        let idxs: Vec<u32> = (0..num_els).choose_multiple(&mut rng(), 100_000);
         b.iter(|| {
             let mut out = 0u64;
             for &idx in &idxs {
@@ -22,7 +22,7 @@ downcast-rs = "2.0.1"
 [dev-dependencies]
 proptest = "1"
 more-asserts = "0.3.1"
-rand = "0.8"
+rand = "0.9"
 binggan = "0.14.0"

 [[bench]]
@@ -9,7 +9,7 @@ use tantivy_columnar::column_values::{CodecType, serialize_and_load_u64_based_co
 fn get_data() -> Vec<u64> {
     let mut rng = StdRng::seed_from_u64(2u64);
     let mut data: Vec<_> = (100..55_000_u64)
-        .map(|num| num + rng.r#gen::<u8>() as u64)
+        .map(|num| num + rng.random::<u8>() as u64)
         .collect();
     data.push(99_000);
     data.insert(1000, 2000);

@@ -6,7 +6,7 @@ use tantivy_columnar::column_values::{CodecType, serialize_u64_based_column_valu
 fn get_data() -> Vec<u64> {
     let mut rng = StdRng::seed_from_u64(2u64);
     let mut data: Vec<_> = (100..55_000_u64)
-        .map(|num| num + rng.r#gen::<u8>() as u64)
+        .map(|num| num + rng.random::<u8>() as u64)
         .collect();
     data.push(99_000);
     data.insert(1000, 2000);
@@ -8,7 +8,7 @@ const TOTAL_NUM_VALUES: u32 = 1_000_000;
 fn gen_optional_index(fill_ratio: f64) -> OptionalIndex {
     let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
     let vals: Vec<u32> = (0..TOTAL_NUM_VALUES)
-        .map(|_| rng.gen_bool(fill_ratio))
+        .map(|_| rng.random_bool(fill_ratio))
         .enumerate()
         .filter(|(_pos, val)| *val)
         .map(|(pos, _)| pos as u32)
@@ -25,7 +25,7 @@ fn random_range_iterator(
     let mut rng: StdRng = StdRng::from_seed([1u8; 32]);
     let mut current = start;
     std::iter::from_fn(move || {
-        current += rng.gen_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
+        current += rng.random_range(avg_step_size - avg_deviation..=avg_step_size + avg_deviation);
         if current >= end { None } else { Some(current) }
     })
 }
@@ -39,7 +39,7 @@ fn get_data_50percent_item() -> Vec<u128> {

     let mut data = vec![];
     for _ in 0..300_000 {
-        let val = rng.gen_range(1..=100);
+        let val = rng.random_range(1..=100);
         data.push(val);
     }
     data.push(SINGLE_ITEM);

@@ -34,7 +34,7 @@ fn get_data_50percent_item() -> Vec<u128> {

     let mut data = vec![];
     for _ in 0..300_000 {
-        let val = rng.gen_range(1..=100);
+        let val = rng.random_range(1..=100);
         data.push(val);
     }
     data.push(SINGLE_ITEM);
@@ -268,7 +268,7 @@ mod tests {

     #[test]
     fn linear_interpol_fast_field_rand() {
-        let mut rng = rand::thread_rng();
+        let mut rng = rand::rng();
         for _ in 0..50 {
             let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
             create_and_validate::<LinearCodec>(&data, "random");

@@ -122,7 +122,7 @@ pub(crate) fn create_and_validate<TColumnCodec: ColumnCodec>(
     assert_eq!(vals, buffer);

     if !vals.is_empty() {
-        let test_rand_idx = rand::thread_rng().gen_range(0..=vals.len() - 1);
+        let test_rand_idx = rand::rng().random_range(0..=vals.len() - 1);
         let expected_positions: Vec<u32> = vals
             .iter()
             .enumerate()
@@ -21,5 +21,5 @@ serde = { version = "1.0.136", features = ["derive"] }
 [dev-dependencies]
 binggan = "0.14.0"
 proptest = "1.0.0"
-rand = "0.8.4"
+rand = "0.9"
@@ -1,6 +1,6 @@
 use binggan::{BenchRunner, black_box};
+use rand::rng;
 use rand::seq::IteratorRandom;
-use rand::thread_rng;
 use tantivy_common::{BitSet, TinySet, serialize_vint_u32};

 fn bench_vint() {
@@ -17,7 +17,7 @@ fn bench_vint() {
         black_box(out);
     });

-    let vals: Vec<u32> = (0..20_000).choose_multiple(&mut thread_rng(), 100_000);
+    let vals: Vec<u32> = (0..20_000).choose_multiple(&mut rng(), 100_000);
     runner.bench_function("bench_vint_rand", move |_| {
         let mut out = 0u64;
         for val in vals.iter().cloned() {
@@ -416,7 +416,7 @@ mod tests {
     use std::collections::HashSet;

     use ownedbytes::OwnedBytes;
-    use rand::distributions::Bernoulli;
+    use rand::distr::Bernoulli;
     use rand::rngs::StdRng;
     use rand::{Rng, SeedableRng};

@@ -70,7 +70,7 @@ impl Collector for StatsCollector {
     fn for_segment(
         &self,
         _segment_local_id: u32,
-        segment_reader: &SegmentReader,
+        segment_reader: &dyn SegmentReader,
     ) -> tantivy::Result<StatsSegmentCollector> {
         let fast_field_reader = segment_reader.fast_fields().u64(&self.field)?;
         Ok(StatsSegmentCollector {
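The &SegmentReader to &dyn SegmentReader changes here and in the hunks below suggest this branch turns SegmentReader into a trait object at the Collector boundary. A hedged sketch of a custom collector under the new signature, modeled on the Count collector whose diff appears later in this compare (names here are illustrative):

```rust
use tantivy::collector::{Collector, SegmentCollector};
use tantivy::{DocId, Score, SegmentOrdinal, SegmentReader};

// Hypothetical minimal collector, shown only to illustrate the new
// `&dyn SegmentReader` parameter this branch threads through `for_segment`.
struct DocCounter;

struct DocCounterSegment {
    count: usize,
}

impl SegmentCollector for DocCounterSegment {
    type Fruit = usize;

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.count += 1;
    }

    fn harvest(self) -> usize {
        self.count
    }
}

impl Collector for DocCounter {
    type Fruit = usize;
    type Child = DocCounterSegment;

    // Before this branch: `_reader: &SegmentReader` (a concrete struct).
    // After: the reader arrives as a trait object.
    fn for_segment(
        &self,
        _segment_local_id: SegmentOrdinal,
        _reader: &dyn SegmentReader,
    ) -> tantivy::Result<DocCounterSegment> {
        Ok(DocCounterSegment { count: 0 })
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    fn merge_fruits(&self, segment_fruits: Vec<usize>) -> tantivy::Result<usize> {
        Ok(segment_fruits.into_iter().sum())
    }
}
```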
@@ -65,7 +65,7 @@ fn main() -> tantivy::Result<()> {
     );
     let top_docs_by_custom_score =
         // Call TopDocs with a custom tweak score
-        TopDocs::with_limit(2).tweak_score(move |segment_reader: &SegmentReader| {
+        TopDocs::with_limit(2).tweak_score(move |segment_reader: &dyn SegmentReader| {
             let ingredient_reader = segment_reader.facet_reader("ingredient").unwrap();
             let facet_dict = ingredient_reader.facet_dict();

@@ -43,7 +43,7 @@ impl DynamicPriceColumn {
         }
     }

-    pub fn price_for_segment(&self, segment_reader: &SegmentReader) -> Option<Arc<Vec<Price>>> {
+    pub fn price_for_segment(&self, segment_reader: &dyn SegmentReader) -> Option<Arc<Vec<Price>>> {
         let segment_key = (segment_reader.segment_id(), segment_reader.delete_opstamp());
         self.price_cache.read().unwrap().get(&segment_key).cloned()
     }
@@ -157,7 +157,7 @@ fn main() -> tantivy::Result<()> {
     let query = query_parser.parse_query("cooking")?;

     let searcher = reader.searcher();
-    let score_by_price = move |segment_reader: &SegmentReader| {
+    let score_by_price = move |segment_reader: &dyn SegmentReader| {
         let price = price_dynamic_column
             .price_for_segment(segment_reader)
             .unwrap();
@@ -57,7 +57,7 @@ pub(crate) fn get_numeric_or_date_column_types() -> &'static [ColumnType] {

 /// Get fast field reader or empty as default.
 pub(crate) fn get_ff_reader(
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     field_name: &str,
     allowed_column_types: Option<&[ColumnType]>,
 ) -> crate::Result<(columnar::Column<u64>, ColumnType)> {
@@ -74,7 +74,7 @@ pub(crate) fn get_ff_reader(
 }

 pub(crate) fn get_dynamic_columns(
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     field_name: &str,
 ) -> crate::Result<Vec<columnar::DynamicColumn>> {
     let ff_fields = reader.fast_fields().dynamic_column_handles(field_name)?;
@@ -90,7 +90,7 @@ pub(crate) fn get_dynamic_columns(
 ///
 /// Is guaranteed to return at least one column.
 pub(crate) fn get_all_ff_reader_or_empty(
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     field_name: &str,
     allowed_column_types: Option<&[ColumnType]>,
     fallback_type: ColumnType,
@@ -469,7 +469,7 @@ impl AggKind {
 /// Build AggregationsData by walking the request tree.
 pub(crate) fn build_aggregations_data_from_req(
     aggs: &Aggregations,
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     segment_ordinal: SegmentOrdinal,
     context: AggContextParams,
 ) -> crate::Result<AggregationsSegmentCtx> {
@@ -489,7 +489,7 @@ pub(crate) fn build_aggregations_data_from_req(
 fn build_nodes(
     agg_name: &str,
     req: &Aggregation,
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     segment_ordinal: SegmentOrdinal,
     data: &mut AggregationsSegmentCtx,
     is_top_level: bool,
@@ -728,7 +728,6 @@ fn build_nodes(
     let idx_in_req_data = data.push_filter_req_data(FilterAggReqData {
         name: agg_name.to_string(),
         req: filter_req.clone(),
-        segment_reader: reader.clone(),
         evaluator,
         matching_docs_buffer,
         is_top_level,
@@ -745,7 +744,7 @@ fn build_nodes(

 fn build_children(
     aggs: &Aggregations,
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     segment_ordinal: SegmentOrdinal,
     data: &mut AggregationsSegmentCtx,
 ) -> crate::Result<Vec<AggRefNode>> {
@@ -764,7 +763,7 @@ fn build_children(
 }

 fn get_term_agg_accessors(
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     field_name: &str,
     missing: &Option<Key>,
 ) -> crate::Result<Vec<(Column<u64>, ColumnType)>> {
@@ -817,7 +816,7 @@ fn build_terms_or_cardinality_nodes(
     agg_name: &str,
     field_name: &str,
     missing: &Option<Key>,
-    reader: &SegmentReader,
+    reader: &dyn SegmentReader,
     segment_ordinal: SegmentOrdinal,
     data: &mut AggregationsSegmentCtx,
     sub_aggs: &Aggregations,
@@ -401,8 +401,6 @@ pub struct FilterAggReqData {
     pub name: String,
     /// The filter aggregation
     pub req: FilterAggregation,
-    /// The segment reader
-    pub segment_reader: SegmentReader,
     /// Document evaluator for the filter query (precomputed BitSet)
     /// This is built once when the request data is created
     pub evaluator: DocumentQueryEvaluator,
@@ -414,9 +412,8 @@ pub struct FilterAggReqData {

 impl FilterAggReqData {
     pub(crate) fn get_memory_consumption(&self) -> usize {
-        // Estimate: name + segment reader reference + bitset + buffer capacity
+        // Estimate: name + bitset + buffer capacity
         self.name.len()
-            + std::mem::size_of::<SegmentReader>()
             + self.evaluator.bitset.len() / 8 // BitSet memory (bits to bytes)
             + self.matching_docs_buffer.capacity() * std::mem::size_of::<DocId>()
             + std::mem::size_of::<bool>()
@@ -438,7 +435,7 @@ impl DocumentQueryEvaluator {
     pub(crate) fn new(
         query: Box<dyn Query>,
         schema: Schema,
-        segment_reader: &SegmentReader,
+        segment_reader: &dyn SegmentReader,
     ) -> crate::Result<Self> {
         let max_doc = segment_reader.max_doc();

@@ -66,7 +66,7 @@ impl Collector for DistributedAggregationCollector {
     fn for_segment(
         &self,
         segment_local_id: crate::SegmentOrdinal,
-        reader: &crate::SegmentReader,
+        reader: &dyn SegmentReader,
     ) -> crate::Result<Self::Child> {
         AggregationSegmentCollector::from_agg_req_and_reader(
             &self.agg,
@@ -96,7 +96,7 @@ impl Collector for AggregationCollector {
     fn for_segment(
         &self,
         segment_local_id: crate::SegmentOrdinal,
-        reader: &crate::SegmentReader,
+        reader: &dyn SegmentReader,
     ) -> crate::Result<Self::Child> {
         AggregationSegmentCollector::from_agg_req_and_reader(
             &self.agg,
@@ -145,7 +145,7 @@ impl AggregationSegmentCollector {
     /// reader. Also includes validation, e.g. checking field types and existence.
     pub fn from_agg_req_and_reader(
         agg: &Aggregations,
-        reader: &SegmentReader,
+        reader: &dyn SegmentReader,
         segment_ordinal: SegmentOrdinal,
         context: &AggContextParams,
     ) -> crate::Result<Self> {
@@ -1,49 +0,0 @@
-mod postings;
-mod standard;
-
-use std::borrow::Cow;
-
-use serde::{Deserialize, Serialize};
-pub use standard::StandardCodec;
-
-pub trait Codec: Clone + std::fmt::Debug + Send + Sync + 'static {
-    type PostingsCodec;
-
-    const NAME: &'static str;
-
-    fn from_json_props(json_value: &serde_json::Value) -> crate::Result<Self>;
-    fn to_json_props(&self) -> serde_json::Value;
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-pub struct CodecConfiguration {
-    name: Cow<'static, str>,
-    #[serde(default, skip_serializing_if = "serde_json::Value::is_null")]
-    props: serde_json::Value,
-}
-
-impl CodecConfiguration {
-    pub fn from_codec<C: Codec>(codec: &C) -> Self {
-        CodecConfiguration {
-            name: Cow::Borrowed(C::NAME),
-            props: codec.to_json_props(),
-        }
-    }
-
-    pub fn to_codec<C: Codec>(&self) -> crate::Result<C> {
-        if self.name != C::NAME {
-            return Err(crate::TantivyError::InvalidArgument(format!(
-                "Codec name mismatch: expected {}, got {}",
-                C::NAME,
-                self.name
-            )));
-        }
-        C::from_json_props(&self.props)
-    }
-}
-
-impl Default for CodecConfiguration {
-    fn default() -> Self {
-        CodecConfiguration::from_codec(&StandardCodec)
-    }
-}
@@ -1,23 +0,0 @@
-use std::io;
-
-use crate::fieldnorm::FieldNormReader;
-use crate::schema::IndexRecordOption;
-use crate::{DocId, Score};
-
-pub trait PostingsCodec {
-    type PostingsSerializer: PostingsSerializer;
-}
-
-pub trait PostingsSerializer {
-    fn new(
-        avg_fieldnorm: Score,
-        mode: IndexRecordOption,
-        fieldnorm_reader: Option<FieldNormReader>,
-    ) -> Self;
-
-    fn new_term(&mut self, term_doc_freq: u32, record_term_freq: bool);
-
-    fn write_doc(&mut self, doc_id: DocId, term_freq: u32);
-
-    fn close_term(&mut self, doc_freq: u32, wrt: &mut impl io::Write) -> io::Result<()>;
-}
@@ -1,29 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use crate::codec::standard::postings::StandardPostingsCodec;
-use crate::codec::Codec;
-
-mod postings;
-
-#[derive(Debug, Default, Clone, Serialize, Deserialize)]
-pub struct StandardCodec;
-
-impl Codec for StandardCodec {
-    type PostingsCodec = StandardPostingsCodec;
-
-    const NAME: &'static str = "standard";
-
-    fn from_json_props(json_value: &serde_json::Value) -> crate::Result<Self> {
-        if !json_value.is_null() {
-            return Err(crate::TantivyError::InvalidArgument(format!(
-                "Codec property for the StandardCodec are unexpected. expected null, got {}",
-                json_value.as_str().unwrap_or("null")
-            )));
-        }
-        Ok(StandardCodec)
-    }
-
-    fn to_json_props(&self) -> serde_json::Value {
-        serde_json::Value::Null
-    }
-}
@@ -1,50 +0,0 @@
-use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
-use crate::DocId;
-
-pub struct Block {
-    doc_ids: [DocId; COMPRESSION_BLOCK_SIZE],
-    term_freqs: [u32; COMPRESSION_BLOCK_SIZE],
-    len: usize,
-}
-
-impl Block {
-    pub fn new() -> Self {
-        Block {
-            doc_ids: [0u32; COMPRESSION_BLOCK_SIZE],
-            term_freqs: [0u32; COMPRESSION_BLOCK_SIZE],
-            len: 0,
-        }
-    }
-
-    pub fn doc_ids(&self) -> &[DocId] {
-        &self.doc_ids[..self.len]
-    }
-
-    pub fn term_freqs(&self) -> &[u32] {
-        &self.term_freqs[..self.len]
-    }
-
-    pub fn clear(&mut self) {
-        self.len = 0;
-    }
-
-    pub fn append_doc(&mut self, doc: DocId, term_freq: u32) {
-        let len = self.len;
-        self.doc_ids[len] = doc;
-        self.term_freqs[len] = term_freq;
-        self.len = len + 1;
-    }
-
-    pub fn is_full(&self) -> bool {
-        self.len == COMPRESSION_BLOCK_SIZE
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.len == 0
-    }
-
-    pub fn last_doc(&self) -> DocId {
-        assert_eq!(self.len, COMPRESSION_BLOCK_SIZE);
-        self.doc_ids[COMPRESSION_BLOCK_SIZE - 1]
-    }
-}
@@ -1,13 +0,0 @@
-use crate::codec::postings::PostingsCodec;
-
-mod block;
-mod postings_serializer;
-mod skip;
-
-pub use postings_serializer::StandardPostingsSerializer;
-
-pub struct StandardPostingsCodec;
-
-impl PostingsCodec for StandardPostingsCodec {
-    type PostingsSerializer = StandardPostingsSerializer;
-}
@@ -1,187 +0,0 @@
-use std::cmp::Ordering;
-use std::io::{self, Write as _};
-
-use common::{BinarySerializable as _, VInt};
-
-use crate::codec::postings::PostingsSerializer;
-use crate::codec::standard::postings::block::Block;
-use crate::codec::standard::postings::skip::SkipSerializer;
-use crate::fieldnorm::FieldNormReader;
-use crate::postings::compression::{BlockEncoder, VIntEncoder as _, COMPRESSION_BLOCK_SIZE};
-use crate::query::Bm25Weight;
-use crate::schema::IndexRecordOption;
-use crate::{DocId, Score};
-
-pub struct StandardPostingsSerializer {
-    last_doc_id_encoded: u32,
-
-    block_encoder: BlockEncoder,
-    block: Box<Block>,
-
-    postings_write: Vec<u8>,
-    skip_write: SkipSerializer,
-
-    mode: IndexRecordOption,
-    fieldnorm_reader: Option<FieldNormReader>,
-
-    bm25_weight: Option<Bm25Weight>,
-    avg_fieldnorm: Score, /* Average number of term in the field for that segment.
-                           * this value is used to compute the block wand information. */
-    term_has_freq: bool,
-}
-
-impl PostingsSerializer for StandardPostingsSerializer {
-    fn new(
-        avg_fieldnorm: Score,
-        mode: IndexRecordOption,
-        fieldnorm_reader: Option<FieldNormReader>,
-    ) -> StandardPostingsSerializer {
-        Self {
-            block_encoder: BlockEncoder::new(),
-            block: Box::new(Block::new()),
-
-            postings_write: Vec::new(),
-            skip_write: SkipSerializer::new(),
-
-            last_doc_id_encoded: 0u32,
-            mode,
-
-            fieldnorm_reader,
-            bm25_weight: None,
-            avg_fieldnorm,
-            term_has_freq: false,
-        }
-    }
-
-    fn new_term(&mut self, term_doc_freq: u32, record_term_freq: bool) {
-        self.bm25_weight = None;
-
-        self.term_has_freq = self.mode.has_freq() && record_term_freq;
-        if !self.term_has_freq {
-            return;
-        }
-
-        let num_docs_in_segment: u64 =
-            if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
-                fieldnorm_reader.num_docs() as u64
-            } else {
-                return;
-            };
-
-        if num_docs_in_segment == 0 {
-            return;
-        }
-
-        self.bm25_weight = Some(Bm25Weight::for_one_term_without_explain(
-            term_doc_freq as u64,
-            num_docs_in_segment,
-            self.avg_fieldnorm,
-        ));
-    }
-
-    fn write_doc(&mut self, doc_id: DocId, term_freq: u32) {
-        self.block.append_doc(doc_id, term_freq);
-        if self.block.is_full() {
-            self.write_block();
-        }
-    }
-
-    fn close_term(
-        &mut self,
-        doc_freq: u32,
-        output_write: &mut impl std::io::Write,
-    ) -> io::Result<()> {
-        if !self.block.is_empty() {
-            // we have doc ids waiting to be written
-            // this happens when the number of doc ids is
-            // not a perfect multiple of our block size.
-            //
-            // In that case, the remaining part is encoded
-            // using variable int encoding.
-            {
-                let block_encoded = self
-                    .block_encoder
-                    .compress_vint_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
-                self.postings_write.write_all(block_encoded)?;
-            }
-            // ... Idem for term frequencies
-            if self.term_has_freq {
-                let block_encoded = self
-                    .block_encoder
-                    .compress_vint_unsorted(self.block.term_freqs());
-                self.postings_write.write_all(block_encoded)?;
-            }
-            self.block.clear();
-        }
-        if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
-            let skip_data = self.skip_write.data();
-            VInt(skip_data.len() as u64).serialize(output_write)?;
-            output_write.write_all(skip_data)?;
-        }
-        output_write.write_all(&self.postings_write[..])?;
-        self.skip_write.clear();
-        self.postings_write.clear();
-        self.bm25_weight = None;
-        Ok(())
-    }
-}
-
-impl StandardPostingsSerializer {
-    fn write_block(&mut self) {
-        {
-            // encode the doc ids
-            let (num_bits, block_encoded): (u8, &[u8]) = self
-                .block_encoder
-                .compress_block_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
-            self.last_doc_id_encoded = self.block.last_doc();
-            self.skip_write
-                .write_doc(self.last_doc_id_encoded, num_bits);
-            // last el block 0, offset block 1,
-            self.postings_write.extend(block_encoded);
-        }
-        if self.term_has_freq {
-            let (num_bits, block_encoded): (u8, &[u8]) = self
-                .block_encoder
-                .compress_block_unsorted(self.block.term_freqs(), true);
-            self.postings_write.extend(block_encoded);
-            self.skip_write.write_term_freq(num_bits);
-            if self.mode.has_positions() {
-                // We serialize the sum of term freqs within the skip information
-                // in order to navigate through positions.
-                let sum_freq = self.block.term_freqs().iter().cloned().sum();
-                self.skip_write.write_total_term_freq(sum_freq);
-            }
-            let mut blockwand_params = (0u8, 0u32);
-            if let Some(bm25_weight) = self.bm25_weight.as_ref() {
-                if let Some(fieldnorm_reader) = self.fieldnorm_reader.as_ref() {
-                    let docs = self.block.doc_ids().iter().cloned();
-                    let term_freqs = self.block.term_freqs().iter().cloned();
-                    let fieldnorms = docs.map(|doc| fieldnorm_reader.fieldnorm_id(doc));
-                    blockwand_params = fieldnorms
-                        .zip(term_freqs)
-                        .max_by(
-                            |(left_fieldnorm_id, left_term_freq),
-                             (right_fieldnorm_id, right_term_freq)| {
-                                let left_score =
-                                    bm25_weight.tf_factor(*left_fieldnorm_id, *left_term_freq);
-                                let right_score =
-                                    bm25_weight.tf_factor(*right_fieldnorm_id, *right_term_freq);
-                                left_score
-                                    .partial_cmp(&right_score)
-                                    .unwrap_or(Ordering::Equal)
-                            },
-                        )
-                        .unwrap();
-                }
-            }
-            let (fieldnorm_id, term_freq) = blockwand_params;
-            self.skip_write.write_blockwand_max(fieldnorm_id, term_freq);
-        }
-        self.block.clear();
-    }
-
-    fn clear(&mut self) {
-        self.block.clear();
-        self.last_doc_id_encoded = 0;
-    }
-}
@@ -1,448 +0,0 @@
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
|
||||
use crate::query::Bm25Weight;
|
||||
use crate::schema::IndexRecordOption;
|
||||
use crate::{DocId, Score, TERMINATED};
|
||||
|
||||
// doc num bits uses the following encoding:
|
||||
// given 0b a b cdefgh
|
||||
// |1|2|3| 4 |
|
||||
// - 1: unused
|
||||
// - 2: is delta-1 encoded. 0 if not, 1, if yes
|
||||
// - 3: unused
|
||||
// - 4: a 5 bit number in 0..32, the actual bitwidth. Bitpacking could in theory say this is 32
|
||||
// (requiring a 6th bit), but the biggest doc_id we can want to encode is TERMINATED-1, which can
|
||||
// be represented on 31b without delta encoding.
|
||||
fn encode_bitwidth(bitwidth: u8, delta_1: bool) -> u8 {
|
||||
assert!(bitwidth < 32);
|
||||
bitwidth | ((delta_1 as u8) << 6)
|
||||
}
|
||||
|
||||
fn decode_bitwidth(raw_bitwidth: u8) -> (u8, bool) {
|
||||
let delta_1 = ((raw_bitwidth >> 6) & 1) != 0;
|
||||
let bitwidth = raw_bitwidth & 0x1f;
|
||||
(bitwidth, delta_1)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn encode_block_wand_max_tf(max_tf: u32) -> u8 {
|
||||
max_tf.min(u8::MAX as u32) as u8
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn decode_block_wand_max_tf(max_tf_code: u8) -> u32 {
|
||||
if max_tf_code == u8::MAX {
|
||||
u32::MAX
|
||||
} else {
|
||||
max_tf_code as u32
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn read_u32(data: &[u8]) -> u32 {
|
||||
u32::from_le_bytes(data[..4].try_into().unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn write_u32(val: u32, buf: &mut Vec<u8>) {
|
||||
buf.extend_from_slice(&val.to_le_bytes());
|
||||
}
|
||||
|
||||
pub struct SkipSerializer {
|
||||
buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl SkipSerializer {
|
||||
pub fn new() -> SkipSerializer {
|
||||
SkipSerializer { buffer: Vec::new() }
|
||||
}
|
||||
|
||||
pub fn write_doc(&mut self, last_doc: DocId, doc_num_bits: u8) {
|
||||
write_u32(last_doc, &mut self.buffer);
|
||||
self.buffer.push(encode_bitwidth(doc_num_bits, true));
|
||||
}
|
||||
|
||||
pub fn write_term_freq(&mut self, tf_num_bits: u8) {
|
||||
self.buffer.push(tf_num_bits);
|
||||
}
|
||||
|
||||
pub fn write_total_term_freq(&mut self, tf_sum: u32) {
|
||||
write_u32(tf_sum, &mut self.buffer);
|
||||
}
|
||||
|
||||
pub fn write_blockwand_max(&mut self, fieldnorm_id: u8, term_freq: u32) {
|
||||
let block_wand_tf = encode_block_wand_max_tf(term_freq);
|
||||
self.buffer
|
||||
.extend_from_slice(&[fieldnorm_id, block_wand_tf]);
|
||||
}
|
||||
|
||||
pub fn data(&self) -> &[u8] {
|
||||
&self.buffer[..]
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.buffer.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct SkipReader {
|
||||
last_doc_in_block: DocId,
|
||||
pub(crate) last_doc_in_previous_block: DocId,
|
||||
owned_read: OwnedBytes,
|
||||
skip_info: IndexRecordOption,
|
||||
byte_offset: usize,
|
||||
remaining_docs: u32, // number of docs remaining, including the
|
||||
// documents in the current block.
|
||||
block_info: BlockInfo,
|
||||
|
||||
position_offset: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, PartialEq, Copy, Debug)]
|
||||
pub(crate) enum BlockInfo {
|
||||
BitPacked {
|
||||
doc_num_bits: u8,
|
||||
strict_delta_encoded: bool,
|
||||
tf_num_bits: u8,
|
||||
tf_sum: u32,
|
||||
block_wand_fieldnorm_id: u8,
|
||||
block_wand_term_freq: u32,
|
||||
},
|
||||
VInt {
|
||||
num_docs: u32,
|
||||
},
|
||||
}
|
||||
|
||||
impl Default for BlockInfo {
|
||||
fn default() -> Self {
|
||||
BlockInfo::VInt { num_docs: 0u32 }
|
||||
}
|
||||
}
|
||||
|
||||
impl SkipReader {
|
||||
pub fn new(data: OwnedBytes, doc_freq: u32, skip_info: IndexRecordOption) -> SkipReader {
|
||||
let mut skip_reader = SkipReader {
|
||||
last_doc_in_block: if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
0
|
||||
} else {
|
||||
TERMINATED
|
||||
},
|
||||
last_doc_in_previous_block: 0u32,
|
||||
owned_read: data,
|
||||
skip_info,
|
||||
block_info: BlockInfo::VInt { num_docs: doc_freq },
|
||||
byte_offset: 0,
|
||||
remaining_docs: doc_freq,
|
||||
position_offset: 0u64,
|
||||
};
|
||||
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
skip_reader.read_block_info();
|
||||
}
|
||||
skip_reader
|
||||
}
|
||||
|
||||
pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) {
|
||||
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
0
|
||||
} else {
|
||||
TERMINATED
|
||||
};
|
||||
self.last_doc_in_previous_block = 0u32;
|
||||
self.owned_read = data;
|
||||
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
|
||||
self.byte_offset = 0;
|
||||
self.remaining_docs = doc_freq;
|
||||
self.position_offset = 0u64;
|
||||
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
self.read_block_info();
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the block max score for this block if available.
|
||||
//
|
||||
// The block max score is available for all full bitpacked block,
|
||||
// but no available for the last VInt encoded incomplete block.
|
||||
pub fn block_max_score(&self, bm25_weight: &Bm25Weight) -> Option<Score> {
|
||||
match self.block_info {
|
||||
BlockInfo::BitPacked {
|
||||
block_wand_fieldnorm_id,
|
||||
block_wand_term_freq,
|
||||
..
|
||||
} => Some(bm25_weight.score(block_wand_fieldnorm_id, block_wand_term_freq)),
|
||||
BlockInfo::VInt { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn last_doc_in_block(&self) -> DocId {
|
||||
self.last_doc_in_block
|
||||
}
|
||||
|
||||
pub fn position_offset(&self) -> u64 {
|
||||
self.position_offset
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn byte_offset(&self) -> usize {
|
||||
self.byte_offset
|
||||
}
|
||||
|
||||
fn read_block_info(&mut self) {
|
||||
let bytes = self.owned_read.as_slice();
|
||||
let advance_len: usize;
|
||||
self.last_doc_in_block = read_u32(bytes);
|
||||
let (doc_num_bits, strict_delta_encoded) = decode_bitwidth(bytes[4]);
|
||||
match self.skip_info {
|
||||
IndexRecordOption::Basic => {
|
||||
advance_len = 5;
|
||||
self.block_info = BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
strict_delta_encoded,
|
||||
tf_num_bits: 0,
|
||||
tf_sum: 0,
|
||||
block_wand_fieldnorm_id: 0,
|
||||
block_wand_term_freq: 0,
|
||||
};
|
||||
}
|
||||
IndexRecordOption::WithFreqs => {
|
||||
let tf_num_bits = bytes[5];
|
||||
let block_wand_fieldnorm_id = bytes[6];
|
||||
let block_wand_term_freq = decode_block_wand_max_tf(bytes[7]);
|
||||
advance_len = 8;
|
||||
self.block_info = BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
strict_delta_encoded,
|
||||
tf_num_bits,
|
||||
tf_sum: 0,
|
||||
block_wand_fieldnorm_id,
|
||||
block_wand_term_freq,
|
||||
};
|
||||
}
|
||||
IndexRecordOption::WithFreqsAndPositions => {
|
||||
let tf_num_bits = bytes[5];
|
||||
let tf_sum = read_u32(&bytes[6..10]);
|
||||
let block_wand_fieldnorm_id = bytes[10];
|
||||
let block_wand_term_freq = decode_block_wand_max_tf(bytes[11]);
|
||||
advance_len = 12;
|
||||
self.block_info = BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
strict_delta_encoded,
|
||||
tf_num_bits,
|
||||
tf_sum,
|
||||
block_wand_fieldnorm_id,
|
||||
block_wand_term_freq,
|
||||
};
|
||||
}
|
||||
}
|
||||
self.owned_read.advance(advance_len);
|
||||
}
|
||||
|
||||
pub fn block_info(&self) -> BlockInfo {
|
||||
self.block_info
|
||||
}
|
||||
|
||||
/// Advance the skip reader to the block that may contain the target.
|
||||
///
|
||||
/// If the target is larger than all documents, the skip_reader
|
||||
/// then advance to the last Variable In block.
|
||||
pub fn seek(&mut self, target: DocId) -> bool {
|
||||
if self.last_doc_in_block() >= target {
|
||||
return false;
|
||||
}
|
||||
loop {
|
||||
self.advance();
|
||||
if self.last_doc_in_block() >= target {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn advance(&mut self) {
|
||||
match self.block_info {
|
||||
BlockInfo::BitPacked {
|
||||
doc_num_bits,
|
||||
tf_num_bits,
|
||||
tf_sum,
|
||||
..
|
||||
} => {
|
||||
self.remaining_docs -= COMPRESSION_BLOCK_SIZE as u32;
|
||||
self.byte_offset += compressed_block_size(doc_num_bits + tf_num_bits);
|
||||
self.position_offset += tf_sum as u64;
|
||||
}
|
||||
BlockInfo::VInt { num_docs } => {
|
||||
debug_assert_eq!(num_docs, self.remaining_docs);
|
||||
self.remaining_docs = 0;
|
||||
self.byte_offset = usize::MAX;
|
||||
}
|
||||
}
|
||||
self.last_doc_in_previous_block = self.last_doc_in_block;
|
||||
if self.remaining_docs >= COMPRESSION_BLOCK_SIZE as u32 {
|
||||
self.read_block_info();
|
||||
} else {
|
||||
self.last_doc_in_block = TERMINATED;
|
||||
self.block_info = BlockInfo::VInt {
|
||||
num_docs: self.remaining_docs,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::{
|
||||
decode_bitwidth, encode_bitwidth, BlockInfo, IndexRecordOption, SkipReader, SkipSerializer,
|
||||
};
|
||||
use crate::directory::OwnedBytes;
|
||||
use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
|
||||
|
||||
#[test]
|
||||
fn test_encode_block_wand_max_tf() {
|
||||
for tf in 0..255 {
|
||||
assert_eq!(super::encode_block_wand_max_tf(tf), tf as u8);
|
||||
}
|
||||
for &tf in &[255, 256, 1_000_000, u32::MAX] {
|
||||
assert_eq!(super::encode_block_wand_max_tf(tf), 255);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_decode_block_wand_max_tf() {
|
||||
for tf in 0..255 {
|
||||
assert_eq!(super::decode_block_wand_max_tf(tf), tf as u32);
}
assert_eq!(super::decode_block_wand_max_tf(255), u32::MAX);
}

#[test]
fn test_skip_with_freq() {
let buf = {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_term_freq(3u8);
skip_serializer.write_blockwand_max(13u8, 3u32);
skip_serializer.write_doc(5u32, 5u8);
skip_serializer.write_term_freq(2u8);
skip_serializer.write_blockwand_max(8u8, 2u32);
skip_serializer.data().to_owned()
};
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader =
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::WithFreqs);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info,
BlockInfo::BitPacked {
doc_num_bits: 2u8,
strict_delta_encoded: true,
tf_num_bits: 3u8,
tf_sum: 0,
block_wand_fieldnorm_id: 13,
block_wand_term_freq: 3
}
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
strict_delta_encoded: true,
tf_num_bits: 2u8,
tf_sum: 0,
block_wand_fieldnorm_id: 8,
block_wand_term_freq: 2
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}

#[test]
fn test_skip_no_freq() {
let buf = {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.write_doc(5u32, 5u8);
skip_serializer.data().to_owned()
};
let doc_freq = 3u32 + (COMPRESSION_BLOCK_SIZE * 2) as u32;
let mut skip_reader =
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
strict_delta_encoded: true,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.last_doc_in_block(), 5u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 5u8,
strict_delta_encoded: true,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 3u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}

#[test]
fn test_skip_multiple_of_block_size() {
let buf = {
let mut skip_serializer = SkipSerializer::new();
skip_serializer.write_doc(1u32, 2u8);
skip_serializer.data().to_owned()
};
let doc_freq = COMPRESSION_BLOCK_SIZE as u32;
let mut skip_reader =
SkipReader::new(OwnedBytes::new(buf), doc_freq, IndexRecordOption::Basic);
assert_eq!(skip_reader.last_doc_in_block(), 1u32);
assert_eq!(
skip_reader.block_info(),
BlockInfo::BitPacked {
doc_num_bits: 2u8,
strict_delta_encoded: true,
tf_num_bits: 0,
tf_sum: 0u32,
block_wand_fieldnorm_id: 0,
block_wand_term_freq: 0
}
);
skip_reader.advance();
assert_eq!(skip_reader.block_info(), BlockInfo::VInt { num_docs: 0u32 });
}

#[test]
fn test_encode_decode_bitwidth() {
for bitwidth in 0..32 {
for delta_1 in [false, true] {
assert_eq!(
(bitwidth, delta_1),
decode_bitwidth(encode_bitwidth(bitwidth, delta_1))
);
}
}
assert_eq!(0b01000010, encode_bitwidth(0b10, true));
assert_eq!(0b00000010, encode_bitwidth(0b10, false));
}
}
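
The bitwidth codec itself is not part of this diff; a minimal sketch consistent with the assertions above (the bit layout is an assumption inferred from the expected values, not the crate's confirmed implementation):

// Sketch only: the test implies the strict-delta flag lives in bit 6,
// above the six bits that suffice for a bitwidth in 0..=32:
// encode_bitwidth(0b10, true) == 0b01000010.
fn encode_bitwidth(bitwidth: u8, strict_delta_encoded: bool) -> u8 {
    bitwidth | ((strict_delta_encoded as u8) << 6)
}

fn decode_bitwidth(encoded: u8) -> (u8, bool) {
    (encoded & 0b0011_1111, encoded & 0b0100_0000 != 0)
}
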
@@ -43,7 +43,7 @@ impl Collector for Count {
fn for_segment(
&self,
_: SegmentOrdinal,
_: &SegmentReader,
_: &dyn SegmentReader,
) -> crate::Result<SegmentCountCollector> {
Ok(SegmentCountCollector::default())
}

@@ -1,7 +1,7 @@
use std::collections::HashSet;

use super::{Collector, SegmentCollector};
use crate::{DocAddress, DocId, Score};
use crate::{DocAddress, DocId, Score, SegmentReader};

/// Collector that returns the set of DocAddress matching the query.
///
@@ -15,7 +15,7 @@ impl Collector for DocSetCollector {
fn for_segment(
&self,
segment_local_id: crate::SegmentOrdinal,
_segment: &crate::SegmentReader,
_segment: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
Ok(DocSetChildCollector {
segment_local_id,

@@ -265,7 +265,7 @@ impl Collector for FacetCollector {
fn for_segment(
&self,
_: SegmentOrdinal,
reader: &SegmentReader,
reader: &dyn SegmentReader,
) -> crate::Result<FacetSegmentCollector> {
let facet_reader = reader.facet_reader(&self.field_name)?;
let facet_dict = facet_reader.facet_dict();
@@ -486,9 +486,9 @@ mod tests {
use std::collections::BTreeSet;

use columnar::Dictionary;
use rand::distributions::Uniform;
use rand::distr::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use rand::{rng, Rng};

use super::{FacetCollector, FacetCounts};
use crate::collector::facet_collector::compress_mapping;
@@ -731,7 +731,7 @@ mod tests {
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);

let uniform = Uniform::new_inclusive(1, 100_000);
let uniform = Uniform::new_inclusive(1, 100_000).unwrap();
let mut docs: Vec<TantivyDocument> =
vec![("a", 10), ("b", 100), ("c", 7), ("d", 12), ("e", 21)]
.into_iter()
@@ -741,14 +741,11 @@ mod tests {
std::iter::repeat_n(doc, count)
})
.map(|mut doc| {
doc.add_facet(
facet_field,
&format!("/facet/{}", thread_rng().sample(uniform)),
);
doc.add_facet(facet_field, &format!("/facet/{}", rng().sample(uniform)));
doc
})
.collect();
docs[..].shuffle(&mut thread_rng());
docs[..].shuffle(&mut rng());

let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
for doc in docs {
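
The same rand 0.9 migration pattern repeats throughout this changeset; a minimal illustrative sketch of the renamed entry points (not part of the diff itself):

// rand 0.8 -> 0.9: thread_rng() becomes rng(), gen_* becomes random_*,
// Uniform moves from rand::distributions to rand::distr, and its
// constructors now return a Result.
use rand::distr::Uniform;
use rand::{rng, Rng};

fn sample_demo() {
    let mut r = rng();
    let die = Uniform::new_inclusive(1, 6).unwrap();
    let _roll: i32 = r.sample(die);
    let _x: f64 = r.random_range(0.0..1.0);
    let _coin: bool = r.random_bool(0.5);
}
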
@@ -822,8 +819,8 @@ mod tests {
#[cfg(all(test, feature = "unstable"))]
mod bench {

use rand::rng;
use rand::seq::SliceRandom;
use rand::thread_rng;
use test::Bencher;

use crate::collector::FacetCollector;
@@ -846,7 +843,7 @@ mod bench {
}
}
// 40425 docs
docs[..].shuffle(&mut thread_rng());
docs[..].shuffle(&mut rng());

let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
for doc in docs {

@@ -113,7 +113,7 @@ where
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let column_opt = segment_reader.fast_fields().column_opt(&self.field)?;

@@ -287,7 +287,7 @@ where
fn for_segment(
&self,
segment_local_id: u32,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let column_opt = segment_reader.fast_fields().bytes(&self.field)?;

@@ -6,7 +6,7 @@ use fastdivide::DividerU64;
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{FastFieldNotAvailableError, FastValue};
use crate::schema::Type;
use crate::{DocId, Score};
use crate::{DocId, Score, SegmentReader};

/// Histogram builds a histogram of the values of a fastfield for the
/// collected DocSet.
@@ -110,7 +110,7 @@ impl Collector for HistogramCollector {
fn for_segment(
&self,
_segment_local_id: crate::SegmentOrdinal,
segment: &crate::SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let column_opt = segment.fast_fields().u64_lenient(&self.field)?;
let (column, _column_type) = column_opt.ok_or_else(|| FastFieldNotAvailableError {

@@ -156,7 +156,7 @@ pub trait Collector: Sync + Send {
fn for_segment(
&self,
segment_local_id: SegmentOrdinal,
segment: &SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<Self::Child>;

/// Returns true iff the collector requires the computation of scores for documents.
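
With `for_segment` now taking a trait object, a custom collector is written against `&dyn SegmentReader`; a hypothetical minimal example (the counting logic and type names are invented for illustration):

// Hypothetical doc-counting collector adapted to the new signature.
struct CountingCollector;

struct CountingSegmentCollector(usize);

impl SegmentCollector for CountingSegmentCollector {
    type Fruit = usize;

    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.0 += 1;
    }

    fn harvest(self) -> usize {
        self.0
    }
}

impl Collector for CountingCollector {
    type Fruit = usize;
    type Child = CountingSegmentCollector;

    fn for_segment(
        &self,
        _segment_local_id: SegmentOrdinal,
        _segment: &dyn SegmentReader, // trait object instead of the concrete reader
    ) -> crate::Result<Self::Child> {
        Ok(CountingSegmentCollector(0))
    }

    fn requires_scoring(&self) -> bool {
        false
    }

    fn merge_fruits(&self, children: Vec<usize>) -> crate::Result<usize> {
        Ok(children.into_iter().sum())
    }
}
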
@@ -174,7 +174,7 @@ pub trait Collector: Sync + Send {
&self,
weight: &dyn Weight,
segment_ord: u32,
reader: &SegmentReader,
reader: &dyn SegmentReader,
) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
let with_scoring = self.requires_scoring();
let mut segment_collector = self.for_segment(segment_ord, reader)?;
@@ -186,7 +186,7 @@ pub trait Collector: Sync + Send {
pub(crate) fn default_collect_segment_impl<TSegmentCollector: SegmentCollector>(
segment_collector: &mut TSegmentCollector,
weight: &dyn Weight,
reader: &SegmentReader,
reader: &dyn SegmentReader,
with_scoring: bool,
) -> crate::Result<()> {
match (reader.alive_bitset(), with_scoring) {
@@ -255,7 +255,7 @@ impl<TCollector: Collector> Collector for Option<TCollector> {
fn for_segment(
&self,
segment_local_id: SegmentOrdinal,
segment: &SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
Ok(if let Some(inner) = self {
let inner_segment_collector = inner.for_segment(segment_local_id, segment)?;
@@ -336,7 +336,7 @@ where
fn for_segment(
&self,
segment_local_id: u32,
segment: &SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let left = self.0.for_segment(segment_local_id, segment)?;
let right = self.1.for_segment(segment_local_id, segment)?;
@@ -407,7 +407,7 @@ where
fn for_segment(
&self,
segment_local_id: u32,
segment: &SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?;
@@ -487,7 +487,7 @@ where
fn for_segment(
&self,
segment_local_id: u32,
segment: &SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let one = self.0.for_segment(segment_local_id, segment)?;
let two = self.1.for_segment(segment_local_id, segment)?;

@@ -24,7 +24,7 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
fn for_segment(
&self,
segment_local_id: u32,
reader: &SegmentReader,
reader: &dyn SegmentReader,
) -> crate::Result<Box<dyn BoxableSegmentCollector>> {
let child = self.0.for_segment(segment_local_id, reader)?;
Ok(Box::new(SegmentCollectorWrapper(child)))
@@ -209,7 +209,7 @@ impl Collector for MultiCollector<'_> {
fn for_segment(
&self,
segment_local_id: SegmentOrdinal,
segment: &SegmentReader,
segment: &dyn SegmentReader,
) -> crate::Result<MultiCollectorChild> {
let children = self
.collector_wrappers

@@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};

use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
use crate::schema::{OwnedValue, Schema};
use crate::{DocId, Order, Score};
use crate::{DocId, Order, Score, SegmentReader};

fn compare_owned_value<const NULLS_FIRST: bool>(lhs: &OwnedValue, rhs: &OwnedValue) -> Ordering {
match (lhs, rhs) {
@@ -430,7 +430,7 @@ where

fn segment_sort_key_computer(
&self,
segment_reader: &crate::SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let child = self.0.segment_sort_key_computer(segment_reader)?;
Ok(SegmentSortKeyComputerWithComparator {
@@ -468,7 +468,7 @@ where

fn segment_sort_key_computer(
&self,
segment_reader: &crate::SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let child = self.0.segment_sort_key_computer(segment_reader)?;
Ok(SegmentSortKeyComputerWithComparator {

@@ -6,7 +6,7 @@ use crate::collector::sort_key::{
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
use crate::fastfield::FastFieldNotAvailableError;
use crate::schema::OwnedValue;
use crate::{DateTime, DocId, Score};
use crate::{DateTime, DocId, Score, SegmentReader};

/// Sort by the boxed / OwnedValue representation of either a fast field, or of the score.
///
@@ -86,7 +86,7 @@ impl SortKeyComputer for SortByErasedType {

fn segment_sort_key_computer(
&self,
segment_reader: &crate::SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let inner: Box<dyn ErasedSegmentSortKeyComputer> = match self {
Self::Field(column_name) => {

@@ -1,6 +1,6 @@
use crate::collector::sort_key::NaturalComparator;
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer, TopNComputer};
use crate::{DocAddress, DocId, Score};
use crate::{DocAddress, DocId, Score, SegmentReader};

/// Sort by similarity score.
#[derive(Clone, Debug, Copy)]
@@ -19,7 +19,7 @@ impl SortKeyComputer for SortBySimilarityScore {

fn segment_sort_key_computer(
&self,
_segment_reader: &crate::SegmentReader,
_segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
Ok(SortBySimilarityScore)
}
@@ -29,7 +29,7 @@ impl SortKeyComputer for SortBySimilarityScore {
&self,
k: usize,
weight: &dyn crate::query::Weight,
reader: &crate::SegmentReader,
reader: &dyn SegmentReader,
segment_ord: u32,
) -> crate::Result<Vec<(Self::SortKey, DocAddress)>> {
let mut top_n: TopNComputer<Score, DocId, Self::Comparator> =

@@ -61,7 +61,7 @@ impl<T: FastValue> SortKeyComputer for SortByStaticFastValue<T> {

fn segment_sort_key_computer(
&self,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let sort_column_opt = segment_reader.fast_fields().u64_lenient(&self.field)?;
let (sort_column, _sort_column_type) =

@@ -3,7 +3,7 @@ use columnar::StrColumn;
use crate::collector::sort_key::NaturalComparator;
use crate::collector::{SegmentSortKeyComputer, SortKeyComputer};
use crate::termdict::TermOrdinal;
use crate::{DocId, Score};
use crate::{DocId, Score, SegmentReader};

/// Sort by the first value of a string column.
///
@@ -35,7 +35,7 @@ impl SortKeyComputer for SortByString {

fn segment_sort_key_computer(
&self,
segment_reader: &crate::SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
let str_column_opt = segment_reader.fast_fields().str(&self.column_name)?;
Ok(ByStringColumnSegmentSortKeyComputer { str_column_opt })

@@ -119,7 +119,7 @@ pub trait SortKeyComputer: Sync {
&self,
k: usize,
weight: &dyn crate::query::Weight,
reader: &crate::SegmentReader,
reader: &dyn SegmentReader,
segment_ord: u32,
) -> crate::Result<Vec<(Self::SortKey, DocAddress)>> {
let with_scoring = self.requires_scoring();
@@ -135,7 +135,7 @@ pub trait SortKeyComputer: Sync {
}

/// Builds a child sort key computer for a specific segment.
fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
fn segment_sort_key_computer(&self, segment_reader: &dyn SegmentReader) -> Result<Self::Child>;
}

impl<HeadSortKeyComputer, TailSortKeyComputer> SortKeyComputer
@@ -156,7 +156,7 @@ where
(self.0.comparator(), self.1.comparator())
}

fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
fn segment_sort_key_computer(&self, segment_reader: &dyn SegmentReader) -> Result<Self::Child> {
Ok((
self.0.segment_sort_key_computer(segment_reader)?,
self.1.segment_sort_key_computer(segment_reader)?,
@@ -357,7 +357,7 @@ where
)
}

fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
fn segment_sort_key_computer(&self, segment_reader: &dyn SegmentReader) -> Result<Self::Child> {
let sort_key_computer1 = self.0.segment_sort_key_computer(segment_reader)?;
let sort_key_computer2 = self.1.segment_sort_key_computer(segment_reader)?;
let sort_key_computer3 = self.2.segment_sort_key_computer(segment_reader)?;
@@ -420,7 +420,7 @@ where
SortKeyComputer4::Comparator,
);

fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
fn segment_sort_key_computer(&self, segment_reader: &dyn SegmentReader) -> Result<Self::Child> {
let sort_key_computer1 = self.0.segment_sort_key_computer(segment_reader)?;
let sort_key_computer2 = self.1.segment_sort_key_computer(segment_reader)?;
let sort_key_computer3 = self.2.segment_sort_key_computer(segment_reader)?;
@@ -454,7 +454,7 @@ where

impl<F, SegmentF, TSortKey> SortKeyComputer for F
where
F: 'static + Send + Sync + Fn(&SegmentReader) -> SegmentF,
F: 'static + Send + Sync + Fn(&dyn SegmentReader) -> SegmentF,
SegmentF: 'static + FnMut(DocId) -> TSortKey,
TSortKey: 'static + PartialOrd + Clone + Send + Sync + std::fmt::Debug,
{
@@ -462,7 +462,7 @@ where
type Child = SegmentF;
type Comparator = NaturalComparator;

fn segment_sort_key_computer(&self, segment_reader: &SegmentReader) -> Result<Self::Child> {
fn segment_sort_key_computer(&self, segment_reader: &dyn SegmentReader) -> Result<Self::Child> {
Ok((self)(segment_reader))
}
}
@@ -509,10 +509,10 @@ mod tests {

#[test]
fn test_lazy_score_computer() {
let score_computer_primary = |_segment_reader: &SegmentReader| |_doc: DocId| 200u32;
let score_computer_primary = |_segment_reader: &dyn SegmentReader| |_doc: DocId| 200u32;
let call_count = Arc::new(AtomicUsize::new(0));
let call_count_clone = call_count.clone();
let score_computer_secondary = move |_segment_reader: &SegmentReader| {
let score_computer_secondary = move |_segment_reader: &dyn SegmentReader| {
let call_count_new_clone = call_count_clone.clone();
move |_doc: DocId| {
call_count_new_clone.fetch_add(1, AtomicOrdering::SeqCst);
@@ -572,10 +572,10 @@ mod tests {

#[test]
fn test_lazy_score_computer_dynamic_ordering() {
let score_computer_primary = |_segment_reader: &SegmentReader| |_doc: DocId| 200u32;
let score_computer_primary = |_segment_reader: &dyn SegmentReader| |_doc: DocId| 200u32;
let call_count = Arc::new(AtomicUsize::new(0));
let call_count_clone = call_count.clone();
let score_computer_secondary = move |_segment_reader: &SegmentReader| {
let score_computer_secondary = move |_segment_reader: &dyn SegmentReader| {
let call_count_new_clone = call_count_clone.clone();
move |_doc: DocId| {
call_count_new_clone.fetch_add(1, AtomicOrdering::SeqCst);

@@ -32,7 +32,11 @@ where TSortKeyComputer: SortKeyComputer + Send + Sync + 'static
self.sort_key_computer.check_schema(schema)
}

fn for_segment(&self, segment_ord: u32, segment_reader: &SegmentReader) -> Result<Self::Child> {
fn for_segment(
&self,
segment_ord: u32,
segment_reader: &dyn SegmentReader,
) -> Result<Self::Child> {
let segment_sort_key_computer = self
.sort_key_computer
.segment_sort_key_computer(segment_reader)?;
@@ -63,7 +67,7 @@ where TSortKeyComputer: SortKeyComputer + Send + Sync + 'static
&self,
weight: &dyn Weight,
segment_ord: u32,
reader: &SegmentReader,
reader: &dyn SegmentReader,
) -> crate::Result<Vec<(TSortKeyComputer::SortKey, DocAddress)>> {
let k = self.doc_range.end;
let docs = self
@@ -160,7 +164,7 @@ mod tests {
expected: &[(crate::Score, usize)],
) {
let mut vals: Vec<(crate::Score, usize)> = (0..10).map(|val| (val as f32, val)).collect();
vals.shuffle(&mut rand::thread_rng());
vals.shuffle(&mut rand::rng());
let vals_merged = merge_top_k(vals.into_iter(), doc_range, ComparatorEnum::from(order));
assert_eq!(&vals_merged, expected);
}

@@ -5,7 +5,7 @@ use crate::query::{AllQuery, QueryParser};
use crate::schema::{Schema, FAST, TEXT};
use crate::time::format_description::well_known::Rfc3339;
use crate::time::OffsetDateTime;
use crate::{DateTime, DocAddress, Index, Searcher, TantivyDocument};
use crate::{DateTime, DocAddress, Index, Searcher, SegmentReader, TantivyDocument};

pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
compute_score: true,
@@ -109,7 +109,7 @@ impl Collector for TestCollector {
fn for_segment(
&self,
segment_id: SegmentOrdinal,
_reader: &SegmentReader,
_reader: &dyn SegmentReader,
) -> crate::Result<TestSegmentCollector> {
Ok(TestSegmentCollector {
segment_id,
@@ -180,7 +180,7 @@ impl Collector for FastFieldTestCollector {
fn for_segment(
&self,
_: SegmentOrdinal,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<FastFieldSegmentCollector> {
let reader = segment_reader
.fast_fields()
@@ -243,7 +243,7 @@ impl Collector for BytesFastFieldTestCollector {
fn for_segment(
&self,
_segment_local_id: u32,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<BytesFastFieldSegmentCollector> {
let column_opt = segment_reader.fast_fields().bytes(&self.field)?;
Ok(BytesFastFieldSegmentCollector {

@@ -393,7 +393,7 @@ impl TopDocs {
/// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs
/// ::with_limit(10)
/// .tweak_score(move |segment_reader: &SegmentReader| {
/// .tweak_score(move |segment_reader: &dyn SegmentReader| {
/// // The argument is a function that returns our scoring
/// // function.
/// //
@@ -442,7 +442,7 @@ pub struct TweakScoreFn<F>(F);

impl<F, TTweakScoreSortKeyFn, TSortKey> SortKeyComputer for TweakScoreFn<F>
where
F: 'static + Send + Sync + Fn(&SegmentReader) -> TTweakScoreSortKeyFn,
F: 'static + Send + Sync + Fn(&dyn SegmentReader) -> TTweakScoreSortKeyFn,
TTweakScoreSortKeyFn: 'static + Fn(DocId, Score) -> TSortKey,
TweakScoreSegmentSortKeyComputer<TTweakScoreSortKeyFn>:
SegmentSortKeyComputer<SortKey = TSortKey, SegmentSortKey = TSortKey>,
@@ -458,7 +458,7 @@ where

fn segment_sort_key_computer(
&self,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
) -> crate::Result<Self::Child> {
Ok({
TweakScoreSegmentSortKeyComputer {
@@ -1525,7 +1525,7 @@ mod tests {
let text_query = query_parser.parse_query("droopy tax")?;
let collector = TopDocs::with_limit(2)
.and_offset(1)
.order_by(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
.order_by(move |_segment_reader: &dyn SegmentReader| move |doc: DocId| doc);
let score_docs: Vec<(u32, DocAddress)> =
index.reader()?.searcher().search(&text_query, &collector)?;
assert_eq!(
@@ -1543,7 +1543,7 @@ mod tests {
let text_query = query_parser.parse_query("droopy tax").unwrap();
let collector = TopDocs::with_limit(2)
.and_offset(1)
.order_by(move |_segment_reader: &SegmentReader| move |doc: DocId| doc);
.order_by(move |_segment_reader: &dyn SegmentReader| move |doc: DocId| doc);
let score_docs: Vec<(u32, DocAddress)> = index
.reader()
.unwrap()

@@ -4,7 +4,7 @@ use std::{fmt, io};

use crate::collector::Collector;
use crate::core::Executor;
use crate::index::{SegmentId, SegmentReader};
use crate::index::{ArcSegmentReader, SegmentId, SegmentReader};
use crate::query::{Bm25StatisticsProvider, EnableScoring, Query};
use crate::schema::document::DocumentDeserialize;
use crate::schema::{Schema, Term};
@@ -36,7 +36,7 @@ pub struct SearcherGeneration {

impl SearcherGeneration {
pub(crate) fn from_segment_readers(
segment_readers: &[SegmentReader],
segment_readers: &[ArcSegmentReader],
generation_id: u64,
) -> Self {
let mut segment_id_to_del_opstamp = BTreeMap::new();
@@ -133,7 +133,7 @@ impl Searcher {
pub fn doc_freq(&self, term: &Term) -> crate::Result<u64> {
let mut total_doc_freq = 0;
for segment_reader in &self.inner.segment_readers {
let inverted_index = segment_reader.inverted_index(term.field())?;
let inverted_index = segment_reader.as_ref().inverted_index(term.field())?;
let doc_freq = inverted_index.doc_freq(term)?;
total_doc_freq += u64::from(doc_freq);
}
@@ -146,7 +146,7 @@ impl Searcher {
pub async fn doc_freq_async(&self, term: &Term) -> crate::Result<u64> {
let mut total_doc_freq = 0;
for segment_reader in &self.inner.segment_readers {
let inverted_index = segment_reader.inverted_index(term.field())?;
let inverted_index = segment_reader.as_ref().inverted_index(term.field())?;
let doc_freq = inverted_index.doc_freq_async(term).await?;
total_doc_freq += u64::from(doc_freq);
}
@@ -154,13 +154,13 @@ impl Searcher {
}

/// Return the list of segment readers
pub fn segment_readers(&self) -> &[SegmentReader] {
pub fn segment_readers(&self) -> &[ArcSegmentReader] {
&self.inner.segment_readers
}

/// Returns the segment_reader associated with the given segment_ord
pub fn segment_reader(&self, segment_ord: u32) -> &SegmentReader {
&self.inner.segment_readers[segment_ord as usize]
pub fn segment_reader(&self, segment_ord: u32) -> &dyn SegmentReader {
self.inner.segment_readers[segment_ord as usize].as_ref()
}

/// Runs a query on the segment readers wrapped by the searcher.
@@ -229,7 +229,11 @@ impl Searcher {
let segment_readers = self.segment_readers();
let fruits = executor.map(
|(segment_ord, segment_reader)| {
collector.collect_segment(weight.as_ref(), segment_ord as u32, segment_reader)
collector.collect_segment(
weight.as_ref(),
segment_ord as u32,
segment_reader.as_ref(),
)
},
segment_readers.iter().enumerate(),
)?;
@@ -259,7 +263,7 @@ impl From<Arc<SearcherInner>> for Searcher {
pub(crate) struct SearcherInner {
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
segment_readers: Vec<ArcSegmentReader>,
store_readers: Vec<StoreReader>,
generation: TrackedObject<SearcherGeneration>,
}
@@ -269,7 +273,7 @@ impl SearcherInner {
pub(crate) fn new(
schema: Schema,
index: Index,
segment_readers: Vec<SegmentReader>,
segment_readers: Vec<ArcSegmentReader>,
generation: TrackedObject<SearcherGeneration>,
doc_store_cache_num_blocks: usize,
) -> io::Result<SearcherInner> {
@@ -301,7 +305,7 @@ impl fmt::Debug for Searcher {
let segment_ids = self
.segment_readers()
.iter()
.map(SegmentReader::segment_id)
.map(|segment_reader| segment_reader.segment_id())
.collect::<Vec<_>>();
write!(f, "Searcher({segment_ids:?})")
}
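
Call sites holding a `Searcher` now iterate `ArcSegmentReader`s and reach the trait object through `as_ref()`; a minimal sketch of the adjusted pattern (assuming `ArcSegmentReader::as_ref()` yields `&dyn SegmentReader`, as the diff suggests):

// Sketch: walking a Searcher's segment readers after the ArcSegmentReader change.
fn print_segments(searcher: &Searcher) {
    for segment_reader in searcher.segment_readers() {
        // `segment_reader` is an ArcSegmentReader; as_ref() yields the trait object.
        let reader: &dyn SegmentReader = segment_reader.as_ref();
        println!("{:?}: {} docs", reader.segment_id(), reader.max_doc());
    }
}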

@@ -51,31 +51,55 @@ pub trait DocSet: Send {
doc
}

/// Seeks to the target if possible and returns true if the target is in the DocSet.
/// !!!Dragons ahead!!!
/// In spirit, this is an approximate and dangerous version of `seek`.
///
/// It can leave the DocSet in an `invalid` state and might return a
/// lower bound of what the result of Seek would have been.
///
///
/// More accurately it returns either:
/// - Found if the target is in the docset. In that case, the DocSet is left in a valid state.
/// - SeekLowerBound(seek_lower_bound) if the target is not in the docset. In that case, the
/// DocSet can be left in an invalid state. The DocSet should then only receive calls to
/// `seek_danger(..)` until it returns `Found` and gets back to a valid state.
///
/// `seek_lower_bound` can be any `DocId` (in the docset or not) as long as it is in
/// `(target .. seek_result]` where `seek_result` is the first document in the docset greater
/// than `target`.
///
/// `seek_danger` may return `SeekLowerBound(TERMINATED)`.
///
/// Calling `seek_danger` with TERMINATED as a target is allowed,
/// and should always return SeekLowerBound(TERMINATED) or anything larger as TERMINATED is NOT in
/// the DocSet.
///
/// DocSets that already have an efficient `seek` method don't need to implement
/// `seek_into_the_danger_zone`. All wrapper DocSets should forward
/// `seek_into_the_danger_zone` to the underlying DocSet.
/// `seek_danger`.
///
/// ## API Behaviour
/// If `seek_into_the_danger_zone` is returning true, a call to `doc()` has to return target.
/// If `seek_into_the_danger_zone` is returning false, a call to `doc()` may return any doc
/// between the last doc that matched and target or a doc that is a valid next hit after
/// target. The DocSet is considered to be in an invalid state until
/// `seek_into_the_danger_zone` returns true again.
///
/// `target` needs to be equal to or larger than `doc` when in a valid state.
///
/// Consecutive calls are not allowed to have decreasing `target` values.
///
/// # Warning
/// This is an advanced API used by intersection. The API contract is tricky, avoid using it.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
let current_doc = self.doc();
if current_doc < target {
self.seek(target);
/// Consecutive calls to seek_danger are guaranteed to have strictly increasing `target`
/// values.
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
if target >= TERMINATED {
debug_assert!(target == TERMINATED);
// No need to advance.
return SeekDangerResult::SeekLowerBound(target);
}

// The default implementation does not include any
// `danger zone` behavior.
//
// It does not leave the scorer in an invalid state.
// For this reason, we can safely call `self.doc()`.
let mut doc = self.doc();
if doc < target {
doc = self.seek(target);
}
if doc == target {
SeekDangerResult::Found
} else {
SeekDangerResult::SeekLowerBound(self.doc())
}
self.doc() == target
}

/// Fills a given mutable buffer with the next doc ids from the
@@ -166,6 +190,17 @@ pub trait DocSet: Send {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SeekDangerResult {
/// The target was found in the DocSet.
Found,
/// The target was not found in the DocSet.
/// We return a range in which the value could be.
/// The returned DocId can be any DocId that is <= the first document
/// in the docset after the target.
SeekLowerBound(DocId),
}
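
As the doc comment above requires, wrapper DocSets forward `seek_danger` verbatim so the inner DocSet may remain in its transient invalid state; a sketch with a hypothetical wrapper type:

// Hypothetical wrapper DocSet forwarding seek_danger to the inner DocSet.
struct LoggingDocSet<D: DocSet>(D);

impl<D: DocSet> DocSet for LoggingDocSet<D> {
    fn advance(&mut self) -> DocId {
        self.0.advance()
    }

    fn doc(&self) -> DocId {
        self.0.doc()
    }

    fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
        // Forwarded unchanged: interpreting SeekLowerBound here would break
        // the contract while the inner DocSet is in the danger zone.
        self.0.seek_danger(target)
    }

    fn size_hint(&self) -> u32 {
        self.0.size_hint()
    }
}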

impl DocSet for &mut dyn DocSet {
fn advance(&mut self) -> u32 {
(**self).advance()
@@ -175,8 +210,8 @@ impl DocSet for &mut dyn DocSet {
(**self).seek(target)
}

fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
(**self).seek_into_the_danger_zone(target)
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
(**self).seek_danger(target)
}

fn doc(&self) -> u32 {
@@ -211,9 +246,9 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.seek(target)
}

fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.seek_into_the_danger_zone(target)
unboxed.seek_danger(target)
}

fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {

@@ -162,7 +162,7 @@ mod tests {
mod bench {

use rand::prelude::IteratorRandom;
use rand::thread_rng;
use rand::rng;
use test::Bencher;

use super::AliveBitSet;
@@ -176,7 +176,7 @@ mod bench {
}

fn remove_rand(raw: &mut Vec<u32>) {
let i = (0..raw.len()).choose(&mut thread_rng()).unwrap();
let i = (0..raw.len()).choose(&mut rng()).unwrap();
raw.remove(i);
}

@@ -96,7 +96,7 @@ mod tests {
};
use crate::time::OffsetDateTime;
use crate::tokenizer::{LowerCaser, RawTokenizer, TextAnalyzer, TokenizerManager};
use crate::{Index, IndexWriter, SegmentReader};
use crate::{Index, IndexWriter};

pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
let mut schema_builder = Schema::builder();
@@ -430,7 +430,7 @@ mod tests {
.searcher()
.segment_readers()
.iter()
.map(SegmentReader::segment_id)
.map(|segment_reader| segment_reader.segment_id())
.collect();
assert_eq!(segment_ids.len(), 2);
index_writer.merge(&segment_ids[..]).wait().unwrap();
@@ -879,7 +879,7 @@ mod tests {
const ONE_HOUR_IN_MICROSECS: i64 = 3_600 * 1_000_000;
let times: Vec<DateTime> = std::iter::repeat_with(|| {
// +- One hour.
let t = T0 + rng.gen_range(-ONE_HOUR_IN_MICROSECS..ONE_HOUR_IN_MICROSECS);
let t = T0 + rng.random_range(-ONE_HOUR_IN_MICROSECS..ONE_HOUR_IN_MICROSECS);
DateTime::from_timestamp_micros(t)
})
.take(1_000)

@@ -1,6 +1,6 @@
use std::collections::HashSet;

use rand::{thread_rng, Rng};
use rand::{rng, Rng};

use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::*;
@@ -29,7 +29,7 @@ fn test_functional_store() -> crate::Result<()> {
let index = Index::create_in_ram(schema);
let reader = index.reader()?;

let mut rng = thread_rng();
let mut rng = rng();

let mut index_writer: IndexWriter =
index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
@@ -38,9 +38,9 @@ fn test_functional_store() -> crate::Result<()> {

let mut doc_id = 0u64;
for _iteration in 0..get_num_iterations() {
let num_docs: usize = rng.gen_range(0..4);
let num_docs: usize = rng.random_range(0..4);
if !doc_set.is_empty() {
let doc_to_remove_id = rng.gen_range(0..doc_set.len());
let doc_to_remove_id = rng.random_range(0..doc_set.len());
let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
}
@@ -70,10 +70,10 @@ const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat \
non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
fn get_text() -> String {
use rand::seq::SliceRandom;
let mut rng = thread_rng();
use rand::seq::IndexedRandom;
let mut rng = rng();
let tokens: Vec<_> = LOREM.split(' ').collect();
let random_val = rng.gen_range(0..20);
let random_val = rng.random_range(0..20);

(0..random_val)
.map(|_| tokens.choose(&mut rng).unwrap())
@@ -101,7 +101,7 @@ fn test_functional_indexing_unsorted() -> crate::Result<()> {
let index = Index::create_from_tempdir(schema)?;
let reader = index.reader()?;

let mut rng = thread_rng();
let mut rng = rng();

let mut index_writer: IndexWriter =
index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
@@ -110,7 +110,7 @@ fn test_functional_indexing_unsorted() -> crate::Result<()> {
let mut uncommitted_docs: HashSet<u64> = HashSet::new();

for _ in 0..get_num_iterations() {
let random_val = rng.gen_range(0..20);
let random_val = rng.random_range(0..20);
if random_val == 0 {
index_writer.commit()?;
committed_docs.extend(&uncommitted_docs);

@@ -8,14 +8,13 @@ use std::thread::available_parallelism;
use super::segment::Segment;
use super::segment_reader::merge_field_meta_data;
use super::{FieldMetadata, IndexSettings};
use crate::codec::{CodecConfiguration, StandardCodec};
use crate::core::{Executor, META_FILEPATH};
use crate::directory::error::OpenReadError;
#[cfg(feature = "mmap")]
use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError};
use crate::index::{IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory};
use crate::index::{IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, SegmentReader};
use crate::indexer::index_writer::{
IndexWriterOptions, MAX_NUM_THREAD, MEMORY_BUDGET_NUM_BYTES_MIN,
};
@@ -25,7 +24,7 @@ use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::document::Document;
use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::SegmentReader;
use crate::TantivySegmentReader;

fn load_metas(
directory: &dyn Directory,
@@ -60,7 +59,6 @@ fn save_new_metas(
schema: Schema,
index_settings: IndexSettings,
directory: &dyn Directory,
codec: CodecConfiguration,
) -> crate::Result<()> {
save_metas(
&IndexMeta {
@@ -69,7 +67,6 @@ fn save_new_metas(
schema,
opstamp: 0u64,
payload: None,
codec,
},
directory,
)?;
@@ -104,21 +101,18 @@ fn save_new_metas(
/// };
/// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
/// ```
pub struct IndexBuilder<Codec: crate::codec::Codec = StandardCodec> {
pub struct IndexBuilder {
schema: Option<Schema>,
index_settings: IndexSettings,
tokenizer_manager: TokenizerManager,
fast_field_tokenizer_manager: TokenizerManager,
codec: Codec,
}

impl Default for IndexBuilder<StandardCodec> {
impl Default for IndexBuilder {
fn default() -> Self {
IndexBuilder::new()
}
}

impl IndexBuilder<StandardCodec> {
impl IndexBuilder {
/// Creates a new `IndexBuilder`
pub fn new() -> Self {
Self {
@@ -126,21 +120,6 @@ impl IndexBuilder<StandardCodec> {
index_settings: IndexSettings::default(),
tokenizer_manager: TokenizerManager::default(),
fast_field_tokenizer_manager: TokenizerManager::default(),
codec: StandardCodec,
}
}
}

impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// Set the codec
#[must_use]
pub fn codec<NewCodec: crate::codec::Codec>(self, codec: NewCodec) -> IndexBuilder<NewCodec> {
IndexBuilder {
schema: self.schema,
index_settings: self.index_settings,
tokenizer_manager: self.tokenizer_manager,
fast_field_tokenizer_manager: self.fast_field_tokenizer_manager,
codec,
}
}

@@ -175,7 +154,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// The index will be allocated in anonymous memory.
/// This is useful for indexing a small set of documents,
/// for instance in unit tests or for a temporary in-memory index.
pub fn create_in_ram(self) -> Result<Index<Codec>, TantivyError> {
pub fn create_in_ram(self) -> Result<Index, TantivyError> {
let ram_directory = RamDirectory::create();
self.create(ram_directory)
}
@@ -186,7 +165,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// If a previous index was in this directory, it returns an
/// [`TantivyError::IndexAlreadyExists`] error.
#[cfg(feature = "mmap")]
pub fn create_in_dir<P: AsRef<Path>>(self, directory_path: P) -> crate::Result<Index<Codec>> {
pub fn create_in_dir<P: AsRef<Path>>(self, directory_path: P) -> crate::Result<Index> {
let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::open(directory_path)?);
if Index::exists(&*mmap_directory)? {
return Err(TantivyError::IndexAlreadyExists);
@@ -207,7 +186,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
self,
dir: impl Into<Box<dyn Directory>>,
mem_budget: usize,
) -> crate::Result<SingleSegmentIndexWriter<Codec, D>> {
) -> crate::Result<SingleSegmentIndexWriter<D>> {
let index = self.create(dir)?;
let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
Ok(index_simple_writer)
@@ -223,7 +202,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// For other unit tests, prefer the [`RamDirectory`], see:
/// [`IndexBuilder::create_in_ram()`].
#[cfg(feature = "mmap")]
pub fn create_from_tempdir(self) -> crate::Result<Index<Codec>> {
pub fn create_from_tempdir(self) -> crate::Result<Index> {
let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?);
self.create(mmap_directory)
}
@@ -236,15 +215,12 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
}

/// Opens or creates a new index in the provided directory
pub fn open_or_create<T: Into<Box<dyn Directory>>>(
self,
dir: T,
) -> crate::Result<Index<Codec>> {
pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
let dir: Box<dyn Directory> = dir.into();
if !Index::exists(&*dir)? {
return self.create(dir);
}
let mut index: Index<Codec> = Index::<Codec>::open_with_codec(dir)?;
let mut index = Index::open(dir)?;
index.set_tokenizers(self.tokenizer_manager.clone());
if index.schema() == self.get_expect_schema()? {
Ok(index)
@@ -268,26 +244,18 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {
/// Creates a new index given an implementation of the trait `Directory`.
///
/// If a directory previously existed, it will be erased.
pub fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index<Codec>> {
self.create_avoid_monomorphization(dir.into())
}

fn create_avoid_monomorphization(self, dir: Box<dyn Directory>) -> crate::Result<Index<Codec>> {
fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
self.validate()?;
let dir = dir.into();
let directory = ManagedDirectory::wrap(dir)?;
let codec: CodecConfiguration = CodecConfiguration::from_codec(&self.codec);
save_new_metas(
self.get_expect_schema()?,
self.index_settings.clone(),
&directory,
codec,
)?;
let schema = self.get_expect_schema()?;
let mut metas = IndexMeta::with_schema_and_codec(schema, &self.codec);
let mut metas = IndexMeta::with_schema(self.get_expect_schema()?);
metas.index_settings = self.index_settings;
let mut index: Index<Codec> =
Index::<Codec>::open_from_metas(directory, &metas, SegmentMetaInventory::default())?;
let mut index = Index::open_from_metas(directory, &metas, SegmentMetaInventory::default());
index.set_tokenizers(self.tokenizer_manager);
index.set_fast_field_tokenizers(self.fast_field_tokenizer_manager);
Ok(index)
@@ -296,7 +264,7 @@ impl<Codec: crate::codec::Codec> IndexBuilder<Codec> {

/// Search Index
#[derive(Clone)]
pub struct Index<Codec: crate::codec::Codec = crate::codec::StandardCodec> {
pub struct Index {
directory: ManagedDirectory,
schema: Schema,
settings: IndexSettings,
@@ -304,7 +272,6 @@ pub struct Index<Codec: crate::codec::Codec = crate::codec::StandardCodec> {
tokenizers: TokenizerManager,
fast_field_tokenizers: TokenizerManager,
inventory: SegmentMetaInventory,
codec: Codec,
}

impl Index {
@@ -312,6 +279,41 @@ impl Index {
pub fn builder() -> IndexBuilder {
IndexBuilder::new()
}
/// Examines the directory to see if it contains an index.
///
/// Effectively, it only checks for the presence of the `meta.json` file.
pub fn exists(dir: &dyn Directory) -> Result<bool, OpenReadError> {
dir.exists(&META_FILEPATH)
}

/// Accessor to the search executor.
///
/// This pool is used by default when calling `searcher.search(...)`
/// to perform search on the individual segments.
///
/// By default the executor is single-threaded and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
&self.executor
}

/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
Ok(())
}

/// Set a custom executor backed by an outer thread pool.
pub fn set_executor(&mut self, executor: Executor) {
self.executor = executor;
}

/// Replace the default single thread search executor pool
/// by a thread pool with as many threads as there are CPUs on the system.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = available_parallelism()?.get();
self.set_multithread_executor(default_num_threads)
}

/// Creates a new index using the [`RamDirectory`].
///
@@ -322,13 +324,6 @@ impl Index {
IndexBuilder::new().schema(schema).create_in_ram().unwrap()
}

/// Examines the directory to see if it contains an index.
///
/// Effectively, it only checks for the presence of the `meta.json` file.
pub fn exists(directory: &dyn Directory) -> Result<bool, OpenReadError> {
directory.exists(&META_FILEPATH)
}

/// Creates a new index in a given filepath.
/// The index will use the [`MmapDirectory`].
///
@@ -375,107 +370,20 @@ impl Index {
schema: Schema,
settings: IndexSettings,
) -> crate::Result<Index> {
Self::create_to_avoid_monomorphization(dir.into(), schema, settings)
}

fn create_to_avoid_monomorphization(
dir: Box<dyn Directory>,
schema: Schema,
settings: IndexSettings,
) -> crate::Result<Index> {
let dir: Box<dyn Directory> = dir.into();
let mut builder = IndexBuilder::new().schema(schema);
builder = builder.settings(settings);
builder.create(dir)
}

/// Opens a new directory from an index path.
#[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> {
Self::open_in_dir_to_avoid_monomorphization(directory_path.as_ref())
}

#[inline(never)]
fn open_in_dir_to_avoid_monomorphization(directory_path: &Path) -> crate::Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?;
Index::open(mmap_directory)
}

/// Open the index using the provided directory
pub fn open<T: Into<Box<dyn Directory>>>(directory: T) -> crate::Result<Index> {
Index::<StandardCodec>::open_with_codec(directory.into())
}
}

impl<Codec: crate::codec::Codec> Index<Codec> {
/// Returns a version of this index with the standard codec.
/// This is useful when you need to pass the index to APIs that
/// don't care about the codec (e.g., for reading).
pub(crate) fn with_standard_codec(&self) -> Index<StandardCodec> {
Index {
directory: self.directory.clone(),
schema: self.schema.clone(),
settings: self.settings.clone(),
executor: self.executor.clone(),
tokenizers: self.tokenizers.clone(),
fast_field_tokenizers: self.fast_field_tokenizers.clone(),
inventory: self.inventory.clone(),
codec: StandardCodec::default(),
}
}

/// Open the index using the provided directory
#[inline(never)]
pub fn open_with_codec(directory: Box<dyn Directory>) -> crate::Result<Index<Codec>> {
let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default();
let metas = load_metas(&directory, &inventory)?;
let index: Index<Codec> = Index::<Codec>::open_from_metas(directory, &metas, inventory)?;
Ok(index)
}

/// Accessor to the codec.
pub fn codec(&self) -> &Codec {
&self.codec
}

/// Accessor to the search executor.
///
/// This pool is used by default when calling `searcher.search(...)`
/// to perform search on the individual segments.
///
/// By default the executor is single-threaded and simply runs in the calling thread.
pub fn search_executor(&self) -> &Executor {
&self.executor
}

/// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> crate::Result<()> {
self.executor = Executor::multi_thread(num_threads, "tantivy-search-")?;
Ok(())
}

/// Set a custom executor backed by an outer thread pool.
pub fn set_executor(&mut self, executor: Executor) {
self.executor = executor;
}

/// Replace the default single thread search executor pool
/// by a thread pool with as many threads as there are CPUs on the system.
pub fn set_default_multithread_executor(&mut self) -> crate::Result<()> {
let default_num_threads = available_parallelism()?.get();
self.set_multithread_executor(default_num_threads)
}

/// Creates a new index given a directory and an [`IndexMeta`].
fn open_from_metas<C: crate::codec::Codec>(
fn open_from_metas(
directory: ManagedDirectory,
metas: &IndexMeta,
inventory: SegmentMetaInventory,
) -> crate::Result<Index<C>> {
) -> Index {
let schema = metas.schema.clone();
let codec = metas.codec.to_codec::<C>()?;
Ok(Index {
Index {
settings: metas.index_settings.clone(),
directory,
schema,
@@ -483,8 +391,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
fast_field_tokenizers: TokenizerManager::default(),
executor: Executor::single_thread(),
inventory,
codec,
})
}
}

/// Setter for the tokenizer manager.
@@ -540,7 +447,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
/// Create a default [`IndexReader`] for the given index.
///
/// See [`Index.reader_builder()`].
pub fn reader(&self) -> crate::Result<IndexReader<Codec>> {
pub fn reader(&self) -> crate::Result<IndexReader> {
self.reader_builder().try_into()
}

@@ -548,10 +455,17 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
///
/// Most projects should create at most one reader for a given index.
/// This method is typically called only once per `Index` instance.
pub fn reader_builder(&self) -> IndexReaderBuilder<Codec> {
pub fn reader_builder(&self) -> IndexReaderBuilder {
IndexReaderBuilder::new(self.clone())
}

/// Opens a new directory from an index path.
#[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> crate::Result<Index> {
let mmap_directory = MmapDirectory::open(directory_path)?;
Index::open(mmap_directory)
}

/// Returns the list of the segment metas tracked by the index.
///
/// Such segments can of course be part of the index,
@@ -578,7 +492,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
let segments = self.searchable_segments()?;
let fields_metadata: Vec<Vec<FieldMetadata>> = segments
.into_iter()
.map(|segment| SegmentReader::open(&segment)?.fields_metadata())
.map(|segment| TantivySegmentReader::open(&segment)?.fields_metadata())
.collect::<Result<_, _>>()?;
Ok(merge_field_meta_data(fields_metadata))
}
@@ -592,6 +506,16 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
self.inventory.new_segment_meta(segment_id, max_doc)
}

/// Open the index using the provided directory
pub fn open<T: Into<Box<dyn Directory>>>(directory: T) -> crate::Result<Index> {
let directory = directory.into();
let directory = ManagedDirectory::wrap(directory)?;
let inventory = SegmentMetaInventory::default();
let metas = load_metas(&directory, &inventory)?;
let index = Index::open_from_metas(directory, &metas, inventory);
Ok(index)
}

/// Reads the index meta file from the directory.
pub fn load_metas(&self) -> crate::Result<IndexMeta> {
load_metas(self.directory(), &self.inventory)
@@ -615,7 +539,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
pub fn writer_with_options<D: Document>(
&self,
options: IndexWriterOptions,
) -> crate::Result<IndexWriter<Codec, D>> {
) -> crate::Result<IndexWriter<D>> {
let directory_lock = self
.directory
.acquire_lock(&INDEX_WRITER_LOCK)
@@ -657,7 +581,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
&self,
num_threads: usize,
overall_memory_budget_in_bytes: usize,
) -> crate::Result<IndexWriter<Codec, D>> {
) -> crate::Result<IndexWriter<D>> {
let memory_arena_in_bytes_per_thread = overall_memory_budget_in_bytes / num_threads;
let options = IndexWriterOptions::builder()
.num_worker_threads(num_threads)
@@ -671,7 +595,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
/// That index writer simply has a single thread and a memory budget of 15 MB.
/// Using a single thread gives us a deterministic allocation of DocId.
#[cfg(test)]
pub fn writer_for_tests<D: Document>(&self) -> crate::Result<IndexWriter<Codec, D>> {
pub fn writer_for_tests<D: Document>(&self) -> crate::Result<IndexWriter<D>> {
self.writer_with_num_threads(1, MEMORY_BUDGET_NUM_BYTES_MIN)
}

@@ -689,7 +613,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
pub fn writer<D: Document>(
&self,
memory_budget_in_bytes: usize,
) -> crate::Result<IndexWriter<Codec, D>> {
) -> crate::Result<IndexWriter<D>> {
let mut num_threads = std::cmp::min(available_parallelism()?.get(), MAX_NUM_THREAD);
let memory_budget_num_bytes_per_thread = memory_budget_in_bytes / num_threads;
if memory_budget_num_bytes_per_thread < MEMORY_BUDGET_NUM_BYTES_MIN {
@@ -716,7 +640,7 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
}

/// Returns the list of segments that are searchable
pub fn searchable_segments(&self) -> crate::Result<Vec<Segment<Codec>>> {
pub fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
Ok(self
.searchable_segment_metas()?
.into_iter()
@@ -725,12 +649,12 @@ impl<Codec: crate::codec::Codec> Index<Codec> {
}

#[doc(hidden)]
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment<Codec> {
pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
Segment::for_index(self.clone(), segment_meta)
}

/// Creates a new segment.
pub fn new_segment(&self) -> Segment<Codec> {
pub fn new_segment(&self) -> Segment {
let segment_meta = self
.inventory
.new_segment_meta(SegmentId::generate_random(), 0);
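
With the `Codec` type parameter removed, `Index` is used as a plain type again; a minimal usage sketch under that assumption:

// Sketch: creating and querying an index with the de-genericized API.
fn demo() -> crate::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // Both the builder and the accessors now return a plain `Index`.
    let index: Index = Index::builder().schema(schema).create_in_ram()?;
    let reader = index.reader()?;
    let _searcher = reader.searcher();
    Ok(())
}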

@@ -7,7 +7,6 @@ use std::sync::Arc;
use serde::{Deserialize, Serialize};

use super::SegmentComponent;
use crate::codec::{Codec, CodecConfiguration};
use crate::index::SegmentId;
use crate::schema::Schema;
use crate::store::Compressor;
@@ -321,7 +320,6 @@ pub struct IndexMeta {
/// This payload is entirely unused by tantivy.
#[serde(skip_serializing_if = "Option::is_none")]
pub payload: Option<String>,
pub codec: CodecConfiguration,
}

#[derive(Deserialize, Debug)]
@@ -333,8 +331,6 @@ struct UntrackedIndexMeta {
pub opstamp: Opstamp,
#[serde(skip_serializing_if = "Option::is_none")]
pub payload: Option<String>,
#[serde(default)]
pub codec: CodecConfiguration,
}

impl UntrackedIndexMeta {
@@ -349,7 +345,6 @@ impl UntrackedIndexMeta {
schema: self.schema,
opstamp: self.opstamp,
payload: self.payload,
codec: self.codec,
}
}
}
@@ -360,14 +355,13 @@ impl IndexMeta {
///
/// This new index does not contain any segments.
/// Opstamp will be the value `0u64`.
pub fn with_schema_and_codec<C: Codec>(schema: Schema, codec: &C) -> IndexMeta {
pub fn with_schema(schema: Schema) -> IndexMeta {
IndexMeta {
index_settings: IndexSettings::default(),
segments: vec![],
schema,
opstamp: 0u64,
payload: None,
codec: CodecConfiguration::from_codec(codec),
}
}

@@ -418,12 +412,11 @@ mod tests {
schema,
opstamp: 0u64,
payload: None,
codec: Default::default(),
};
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(
json,
r#"{"index_settings":{"docstore_compression":"none","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0,"codec":{"name":"standard"}}"#
r#"{"index_settings":{"docstore_compression":"none","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
);

let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
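The test above pins the new, codec-free shape of `meta.json`. Since serde ignores unknown keys by default, a reduced standalone check of that payload (struct cut down to the relevant fields) parses the same string:

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    struct MinimalMeta {
        opstamp: u64,
        segments: Vec<serde_json::Value>,
    }

    fn main() {
        let json = r#"{"index_settings":{"docstore_compression":"none","docstore_blocksize":16384},"segments":[],"schema":[],"opstamp":0}"#;
        let meta: MinimalMeta = serde_json::from_str(json).unwrap();
        assert_eq!(meta.opstamp, 0);
        assert!(meta.segments.is_empty());
    }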

@@ -1,4 +1,9 @@
#[cfg(feature = "quickwit")]
use std::future::Future;
use std::io;
#[cfg(feature = "quickwit")]
use std::pin::Pin;
use std::sync::Arc;

use common::json_path_writer::JSON_END_OF_PATH;
use common::{BinarySerializable, ByteCount};
@@ -27,7 +32,102 @@ use crate::termdict::TermDictionary;
///
/// `InvertedIndexReader`s are created by calling
/// [`SegmentReader::inverted_index()`](crate::SegmentReader::inverted_index).
pub struct InvertedIndexReader {
pub trait InvertedIndexReader: Send + Sync {
/// Returns the term info associated with the term.
fn get_term_info(&self, term: &Term) -> io::Result<Option<TermInfo>>;

/// Return the term dictionary datastructure.
fn terms(&self) -> &TermDictionary;

/// Return the fields and types encoded in the dictionary in lexicographic order.
/// Only valid on JSON fields.
///
/// Notice: This requires a full scan and is therefore **very expensive**.
/// TODO: Move to sstable to use the index.
#[doc(hidden)]
fn list_encoded_json_fields(&self) -> io::Result<Vec<InvertedIndexFieldSpace>>;

/// Returns a block postings given a `Term`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
fn read_block_postings(
&self,
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<BlockSegmentPostings>>;

/// Returns a block postings given a `term_info`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
fn read_block_postings_from_terminfo(
&self,
term_info: &TermInfo,
requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings>;

/// Returns a posting object given a `term_info`.
/// This method is for an advanced usage only.
///
/// Most users should prefer using [`Self::read_postings()`] instead.
fn read_postings_from_terminfo(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
) -> io::Result<SegmentPostings>;

/// Returns the total number of tokens recorded for all documents
/// (including deleted documents).
fn total_num_tokens(&self) -> u64;

/// Returns the segment postings associated with the term, and with the given option,
/// or `None` if the term has never been encountered and indexed.
fn read_postings(
&self,
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>>;

/// Returns the number of documents containing the term.
fn doc_freq(&self, term: &Term) -> io::Result<u32>;

/// Returns the number of documents containing the term asynchronously.
#[cfg(feature = "quickwit")]
fn doc_freq_async<'a>(&'a self, term: &'a Term) -> BoxFuture<'a, io::Result<u32>>;

/// Warmup a block postings given a `Term`.
/// This method is for an advanced usage only.
///
/// Returns a boolean indicating whether the term was found in the dictionary.
#[cfg(feature = "quickwit")]
fn warm_postings<'a>(
&'a self,
term: &'a Term,
with_positions: bool,
) -> BoxFuture<'a, io::Result<bool>>;

/// Warmup the block postings for all terms.
/// This method is for an advanced usage only.
///
/// If you know which terms to pre-load, prefer using [`Self::warm_postings`] instead.
#[cfg(feature = "quickwit")]
fn warm_postings_full<'a>(&'a self, with_positions: bool) -> BoxFuture<'a, io::Result<()>>;
}

/// Convenient alias for an atomically reference counted inverted index reader handle.
pub type ArcInvertedIndexReader = Arc<dyn InvertedIndexReader>;

#[cfg(feature = "quickwit")]
/// Boxed future used by async inverted index reader methods.
pub type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
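Downstream code can now be written against the trait object rather than the concrete reader. A hedged sketch; the crate-root re-export of `ArcInvertedIndexReader` is taken from the merger hunk further down:

    use std::io;
    use tantivy::{ArcInvertedIndexReader, Term};

    // Works for any implementation, not just TantivyInvertedIndexReader.
    fn doc_freq_of(reader: &ArcInvertedIndexReader, term: &Term) -> io::Result<u32> {
        reader.doc_freq(term)
    }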

/// The tantivy inverted index reader is in charge of accessing
/// the inverted index associated with a specific field.
///
/// This is the default implementation of [`InvertedIndexReader`].
pub struct TantivyInvertedIndexReader {
termdict: TermDictionary,
postings_file_slice: FileSlice,
positions_file_slice: FileSlice,
@@ -36,11 +136,16 @@ pub struct InvertedIndexReader {
}

/// Object that records the amount of space used by a field in an inverted index.
pub(crate) struct InvertedIndexFieldSpace {
pub struct InvertedIndexFieldSpace {
/// The JSON field name (without the parent field).
pub field_name: String,
/// The field type encoded in the term dictionary.
pub field_type: Type,
/// Total postings size for this field.
pub postings_size: ByteCount,
/// Total positions size for this field.
pub positions_size: ByteCount,
/// Number of terms for this field.
pub num_terms: u64,
}

@@ -62,16 +167,16 @@ impl InvertedIndexFieldSpace {
}
}

impl InvertedIndexReader {
impl TantivyInvertedIndexReader {
pub(crate) fn new(
termdict: TermDictionary,
postings_file_slice: FileSlice,
positions_file_slice: FileSlice,
record_option: IndexRecordOption,
) -> io::Result<InvertedIndexReader> {
) -> io::Result<TantivyInvertedIndexReader> {
let (total_num_tokens_slice, postings_body) = postings_file_slice.split(8);
let total_num_tokens = u64::deserialize(&mut total_num_tokens_slice.read_bytes()?)?;
Ok(InvertedIndexReader {
Ok(TantivyInvertedIndexReader {
termdict,
postings_file_slice: postings_body,
positions_file_slice,
@@ -82,8 +187,8 @@ impl InvertedIndexReader {

/// Creates an empty `InvertedIndexReader` object, which
/// contains no terms at all.
pub fn empty(record_option: IndexRecordOption) -> InvertedIndexReader {
InvertedIndexReader {
pub fn empty(record_option: IndexRecordOption) -> TantivyInvertedIndexReader {
TantivyInvertedIndexReader {
termdict: TermDictionary::empty(),
postings_file_slice: FileSlice::empty(),
positions_file_slice: FileSlice::empty(),
@@ -160,29 +265,6 @@ impl InvertedIndexReader {
Ok(fields)
}

/// Resets the block segment to another position of the postings
/// file.
///
/// This is useful for enumerating through a list of terms,
/// and consuming the associated posting lists while avoiding
/// reallocating a [`BlockSegmentPostings`].
///
/// # Warning
///
/// This does not reset the positions list.
pub fn reset_block_postings_from_terminfo(
&self,
term_info: &TermInfo,
block_postings: &mut BlockSegmentPostings,
) -> io::Result<()> {
let postings_slice = self
.postings_file_slice
.slice(term_info.postings_range.clone());
let postings_bytes = postings_slice.read_bytes()?;
block_postings.reset(term_info.doc_freq, postings_bytes)?;
Ok(())
}
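With this reset helper removed by the hunk above, enumerating terms goes through the trait API and re-reads postings per term. A hedged sketch; the term-dictionary streaming calls (`stream`/`advance`/`value`) are assumptions about this crate's `TermDictionary`:

    use std::io;
    use tantivy::index::InvertedIndexReader;
    use tantivy::schema::IndexRecordOption;

    // Sum doc frequencies over every term in the dictionary.
    fn total_doc_freq(inv: &dyn InvertedIndexReader) -> io::Result<u64> {
        let mut stream = inv.terms().stream()?;
        let mut total = 0u64;
        while stream.advance() {
            let term_info = stream.value();
            // Only needed if we also want to walk the documents:
            let _postings =
                inv.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
            total += u64::from(term_info.doc_freq);
        }
        Ok(total)
    }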

/// Returns a block postings given a `Term`.
/// This method is for an advanced usage only.
///
@@ -282,7 +364,7 @@ impl InvertedIndexReader {
}

#[cfg(feature = "quickwit")]
impl InvertedIndexReader {
impl TantivyInvertedIndexReader {
pub(crate) async fn get_term_info_async(&self, term: &Term) -> io::Result<Option<TermInfo>> {
self.termdict.get_async(term.serialized_value_bytes()).await
}
@@ -492,3 +574,84 @@ impl InvertedIndexReader {
.unwrap_or(0u32))
}
}

impl InvertedIndexReader for TantivyInvertedIndexReader {
fn get_term_info(&self, term: &Term) -> io::Result<Option<TermInfo>> {
TantivyInvertedIndexReader::get_term_info(self, term)
}

fn terms(&self) -> &TermDictionary {
TantivyInvertedIndexReader::terms(self)
}

fn list_encoded_json_fields(&self) -> io::Result<Vec<InvertedIndexFieldSpace>> {
TantivyInvertedIndexReader::list_encoded_json_fields(self)
}

fn read_block_postings(
&self,
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<BlockSegmentPostings>> {
TantivyInvertedIndexReader::read_block_postings(self, term, option)
}

fn read_block_postings_from_terminfo(
&self,
term_info: &TermInfo,
requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> {
TantivyInvertedIndexReader::read_block_postings_from_terminfo(
self,
term_info,
requested_option,
)
}

fn read_postings_from_terminfo(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
) -> io::Result<SegmentPostings> {
TantivyInvertedIndexReader::read_postings_from_terminfo(self, term_info, option)
}

fn total_num_tokens(&self) -> u64 {
TantivyInvertedIndexReader::total_num_tokens(self)
}

fn read_postings(
&self,
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>> {
TantivyInvertedIndexReader::read_postings(self, term, option)
}

fn doc_freq(&self, term: &Term) -> io::Result<u32> {
TantivyInvertedIndexReader::doc_freq(self, term)
}

#[cfg(feature = "quickwit")]
fn doc_freq_async<'a>(&'a self, term: &'a Term) -> BoxFuture<'a, io::Result<u32>> {
Box::pin(async move { TantivyInvertedIndexReader::doc_freq_async(self, term).await })
}

#[cfg(feature = "quickwit")]
fn warm_postings<'a>(
&'a self,
term: &'a Term,
with_positions: bool,
) -> BoxFuture<'a, io::Result<bool>> {
Box::pin(async move {
TantivyInvertedIndexReader::warm_postings(self, term, with_positions).await
})
}

#[cfg(feature = "quickwit")]
fn warm_postings_full<'a>(&'a self, with_positions: bool) -> BoxFuture<'a, io::Result<()>> {
Box::pin(async move {
TantivyInvertedIndexReader::warm_postings_full(self, with_positions).await
})
}
}

@@ -13,8 +13,13 @@ mod segment_reader;
pub use self::index::{Index, IndexBuilder};
pub(crate) use self::index_meta::SegmentMetaInventory;
pub use self::index_meta::{IndexMeta, IndexSettings, Order, SegmentMeta};
pub use self::inverted_index_reader::InvertedIndexReader;
pub use self::inverted_index_reader::{
ArcInvertedIndexReader, InvertedIndexFieldSpace, InvertedIndexReader,
TantivyInvertedIndexReader,
};
pub use self::segment::Segment;
pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId;
pub use self::segment_reader::{FieldMetadata, SegmentReader};
pub use self::segment_reader::{
ArcSegmentReader, FieldMetadata, SegmentReader, TantivySegmentReader,
};
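Call sites that previously imported a single concrete type now pull the trait, the `Arc` alias, and the default implementation explicitly, e.g.:

    use tantivy::index::{ArcSegmentReader, SegmentReader, TantivySegmentReader};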

@@ -2,7 +2,6 @@ use std::fmt;
use std::path::PathBuf;

use super::SegmentComponent;
use crate::codec::StandardCodec;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::{Directory, FileSlice, WritePtr};
use crate::index::{Index, SegmentId, SegmentMeta};
@@ -11,25 +10,25 @@ use crate::Opstamp;

/// A segment is a piece of the index.
#[derive(Clone)]
pub struct Segment<C: crate::codec::Codec = StandardCodec> {
index: Index<C>,
pub struct Segment {
index: Index,
meta: SegmentMeta,
}

impl<C: crate::codec::Codec> fmt::Debug for Segment<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Segment({:?})", self.id().uuid_string())
}
}

impl<C: crate::codec::Codec> Segment<C> {
impl Segment {
/// Creates a new segment given an `Index` and a `SegmentId`
pub(crate) fn for_index(index: Index<C>, meta: SegmentMeta) -> Segment<C> {
pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
Segment { index, meta }
}

/// Returns the index the segment belongs to.
pub fn index(&self) -> &Index<C> {
pub fn index(&self) -> &Index {
&self.index
}

@@ -47,7 +46,7 @@ impl<C: crate::codec::Codec> Segment<C> {
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub fn with_max_doc(self, max_doc: u32) -> Segment<C> {
pub fn with_max_doc(self, max_doc: u32) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_max_doc(max_doc),
@@ -56,7 +55,7 @@ impl<C: crate::codec::Codec> Segment<C> {

#[doc(hidden)]
#[must_use]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment<C> {
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),

@@ -9,8 +9,10 @@ use itertools::Itertools;
use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption;
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::index::{InvertedIndexReader, Segment, SegmentComponent, SegmentId};
use crate::fieldnorm::FieldNormReaders;
use crate::index::{
ArcInvertedIndexReader, Segment, SegmentComponent, SegmentId, TantivyInvertedIndexReader,
};
use crate::json_utils::json_path_sep_to_dot;
use crate::schema::{Field, IndexRecordOption, Schema, Type};
use crate::space_usage::SegmentSpaceUsage;
@@ -18,6 +20,93 @@ use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::{DocId, Opstamp};

/// Abstraction over a segment reader for accessing all data structures of a segment.
///
/// This trait exists to decouple the query layer from the concrete on-disk layout. Alternative
/// codecs can implement it to expose their own segment representation.
pub trait SegmentReader: Send + Sync {
/// Highest document id ever attributed in this segment + 1.
fn max_doc(&self) -> DocId;

/// Number of alive documents. Deleted documents are not counted.
fn num_docs(&self) -> DocId;

/// Returns the schema of the index this segment belongs to.
fn schema(&self) -> &Schema;

/// Return the number of documents that have been deleted in the segment.
fn num_deleted_docs(&self) -> DocId {
self.max_doc() - self.num_docs()
}

/// Returns true if some of the documents of the segment have been deleted.
fn has_deletes(&self) -> bool {
self.num_deleted_docs() > 0
}

/// Accessor to a segment's fast field reader.
fn fast_fields(&self) -> &FastFieldReaders;

/// Accessor to the `FacetReader` associated with a given `Field`.
fn facet_reader(&self, field_name: &str) -> crate::Result<FacetReader> {
let schema = self.schema();
let field = schema.get_field(field_name)?;
let field_entry = schema.get_field_entry(field);
if field_entry.field_type().value_type() != Type::Facet {
return Err(crate::TantivyError::SchemaError(format!(
"`{field_name}` is not a facet field.`"
)));
}
let Some(facet_column) = self.fast_fields().str(field_name)? else {
panic!("Facet Field `{field_name}` is missing. This should not happen");
};
Ok(FacetReader::new(facet_column))
}

/// Accessor to the segment's field norms readers container.
fn fieldnorms_readers(&self) -> &FieldNormReaders;

/// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader>;

/// Returns a field reader associated with the field given in argument.
fn inverted_index(&self, field: Field) -> crate::Result<ArcInvertedIndexReader>;

/// Returns the list of fields that have been indexed in the segment.
fn fields_metadata(&self) -> crate::Result<Vec<FieldMetadata>>;

/// Returns the segment id
fn segment_id(&self) -> SegmentId;

/// Returns the delete opstamp
fn delete_opstamp(&self) -> Option<Opstamp>;

/// Returns the bitset representing the alive `DocId`s.
fn alive_bitset(&self) -> Option<&AliveBitSet>;

/// Returns true if the `doc` is marked as deleted.
fn is_deleted(&self, doc: DocId) -> bool {
self.alive_bitset()
.map(|alive_bitset| alive_bitset.is_deleted(doc))
.unwrap_or(false)
}

/// Returns an iterator that will iterate over the alive document ids
fn doc_ids_alive(&self) -> Box<dyn Iterator<Item = DocId> + Send + '_> {
if let Some(alive_bitset) = &self.alive_bitset() {
Box::new(alive_bitset.iter_alive())
} else {
Box::new(0u32..self.max_doc())
}
}

/// Summarize total space usage of this segment.
fn space_usage(&self) -> io::Result<SegmentSpaceUsage>;
}

/// Convenient alias for an atomically reference counted segment reader handle.
pub type ArcSegmentReader = Arc<dyn SegmentReader>;
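Helpers can now be written once against `&dyn SegmentReader` and reused for any segment representation. A small hedged sketch using only the trait's methods:

    use tantivy::index::SegmentReader;

    /// Fraction of documents still alive in a segment (1.0 when empty).
    fn alive_ratio(reader: &dyn SegmentReader) -> f64 {
        if reader.max_doc() == 0 {
            return 1.0;
        }
        f64::from(reader.num_docs()) / f64::from(reader.max_doc())
    }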

/// Entry point to access all of the datastructures of the `Segment`
///
/// - term dictionary
@@ -29,8 +118,8 @@ use crate::{DocId, Opstamp};
/// The segment reader has a very low memory footprint,
/// as close to all of the data is mmapped.
#[derive(Clone)]
pub struct SegmentReader {
inv_idx_reader_cache: Arc<RwLock<HashMap<Field, Arc<InvertedIndexReader>>>>,
pub struct TantivySegmentReader {
inv_idx_reader_cache: Arc<RwLock<HashMap<Field, ArcInvertedIndexReader>>>,

segment_id: SegmentId,
delete_opstamp: Option<Opstamp>,
@@ -49,106 +138,17 @@ pub struct SegmentReader {
schema: Schema,
}

impl SegmentReader {
/// Returns the highest document id ever attributed in
/// this segment + 1.
pub fn max_doc(&self) -> DocId {
self.max_doc
}

/// Returns the number of alive documents.
/// Deleted documents are not counted.
pub fn num_docs(&self) -> DocId {
self.num_docs
}

/// Returns the schema of the index this segment belongs to.
pub fn schema(&self) -> &Schema {
&self.schema
}

/// Return the number of documents that have been
/// deleted in the segment.
pub fn num_deleted_docs(&self) -> DocId {
self.max_doc - self.num_docs
}

/// Returns true if some of the documents of the segment have been deleted.
pub fn has_deletes(&self) -> bool {
self.num_deleted_docs() > 0
}

/// Accessor to a segment's fast field reader given a field.
///
/// Returns the u64 fast value reader if the field
/// is a u64 field indexed as "fast".
///
/// Return a FastFieldNotAvailableError if the field is not
/// declared as a fast field in the schema.
///
/// # Panics
/// May panic if the index is corrupted.
pub fn fast_fields(&self) -> &FastFieldReaders {
&self.fast_fields_readers
}

/// Accessor to the `FacetReader` associated with a given `Field`.
pub fn facet_reader(&self, field_name: &str) -> crate::Result<FacetReader> {
let schema = self.schema();
let field = schema.get_field(field_name)?;
let field_entry = schema.get_field_entry(field);
if field_entry.field_type().value_type() != Type::Facet {
return Err(crate::TantivyError::SchemaError(format!(
"`{field_name}` is not a facet field.`"
)));
}
let Some(facet_column) = self.fast_fields().str(field_name)? else {
panic!("Facet Field `{field_name}` is missing. This should not happen");
};
Ok(FacetReader::new(facet_column))
}

/// Accessor to the segment's `Field norms`'s reader.
///
/// Field norms are the length (in tokens) of the fields.
/// It is used in the computation of the [TfIdf](https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html).
///
/// They are simply stored as a fast field, serialized in
/// the `.fieldnorm` file of the segment.
pub fn get_fieldnorms_reader(&self, field: Field) -> crate::Result<FieldNormReader> {
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
let field_name = self.schema.get_field_name(field);
let err_msg = format!(
"Field norm not found for field {field_name:?}. Was the field set to record norm \
during indexing?"
);
crate::TantivyError::SchemaError(err_msg)
})
}

#[doc(hidden)]
pub fn fieldnorms_readers(&self) -> &FieldNormReaders {
&self.fieldnorm_readers
}

/// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
///
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
/// The size of blocks is configurable, this should be reflected in the
pub fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
StoreReader::open(self.store_file.clone(), cache_num_blocks)
}

impl TantivySegmentReader {
/// Open a new segment for reading.
pub fn open<C: crate::codec::Codec>(segment: &Segment<C>) -> crate::Result<SegmentReader> {
pub fn open(segment: &Segment) -> crate::Result<TantivySegmentReader> {
Self::open_with_custom_alive_set(segment, None)
}

/// Open a new segment for reading.
pub fn open_with_custom_alive_set<C: crate::codec::Codec>(
segment: &Segment<C>,
pub fn open_with_custom_alive_set(
segment: &Segment,
custom_bitset: Option<AliveBitSet>,
) -> crate::Result<SegmentReader> {
) -> crate::Result<TantivySegmentReader> {
let termdict_file = segment.open_read(SegmentComponent::Terms)?;
let termdict_composite = CompositeFile::open(&termdict_file)?;

@@ -190,7 +190,7 @@ impl SegmentReader {
.map(|alive_bitset| alive_bitset.num_alive_docs() as u32)
.unwrap_or(max_doc);

Ok(SegmentReader {
Ok(TantivySegmentReader {
inv_idx_reader_cache: Default::default(),
num_docs,
max_doc,
@@ -206,6 +206,52 @@ impl SegmentReader {
schema,
})
}
}

impl SegmentReader for TantivySegmentReader {
/// Returns the highest document id ever attributed in
/// this segment + 1.
fn max_doc(&self) -> DocId {
self.max_doc
}

/// Returns the number of alive documents.
/// Deleted documents are not counted.
fn num_docs(&self) -> DocId {
self.num_docs
}

/// Returns the schema of the index this segment belongs to.
fn schema(&self) -> &Schema {
&self.schema
}

/// Accessor to a segment's fast field reader given a field.
///
/// Returns the u64 fast value reader if the field
/// is a u64 field indexed as "fast".
///
/// Return a FastFieldNotAvailableError if the field is not
/// declared as a fast field in the schema.
///
/// # Panics
/// May panic if the index is corrupted.
fn fast_fields(&self) -> &FastFieldReaders {
&self.fast_fields_readers
}

#[doc(hidden)]
fn fieldnorms_readers(&self) -> &FieldNormReaders {
&self.fieldnorm_readers
}

/// Accessor to the segment's [`StoreReader`](crate::store::StoreReader).
///
/// `cache_num_blocks` sets the number of decompressed blocks to be cached in an LRU.
/// The size of blocks is configurable, this should be reflected in the
fn get_store_reader(&self, cache_num_blocks: usize) -> io::Result<StoreReader> {
StoreReader::open(self.store_file.clone(), cache_num_blocks)
}

/// Returns a field reader associated with the field given in argument.
/// If the field was not present in the index during indexing time,
@@ -219,7 +265,7 @@ impl SegmentReader {
/// is returned.
/// Similarly, if the field is marked as indexed but no term has been indexed for the given
/// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
fn inverted_index(&self, field: Field) -> crate::Result<ArcInvertedIndexReader> {
if let Some(inv_idx_reader) = self
.inv_idx_reader_cache
.read()
@@ -244,7 +290,7 @@ impl SegmentReader {
//
// Returns an empty inverted index.
let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
return Ok(Arc::new(InvertedIndexReader::empty(record_option)));
return Ok(Arc::new(TantivyInvertedIndexReader::empty(record_option)));
}

let record_option = record_option_opt.unwrap();
@@ -268,7 +314,7 @@ impl SegmentReader {
DataCorruption::comment_only(error_msg)
})?;

let inv_idx_reader = Arc::new(InvertedIndexReader::new(
let inv_idx_reader: ArcInvertedIndexReader = Arc::new(TantivyInvertedIndexReader::new(
TermDictionary::open(termdict_file)?,
postings_file,
positions_file,
@@ -298,7 +344,7 @@ impl SegmentReader {
/// Disclaimer: Some fields may not be listed here. For instance, if the schema contains a json
/// field that is not indexed nor a fast field but is stored, it is possible for the field
/// to not be listed.
pub fn fields_metadata(&self) -> crate::Result<Vec<FieldMetadata>> {
fn fields_metadata(&self) -> crate::Result<Vec<FieldMetadata>> {
let mut indexed_fields: Vec<FieldMetadata> = Vec::new();
let mut map_to_canonical = FnvHashMap::default();
for (field, field_entry) in self.schema().fields() {
@@ -420,39 +466,22 @@ impl SegmentReader {
}

/// Returns the segment id
pub fn segment_id(&self) -> SegmentId {
fn segment_id(&self) -> SegmentId {
self.segment_id
}

/// Returns the delete opstamp
pub fn delete_opstamp(&self) -> Option<Opstamp> {
fn delete_opstamp(&self) -> Option<Opstamp> {
self.delete_opstamp
}

/// Returns the bitset representing the alive `DocId`s.
pub fn alive_bitset(&self) -> Option<&AliveBitSet> {
fn alive_bitset(&self) -> Option<&AliveBitSet> {
self.alive_bitset_opt.as_ref()
}

/// Returns true if the `doc` is marked
/// as deleted.
pub fn is_deleted(&self, doc: DocId) -> bool {
self.alive_bitset()
.map(|alive_bitset| alive_bitset.is_deleted(doc))
.unwrap_or(false)
}

/// Returns an iterator that will iterate over the alive document ids
pub fn doc_ids_alive(&self) -> Box<dyn Iterator<Item = DocId> + Send + '_> {
if let Some(alive_bitset) = &self.alive_bitset_opt {
Box::new(alive_bitset.iter_alive())
} else {
Box::new(0u32..self.max_doc)
}
}

/// Summarize total space usage of this segment.
pub fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
fn space_usage(&self) -> io::Result<SegmentSpaceUsage> {
Ok(SegmentSpaceUsage::new(
self.num_docs(),
self.termdict_composite.space_usage(self.schema()),
@@ -576,7 +605,7 @@ fn intersect_alive_bitset(
}
}

impl fmt::Debug for SegmentReader {
impl fmt::Debug for TantivySegmentReader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "SegmentReader({:?})", self.segment_id)
}

@@ -250,11 +250,15 @@ mod tests {

struct DummyWeight;
impl Weight for DummyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
_reader: &dyn SegmentReader,
_boost: Score,
) -> crate::Result<Box<dyn Scorer>> {
Err(crate::TantivyError::InternalError("dummy impl".to_owned()))
}

fn explain(&self, _reader: &SegmentReader, _doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, _reader: &dyn SegmentReader, _doc: DocId) -> crate::Result<Explanation> {
Err(crate::TantivyError::InternalError("dummy impl".to_owned()))
}
}

@@ -9,11 +9,12 @@ use smallvec::smallvec;
use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater;
use super::{AddBatch, AddBatchReceiver, AddBatchSender, PreparedCommit};
use crate::codec::{Codec, StandardCodec};
use crate::directory::{DirectoryLock, GarbageCollectionResult, TerminatingWrite};
use crate::error::TantivyError;
use crate::fastfield::write_alive_bitset;
use crate::index::{Index, Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader};
use crate::index::{
Index, Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader, TantivySegmentReader,
};
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
use crate::indexer::index_writer_status::IndexWriterStatus;
@@ -69,12 +70,12 @@ pub struct IndexWriterOptions {
/// indexing queue.
/// Each indexing thread builds its own independent [`Segment`], via
/// a `SegmentWriter` object.
pub struct IndexWriter<C: Codec = StandardCodec, D: Document = TantivyDocument> {
pub struct IndexWriter<D: Document = TantivyDocument> {
// the lock is just used to bind the
// lifetime of the lock with that of the IndexWriter.
_directory_lock: Option<DirectoryLock>,

index: Index<C>,
index: Index,

options: IndexWriterOptions,

@@ -83,7 +84,7 @@ pub struct IndexWriter<C: Codec = StandardCodec, D: Document = TantivyDocument>
index_writer_status: IndexWriterStatus<D>,
operation_sender: AddBatchSender<D>,

segment_updater: SegmentUpdater<C>,
segment_updater: SegmentUpdater,

worker_id: usize,

@@ -95,7 +96,7 @@ pub struct IndexWriter<C: Codec = StandardCodec, D: Document = TantivyDocument>

fn compute_deleted_bitset(
alive_bitset: &mut BitSet,
segment_reader: &SegmentReader,
segment_reader: &dyn SegmentReader,
delete_cursor: &mut DeleteCursor,
doc_opstamps: &DocToOpstampMapping,
target_opstamp: Opstamp,
@@ -129,8 +130,8 @@ fn compute_deleted_bitset(
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, the `segment_entry` is not updated.
pub fn advance_deletes<C: Codec>(
mut segment: Segment<C>,
pub fn advance_deletes(
mut segment: Segment,
segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp,
) -> crate::Result<()> {
@@ -144,7 +145,7 @@ pub fn advance_deletes<C: Codec>(
return Ok(());
}

let segment_reader = SegmentReader::open(&segment)?;
let segment_reader = TantivySegmentReader::open(&segment)?;

let max_doc = segment_reader.max_doc();
let mut alive_bitset: BitSet = match segment_entry.alive_bitset() {
@@ -180,11 +181,11 @@ pub fn advance_deletes<C: Codec>(
Ok(())
}

fn index_documents<C: crate::codec::Codec, D: Document>(
fn index_documents<D: Document>(
memory_budget: usize,
segment: Segment<C>,
segment: Segment,
grouped_document_iterator: &mut dyn Iterator<Item = AddBatch<D>>,
segment_updater: &SegmentUpdater<C>,
segment_updater: &SegmentUpdater,
mut delete_cursor: DeleteCursor,
) -> crate::Result<()> {
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone())?;
@@ -227,8 +228,8 @@ fn index_documents<C: crate::codec::Codec, D: Document>(
}

/// `doc_opstamps` is required to be non-empty.
fn apply_deletes<C: crate::codec::Codec>(
segment: &Segment<C>,
fn apply_deletes(
segment: &Segment,
delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp],
) -> crate::Result<Option<BitSet>> {
@@ -244,7 +245,7 @@ fn apply_deletes<C: crate::codec::Codec>(
.max()
.expect("Empty DocOpstamp is forbidden");

let segment_reader = SegmentReader::open(segment)?;
let segment_reader = TantivySegmentReader::open(segment)?;
let doc_to_opstamps = DocToOpstampMapping::WithMap(doc_opstamps);

let max_doc = segment.meta().max_doc();
@@ -263,7 +264,7 @@ fn apply_deletes<C: crate::codec::Codec>(
})
}

impl<C: Codec, D: Document> IndexWriter<C, D> {
impl<D: Document> IndexWriter<D> {
/// Create a new index writer. Attempts to acquire a lockfile.
///
/// The lockfile should be deleted on drop, but it is possible
@@ -279,7 +280,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
/// If the memory arena per thread is too small or too big, returns
/// `TantivyError::InvalidArgument`
pub(crate) fn new(
index: &Index<C>,
index: &Index,
options: IndexWriterOptions,
directory_lock: DirectoryLock,
) -> crate::Result<Self> {
@@ -346,7 +347,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
}

/// Accessor to the index.
pub fn index(&self) -> &Index<C> {
pub fn index(&self) -> &Index {
&self.index
}

@@ -394,7 +395,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
/// It is safe to start writing files associated with the new `Segment`.
/// These will not be garbage collected as long as an instance of the
/// `SegmentMeta` object associated with the new `Segment` is "alive".
pub fn new_segment(&self) -> Segment<C> {
pub fn new_segment(&self) -> Segment {
self.index.new_segment()
}

@@ -616,7 +617,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
/// It is also possible to add a payload to the `commit`
/// using this API.
/// See [`PreparedCommit::set_payload()`].
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit<'_, C, D>> {
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit<'_, D>> {
// Here, because we join all of the worker threads,
// all of the segment updates for this commit have been
// sent.
@@ -666,7 +667,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
self.prepare_commit()?.commit()
}

pub(crate) fn segment_updater(&self) -> &SegmentUpdater<C> {
pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
&self.segment_updater
}
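Usage is unchanged after dropping the codec parameter. A hedged sketch of the two-phase commit path with a payload (schema, field, and payload string are placeholders):

    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index, TantivyDocument};

    fn commit_with_payload() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let body = schema_builder.add_text_field("body", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer::<TantivyDocument>(50_000_000)?;
        writer.add_document(doc!(body => "hello"))?;
        let mut prepared = writer.prepare_commit()?;
        prepared.set_payload("checkpoint-42");
        prepared.commit()?;
        Ok(())
    }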

@@ -805,7 +806,7 @@ impl<C: Codec, D: Document> IndexWriter<C, D> {
}
}

impl<C: Codec, D: Document> Drop for IndexWriter<C, D> {
impl<D: Document> Drop for IndexWriter<D> {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();

@@ -1,5 +1,3 @@
use std::sync::Arc;

use columnar::{
ColumnType, ColumnarReader, MergeRowOrder, RowAddr, ShuffleMergeOrder, StackMergeOrder,
};
@@ -12,14 +10,14 @@ use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::AliveBitSet;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::index::{Segment, SegmentComponent, SegmentReader};
use crate::index::{Segment, SegmentComponent, SegmentReader, TantivySegmentReader};
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{value_type_to_column_type, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
use crate::{DocAddress, DocId, InvertedIndexReader};
use crate::{ArcInvertedIndexReader, DocAddress, DocId};

/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
@@ -27,7 +25,7 @@ use crate::{DocAddress, DocId, InvertedIndexReader};
pub const MAX_DOC_LIMIT: u32 = 1 << 31;

fn estimate_total_num_tokens_in_single_segment(
reader: &SegmentReader,
reader: &dyn SegmentReader,
field: Field,
) -> crate::Result<u64> {
// There are no deletes. We can simply use the exact value saved into the posting list.
@@ -68,7 +66,7 @@ fn estimate_total_num_tokens_in_single_segment(
Ok((segment_num_tokens as f64 * ratio) as u64)
}
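When a segment has deletes, the stored token total covers deleted documents too, so the merger scales it by the alive-document ratio. A worked example of that estimate:

    // 1_000_000 stored tokens, 150_000 of 200_000 docs alive => keep 3/4.
    fn estimated_tokens(segment_num_tokens: u64, num_docs: u32, max_doc: u32) -> u64 {
        let ratio = f64::from(num_docs) / f64::from(max_doc);
        (segment_num_tokens as f64 * ratio) as u64
    }

    #[test]
    fn ratio_estimate_example() {
        assert_eq!(estimated_tokens(1_000_000, 150_000, 200_000), 750_000);
    }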

fn estimate_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
fn estimate_total_num_tokens(readers: &[TantivySegmentReader], field: Field) -> crate::Result<u64> {
let mut total_num_tokens: u64 = 0;
for reader in readers {
total_num_tokens += estimate_total_num_tokens_in_single_segment(reader, field)?;
@@ -78,7 +76,7 @@ fn estimate_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::

pub struct IndexMerger {
schema: Schema,
pub(crate) readers: Vec<SegmentReader>,
pub(crate) readers: Vec<TantivySegmentReader>,
max_doc: u32,
}

@@ -145,10 +143,7 @@ fn extract_fast_field_required_columns(schema: &Schema) -> Vec<(String, ColumnTy
}

impl IndexMerger {
pub fn open<C: crate::codec::Codec>(
schema: Schema,
segments: &[Segment<C>],
) -> crate::Result<IndexMerger> {
pub fn open(schema: Schema, segments: &[Segment]) -> crate::Result<IndexMerger> {
let alive_bitset = segments.iter().map(|_| None).collect_vec();
Self::open_with_custom_alive_set(schema, segments, alive_bitset)
}
@@ -165,16 +160,18 @@ impl IndexMerger {
// This can be used to merge but also apply an additional filter.
// One use case is demux, which is basically taking a list of
// segments and partitioning them, e.g. by a value in a field.
pub fn open_with_custom_alive_set<C: crate::codec::Codec>(
pub fn open_with_custom_alive_set(
schema: Schema,
segments: &[Segment<C>],
segments: &[Segment],
alive_bitset_opt: Vec<Option<AliveBitSet>>,
) -> crate::Result<IndexMerger> {
let mut readers = vec![];
for (segment, new_alive_bitset_opt) in segments.iter().zip(alive_bitset_opt) {
if segment.meta().num_docs() > 0 {
let reader =
SegmentReader::open_with_custom_alive_set(segment, new_alive_bitset_opt)?;
let reader = TantivySegmentReader::open_with_custom_alive_set(
segment,
new_alive_bitset_opt,
)?;
readers.push(reader);
}
}
@@ -207,8 +204,20 @@ impl IndexMerger {
let fieldnorms_readers: Vec<FieldNormReader> = self
.readers
.iter()
.map(|reader| reader.get_fieldnorms_reader(field))
.collect::<Result<_, _>>()?;
.map(|reader| {
reader
.fieldnorms_readers()
.get_field(field)?
.ok_or_else(|| {
let field_name = self.schema.get_field_name(field);
let err_msg = format!(
"Field norm not found for field {field_name:?}. Was the field set \
to record norm during indexing?"
);
crate::TantivyError::SchemaError(err_msg)
})
})
.collect::<crate::Result<_>>()?;
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let fieldnorms_reader = &fieldnorms_readers[old_doc_addr.segment_ord as usize];
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(old_doc_addr.doc_id);
@@ -265,7 +274,7 @@ impl IndexMerger {
}),
);

let has_deletes: bool = self.readers.iter().any(SegmentReader::has_deletes);
let has_deletes: bool = self.readers.iter().any(|reader| reader.has_deletes());
let mapping_type = if has_deletes {
MappingType::StackedWithDeletes
} else {
@@ -300,7 +309,7 @@ impl IndexMerger {

let mut max_term_ords: Vec<TermOrdinal> = Vec::new();

let field_readers: Vec<Arc<InvertedIndexReader>> = self
let field_readers: Vec<ArcInvertedIndexReader> = self
.readers
.iter()
.map(|reader| reader.inverted_index(indexed_field))
@@ -369,7 +378,7 @@ impl IndexMerger {
// Let's compute the list of non-empty posting lists
for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
let segment_reader = &self.readers[segment_ord];
let inverted_index: &InvertedIndexReader = &field_readers[segment_ord];
let inverted_index = field_readers[segment_ord].as_ref();
let segment_postings = inverted_index
.read_postings_from_terminfo(&term_info, segment_postings_option)?;
let alive_bitset_opt = segment_reader.alive_bitset();
@@ -528,10 +537,7 @@ impl IndexMerger {
///
/// # Returns
/// The number of documents in the resulting segment.
pub fn write<C: crate::codec::Codec>(
&self,
mut serializer: SegmentSerializer<C>,
) -> crate::Result<u32> {
pub fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
let doc_id_mapping = self.get_doc_id_from_concatenated_data()?;
debug!("write-fieldnorms");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
@@ -1540,7 +1546,7 @@ mod tests {
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(EnableScoring::enabled_from_searcher(&searcher))?
.term_scorer_for_test(segment_reader, 1.0)?
.term_scorer_for_test(segment_reader.as_ref(), 1.0)?
.unwrap();
// the difference compared to before is intrinsic to the bm25 formula. no worries
// there.

@@ -1,17 +1,16 @@
use super::IndexWriter;
use crate::codec::Codec;
use crate::schema::document::Document;
use crate::{FutureResult, Opstamp, TantivyDocument};

/// A prepared commit
pub struct PreparedCommit<'a, C: Codec, D: Document = TantivyDocument> {
index_writer: &'a mut IndexWriter<C, D>,
pub struct PreparedCommit<'a, D: Document = TantivyDocument> {
index_writer: &'a mut IndexWriter<D>,
payload: Option<String>,
opstamp: Opstamp,
}

impl<'a, C: Codec, D: Document> PreparedCommit<'a, C, D> {
pub(crate) fn new(index_writer: &'a mut IndexWriter<C, D>, opstamp: Opstamp) -> Self {
impl<'a, D: Document> PreparedCommit<'a, D> {
pub(crate) fn new(index_writer: &'a mut IndexWriter<D>, opstamp: Opstamp) -> Self {
Self {
index_writer,
payload: None,

@@ -8,17 +8,17 @@ use crate::store::StoreWriter;

/// Segment serializer is in charge of laying out on disk
/// the data accumulated and sorted by the `SegmentWriter`.
pub struct SegmentSerializer<C: crate::codec::Codec> {
segment: Segment<C>,
pub struct SegmentSerializer {
segment: Segment,
pub(crate) store_writer: StoreWriter,
fast_field_write: WritePtr,
fieldnorms_serializer: Option<FieldNormsSerializer>,
postings_serializer: InvertedIndexSerializer,
}

impl<C: crate::codec::Codec> SegmentSerializer<C> {
impl SegmentSerializer {
/// Creates a new `SegmentSerializer`.
pub fn for_segment(mut segment: Segment<C>) -> crate::Result<SegmentSerializer<C>> {
pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
let settings = segment.index().settings().clone();
let store_writer = {
let store_write = segment.open_write(SegmentComponent::Store)?;
@@ -50,7 +50,7 @@ impl<C: crate::codec::Codec> SegmentSerializer<C> {
self.store_writer.mem_usage()
}

pub fn segment(&self) -> &Segment<C> {
pub fn segment(&self) -> &Segment {
&self.segment
}

@@ -10,7 +10,6 @@ use std::sync::{Arc, RwLock};
use rayon::{ThreadPool, ThreadPoolBuilder};

use super::segment_manager::SegmentManager;
use crate::codec::{Codec, CodecConfiguration};
use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::fastfield::AliveBitSet;
@@ -62,10 +61,10 @@ pub(crate) fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate:
// We voluntarily pass a merge_operation ref to guarantee that
// the merge_operation is alive during the process
#[derive(Clone)]
pub(crate) struct SegmentUpdater<C: Codec>(Arc<InnerSegmentUpdater<C>>);
pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>);

impl<C: Codec> Deref for SegmentUpdater<C> {
type Target = InnerSegmentUpdater<C>;
impl Deref for SegmentUpdater {
type Target = InnerSegmentUpdater;

#[inline]
fn deref(&self) -> &Self::Target {
@@ -73,8 +72,8 @@ impl<C: Codec> Deref for SegmentUpdater<C> {
}
}

fn garbage_collect_files<C: Codec>(
segment_updater: SegmentUpdater<C>,
fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
@@ -85,8 +84,8 @@ fn garbage_collect_files<C: Codec>(

/// Merges the list of segments given in `segment_entries`.
/// This function happens in the calling thread and is computationally expensive.
fn merge<Codec: crate::codec::Codec>(
index: &Index<Codec>,
fn merge(
index: &Index,
mut segment_entries: Vec<SegmentEntry>,
target_opstamp: Opstamp,
) -> crate::Result<Option<SegmentEntry>> {
@@ -109,7 +108,7 @@ fn merge<Codec: crate::codec::Codec>(

let delete_cursor = segment_entries[0].delete_cursor().clone();

let segments: Vec<Segment<Codec>> = segment_entries
let segments: Vec<Segment> = segment_entries
.iter()
.map(|segment_entry| index.segment(segment_entry.meta().clone()))
.collect();
@@ -140,10 +139,10 @@ fn merge<Codec: crate::codec::Codec>(
/// meant to work if you have an `IndexWriter` running for the origin indices, or
/// the destination `Index`.
#[doc(hidden)]
pub fn merge_indices<Codec: crate::codec::Codec>(
indices: &[Index<Codec>],
output_directory: Box<dyn Directory>,
) -> crate::Result<Index<Codec>> {
pub fn merge_indices<T: Into<Box<dyn Directory>>>(
indices: &[Index],
output_directory: T,
) -> crate::Result<Index> {
if indices.is_empty() {
// If there are no indices to merge, there is no need to do anything.
return Err(crate::TantivyError::InvalidArgument(
@@ -164,7 +163,7 @@ pub fn merge_indices<Codec: crate::codec::Codec>(
));
}

let mut segments: Vec<Segment<Codec>> = Vec::new();
let mut segments: Vec<Segment> = Vec::new();
for index in indices {
segments.extend(index.searchable_segments()?);
}
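With `output_directory` now generic over `Into<Box<dyn Directory>>`, callers pass a directory value directly instead of boxing it, as the updated tests further down also show. A hedged sketch (`merge_indices` is `#[doc(hidden)]`, and its import path here is an assumption):

    use tantivy::directory::RamDirectory;
    use tantivy::{merge_indices, Index}; // path to merge_indices assumed

    fn merge_two(a: Index, b: Index) -> tantivy::Result<Index> {
        merge_indices(&[a, b], RamDirectory::default())
    }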
|
||||
@@ -186,12 +185,12 @@ pub fn merge_indices<Codec: crate::codec::Codec>(
|
||||
/// meant to work if you have an `IndexWriter` running for the origin indices, or
|
||||
/// the destination `Index`.
|
||||
#[doc(hidden)]
|
||||
pub fn merge_filtered_segments<Codec: crate::codec::Codec, T: Into<Box<dyn Directory>>>(
|
||||
segments: &[Segment<Codec>],
|
||||
pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
|
||||
segments: &[Segment],
|
||||
target_settings: IndexSettings,
|
||||
filter_doc_ids: Vec<Option<AliveBitSet>>,
|
||||
output_directory: T,
|
||||
) -> crate::Result<Index<Codec>> {
|
||||
) -> crate::Result<Index> {
|
||||
if segments.is_empty() {
|
||||
// If there are no indices to merge, there is no need to do anything.
|
||||
return Err(crate::TantivyError::InvalidArgument(
|
||||
@@ -212,12 +211,11 @@ pub fn merge_filtered_segments<Codec: crate::codec::Codec, T: Into<Box<dyn Direc
|
||||
));
|
||||
}
|
||||
|
||||
let mut merged_index: Index<Codec> = Index::builder()
|
||||
.schema(target_schema.clone())
|
||||
.codec(segments[0].index().codec().clone())
|
||||
.settings(target_settings.clone())
|
||||
.create(output_directory.into())?;
|
||||
|
||||
let mut merged_index = Index::create(
|
||||
output_directory,
|
||||
target_schema.clone(),
|
||||
target_settings.clone(),
|
||||
)?;
|
||||
let merged_segment = merged_index.new_segment();
|
||||
let merged_segment_id = merged_segment.id();
|
||||
let merger: IndexMerger =
|
||||
@@ -237,7 +235,6 @@ pub fn merge_filtered_segments<Codec: crate::codec::Codec, T: Into<Box<dyn Direc
|
||||
))
|
||||
.trim_end()
|
||||
);
|
||||
let codec_configuration = CodecConfiguration::from_codec(segments[0].index().codec());

let index_meta = IndexMeta {
index_settings: target_settings, // index_settings of all segments should be the same
@@ -245,7 +242,6 @@ pub fn merge_filtered_segments<Codec: crate::codec::Codec, T: Into<Box<dyn Direc
schema: target_schema,
opstamp: 0u64,
payload: Some(stats),
codec: codec_configuration,
};

// save the meta.json
@@ -254,7 +250,7 @@ pub fn merge_filtered_segments<Codec: crate::codec::Codec, T: Into<Box<dyn Direc
Ok(merged_index)
}

pub(crate) struct InnerSegmentUpdater<C: Codec> {
pub(crate) struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the
// `SegmentUpdater`.
@@ -265,7 +261,7 @@ pub(crate) struct InnerSegmentUpdater<C: Codec> {
pool: ThreadPool,
merge_thread_pool: ThreadPool,

index: Index<C>,
index: Index,
segment_manager: SegmentManager,
merge_policy: RwLock<Arc<dyn MergePolicy>>,
killed: AtomicBool,
@@ -273,13 +269,13 @@ pub(crate) struct InnerSegmentUpdater<C: Codec> {
merge_operations: MergeOperationInventory,
}

impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
impl SegmentUpdater {
pub fn create(
index: Index<Codec>,
index: Index,
stamper: Stamper,
delete_cursor: &DeleteCursor,
num_merge_threads: usize,
) -> crate::Result<Self> {
) -> crate::Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new()
@@ -408,14 +404,12 @@ impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
//
// Segment 1 from disk 1, Segment 1 from disk 2, etc.
committed_segment_metas.sort_by_key(|segment_meta| -(segment_meta.max_doc() as i32));
let codec = CodecConfiguration::from_codec(index.codec());
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
segments: committed_segment_metas,
schema: index.schema(),
opstamp,
payload: commit_message,
codec,
};
// TODO add context to the error.
save_metas(&index_meta, directory.box_clone().borrow_mut())?;
@@ -449,7 +443,7 @@ impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
opstamp: Opstamp,
payload: Option<String>,
) -> FutureResult<Opstamp> {
let segment_updater: SegmentUpdater<Codec> = self.clone();
let segment_updater: SegmentUpdater = self.clone();
self.schedule_task(move || {
let segment_entries = segment_updater.purge_deletes(opstamp)?;
segment_updater.segment_manager.commit(segment_entries);
@@ -708,7 +702,6 @@ impl<Codec: crate::codec::Codec> SegmentUpdater<Codec> {
#[cfg(test)]
mod tests {
use super::merge_indices;
use crate::codec::StandardCodec;
use crate::collector::TopDocs;
use crate::directory::RamDirectory;
use crate::fastfield::AliveBitSet;
@@ -717,7 +710,7 @@ mod tests {
use crate::indexer::segment_updater::merge_filtered_segments;
use crate::query::QueryParser;
use crate::schema::*;
use crate::{Directory, DocAddress, Index, Segment};
use crate::{Directory, DocAddress, Index, Segment, SegmentReader};

#[test]
fn test_delete_during_merge() -> crate::Result<()> {
@@ -922,7 +915,7 @@ mod tests {

#[test]
fn test_merge_empty_indices_array() {
let merge_result = merge_indices::<StandardCodec>(&[], Box::new(RamDirectory::default()));
let merge_result = merge_indices(&[], RamDirectory::default());
assert!(merge_result.is_err());
}

@@ -949,10 +942,7 @@ mod tests {
};

// mismatched schema index list
let result = merge_indices(
&[first_index, second_index],
Box::new(RamDirectory::default()),
);
let result = merge_indices(&[first_index, second_index], RamDirectory::default());
assert!(result.is_err());

Ok(())
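The hunks above all make the same move: the `Codec` type parameter disappears from `SegmentUpdater`, `Index`, `Segment`, and the merge helpers, and the codec is instead recorded as a value (`CodecConfiguration::from_codec(index.codec())`) inside `IndexMeta`. A minimal sketch of that refactoring pattern, using illustrative stand-in types rather than tantivy's real definitions:

```rust
use std::sync::Arc;

// Stand-in for the codec abstraction; object-safe so it can be type-erased.
trait Codec: Send + Sync {
    fn name(&self) -> &'static str;
}

struct StandardCodec;
impl Codec for StandardCodec {
    fn name(&self) -> &'static str {
        "standard"
    }
}

// Before: the codec is a compile-time parameter, so every struct and
// function that touches the index must repeat `<C: Codec>`.
struct GenericIndex<C: Codec> {
    codec: C,
}

// After: the codec hides behind a trait object, and `Index`,
// `SegmentUpdater`, `SegmentWriter`, ... lose their type parameter.
struct Index {
    codec: Arc<dyn Codec>,
}

impl Index {
    fn codec(&self) -> &dyn Codec {
        self.codec.as_ref()
    }
}

fn main() {
    let _generic = GenericIndex { codec: StandardCodec };
    let index = Index { codec: Arc::new(StandardCodec) };
    // Metadata can then record which codec was used, as `IndexMeta.codec` does.
    assert_eq!(index.codec().name(), "standard");
}
```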
@@ -4,7 +4,6 @@ use itertools::Itertools;
use tokenizer_api::BoxTokenStream;

use super::operation::AddOperation;
use crate::codec::Codec;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
use crate::index::{Segment, SegmentComponent};
@@ -46,11 +45,11 @@ fn compute_initial_table_size(per_thread_memory_budget: usize) -> crate::Result<
///
/// They creates the postings list in anonymous memory.
/// The segment is laid on disk when the segment gets `finalized`.
pub struct SegmentWriter<Codec: crate::codec::Codec> {
pub struct SegmentWriter {
pub(crate) max_doc: DocId,
pub(crate) ctx: IndexingContext,
pub(crate) per_field_postings_writers: PerFieldPostingsWriter,
pub(crate) segment_serializer: SegmentSerializer<Codec>,
pub(crate) segment_serializer: SegmentSerializer,
pub(crate) fast_field_writers: FastFieldsWriter,
pub(crate) fieldnorms_writer: FieldNormsWriter,
pub(crate) json_path_writer: JsonPathWriter,
@@ -61,7 +60,7 @@ pub struct SegmentWriter<Codec: crate::codec::Codec> {
schema: Schema,
}

impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
impl SegmentWriter {
/// Creates a new `SegmentWriter`
///
/// The arguments are defined as follows
@@ -71,10 +70,7 @@ impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
/// behavior as a memory limit.
/// - segment: The segment being written
/// - schema
pub fn for_segment(
memory_budget_in_bytes: usize,
segment: Segment<Codec>,
) -> crate::Result<Self> {
pub fn for_segment(memory_budget_in_bytes: usize, segment: Segment) -> crate::Result<Self> {
let schema = segment.schema();
let tokenizer_manager = segment.index().tokenizers().clone();
let tokenizer_manager_fast_field = segment.index().fast_field_tokenizer().clone();
@@ -390,13 +386,13 @@ impl<Codec: crate::codec::Codec> SegmentWriter<Codec> {
/// to the `SegmentSerializer`.
///
/// `doc_id_map` is used to map to the new doc_id order.
fn remap_and_write<C: Codec>(
fn remap_and_write(
schema: Schema,
per_field_postings_writers: &PerFieldPostingsWriter,
ctx: IndexingContext,
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: &FieldNormsWriter,
mut serializer: SegmentSerializer<C>,
mut serializer: SegmentSerializer,
) -> crate::Result<()> {
debug!("remap-and-write");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
@@ -875,7 +871,7 @@ mod tests {
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0u32);

fn assert_type(reader: &SegmentReader, field: &str, typ: ColumnType) {
fn assert_type(reader: &dyn SegmentReader, field: &str, typ: ColumnType) {
let cols = reader.fast_fields().dynamic_column_handles(field).unwrap();
assert_eq!(cols.len(), 1, "{field}");
assert_eq!(cols[0].column_type(), typ, "{field}");
@@ -894,7 +890,7 @@ mod tests {
assert_type(segment_reader, "json.my_arr", ColumnType::I64);
assert_type(segment_reader, "json.my_arr.my_key", ColumnType::Str);

fn assert_empty(reader: &SegmentReader, field: &str) {
fn assert_empty(reader: &dyn SegmentReader, field: &str) {
let cols = reader.fast_fields().dynamic_column_handles(field).unwrap();
assert_eq!(cols.len(), 0);
}
@@ -1,6 +1,5 @@
use std::marker::PhantomData;

use crate::codec::CodecConfiguration;
use crate::indexer::operation::AddOperation;
use crate::indexer::segment_updater::save_metas;
use crate::indexer::SegmentWriter;
@@ -8,22 +7,22 @@ use crate::schema::document::Document;
use crate::{Directory, Index, IndexMeta, Opstamp, Segment, TantivyDocument};

#[doc(hidden)]
pub struct SingleSegmentIndexWriter<Codec: crate::codec::Codec, D: Document = TantivyDocument> {
segment_writer: SegmentWriter<Codec>,
segment: Segment<Codec>,
pub struct SingleSegmentIndexWriter<D: Document = TantivyDocument> {
segment_writer: SegmentWriter,
segment: Segment,
opstamp: Opstamp,
_doc: PhantomData<D>,
_phantom: PhantomData<D>,
}

impl<Codec: crate::codec::Codec, D: Document> SingleSegmentIndexWriter<Codec, D> {
pub fn new(index: Index<Codec>, mem_budget: usize) -> crate::Result<Self> {
impl<D: Document> SingleSegmentIndexWriter<D> {
pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
let segment = index.new_segment();
let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
Ok(Self {
segment_writer,
segment,
opstamp: 0,
_doc: PhantomData,
_phantom: PhantomData,
})
}

@@ -38,10 +37,10 @@ impl<Codec: crate::codec::Codec, D: Document> SingleSegmentIndexWriter<Codec, D>
.add_document(AddOperation { opstamp, document })
}

pub fn finalize(self) -> crate::Result<Index<Codec>> {
pub fn finalize(self) -> crate::Result<Index> {
let max_doc = self.segment_writer.max_doc();
self.segment_writer.finalize()?;
let segment: Segment<Codec> = self.segment.with_max_doc(max_doc);
let segment: Segment = self.segment.with_max_doc(max_doc);
let index = segment.index();
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
@@ -49,7 +48,6 @@ impl<Codec: crate::codec::Codec, D: Document> SingleSegmentIndexWriter<Codec, D>
schema: index.schema(),
opstamp: 0,
payload: None,
codec: CodecConfiguration::from_codec(index.codec()),
};
save_metas(&index_meta, index.directory())?;
index.directory().sync_directory()?;
25
src/lib.rs
@@ -166,7 +166,6 @@ mod functional_test;

#[macro_use]
mod macros;
pub mod codec;
mod future_result;

// Re-exports
@@ -225,8 +224,9 @@ pub use self::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN, TERMINATED};
pub use crate::core::{json_utils, Executor, Searcher, SearcherGeneration};
pub use crate::directory::Directory;
pub use crate::index::{
Index, IndexBuilder, IndexMeta, IndexSettings, InvertedIndexReader, Order, Segment,
SegmentMeta, SegmentReader,
ArcInvertedIndexReader, ArcSegmentReader, Index, IndexBuilder, IndexMeta, IndexSettings,
InvertedIndexReader, Order, Segment, SegmentMeta, SegmentReader, TantivyInvertedIndexReader,
TantivySegmentReader,
};
pub use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
pub use crate::schema::{Document, TantivyDocument, Term};
@@ -378,7 +378,7 @@ pub mod tests {

use common::{BinarySerializable, FixedSize};
use query_grammar::{UserInputAst, UserInputLeaf, UserInputLiteral};
use rand::distributions::{Bernoulli, Uniform};
use rand::distr::{Bernoulli, Uniform};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use time::OffsetDateTime;
@@ -429,7 +429,7 @@ pub mod tests {
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 32] = [1; 32];
StdRng::from_seed(seed)
.sample_iter(&Uniform::new(0u32, max_value))
.sample_iter(&Uniform::new(0u32, max_value).unwrap())
.take(n_elems)
.collect::<Vec<u32>>()
}
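The `rand::distributions` → `rand::distr` rename and the now-fallible `Uniform::new` are part of the rand 0.8 → 0.9 migration that runs through this whole compare (see also `gen_bool` → `random_bool` and `thread_rng()` → `rng()` in later hunks). A small before/after sketch, assuming rand 0.9:

```rust
use rand::distr::Uniform; // rand 0.8: rand::distributions::Uniform
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

fn main() {
    let mut rng = StdRng::from_seed([1u8; 32]);
    // rand 0.9: distribution constructors validate input and return Result.
    let uniform = Uniform::new(0u32, 100).unwrap();
    let samples: Vec<u32> = (&mut rng).sample_iter(&uniform).take(5).collect();
    // Renames: gen_range -> random_range, gen_bool -> random_bool.
    let x: f64 = rng.random_range(0.0..1.0);
    let heads = rng.random_bool(0.5);
    println!("{samples:?} {x} {heads}");
}
```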
@@ -524,11 +524,11 @@ pub mod tests {
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field)?;
let fieldnorm_reader = reader.fieldnorms_readers().get_field(text_field)?.unwrap();
assert_eq!(fieldnorm_reader.fieldnorm(0), 3);
}
{
let fieldnorm_reader = reader.get_fieldnorms_reader(title_field)?;
let fieldnorm_reader = reader.fieldnorms_readers().get_field(title_field)?.unwrap();
assert_eq!(fieldnorm_reader.fieldnorm_id(0), 0);
}
Ok(())
@@ -546,15 +546,18 @@ pub mod tests {
index_writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field)?;
let segment_reader: &dyn SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader
.fieldnorms_readers()
.get_field(text_field)?
.unwrap();
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
assert_eq!(fieldnorms_reader.fieldnorm(1), 0);
assert_eq!(fieldnorms_reader.fieldnorm(2), 2);
Ok(())
}

fn advance_undeleted(docset: &mut dyn DocSet, reader: &SegmentReader) -> bool {
fn advance_undeleted(docset: &mut dyn DocSet, reader: &dyn SegmentReader) -> bool {
let mut doc = docset.advance();
while doc != TERMINATED {
if !reader.is_deleted(doc) {
@@ -1071,7 +1074,7 @@ pub mod tests {
}
let reader = index.reader()?;
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let segment_reader: &dyn SegmentReader = searcher.segment_reader(0);
{
let fast_field_reader_res = segment_reader.fast_fields().u64("text");
assert!(fast_field_reader_res.is_err());
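Alongside the trait-object change (`&SegmentReader` → `&dyn SegmentReader`), these test hunks show the fieldnorm accessor moving from `reader.get_fieldnorms_reader(field)?` to `reader.fieldnorms_readers().get_field(field)?`, which yields an `Option` the tests unwrap. A hedged sketch of the shape of that API, with simplified stand-in types:

```rust
// Simplified stand-ins; the real types live in tantivy's fieldnorm module.
#[derive(Clone, Copy)]
struct Field(u32);
struct FieldNormReader;

struct FieldNormReaders;
impl FieldNormReaders {
    // Returns Ok(None) when the field has no fieldnorms, mirroring the
    // Result<Option<_>> shape the tests unwrap.
    fn get_field(&self, _field: Field) -> Result<Option<FieldNormReader>, String> {
        Ok(Some(FieldNormReader))
    }
}

trait SegmentReader {
    fn fieldnorms_readers(&self) -> &FieldNormReaders;
}

// The removed convenience method can be rebuilt as a helper over the trait.
fn get_fieldnorms_reader(
    reader: &dyn SegmentReader,
    field: Field,
) -> Result<FieldNormReader, String> {
    reader
        .fieldnorms_readers()
        .get_field(field)?
        .ok_or_else(|| "no fieldnorms for field".to_string())
}

fn main() {
    struct Dummy(FieldNormReaders);
    impl SegmentReader for Dummy {
        fn fieldnorms_readers(&self) -> &FieldNormReaders {
            &self.0
        }
    }
    let reader = Dummy(FieldNormReaders);
    assert!(get_fieldnorms_reader(&reader, Field(0)).is_ok());
}
```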
@@ -182,32 +182,6 @@ impl BlockSegmentPostings {
self.freq_reading_option
}

// Resets the block segment postings on another position
// in the postings file.
//
// This is useful for enumerating through a list of terms,
// and consuming the associated posting lists while avoiding
// reallocating a `BlockSegmentPostings`.
//
// # Warning
//
// This does not reset the positions list.
pub(crate) fn reset(&mut self, doc_freq: u32, postings_data: OwnedBytes) -> io::Result<()> {
let (skip_data_opt, postings_data) =
split_into_skips_and_postings(doc_freq, postings_data)?;
self.data = postings_data;
self.block_max_score_cache = None;
self.block_loaded = false;
if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data, doc_freq);
} else {
self.skip_reader.reset(OwnedBytes::empty(), doc_freq);
}
self.doc_freq = doc_freq;
self.load_block();
Ok(())
}

/// Returns the overall number of documents in the block postings.
/// It does not take in account whether documents are deleted or not.
///
@@ -521,40 +495,4 @@ mod tests {
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);
Ok(())
}

#[test]
fn test_reset_block_segment_postings() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
// create two postings list, one containing even number,
// the other containing odd numbers.
for i in 0..6 {
let doc = doc!(int_field=> (i % 2) as u64);
index_writer.add_document(doc)?;
}
index_writer.commit()?;
let searcher = index.reader()?.searcher();
let segment_reader = searcher.segment_reader(0);

let mut block_segments;
{
let term = Term::from_field_u64(int_field, 0u64);
let inverted_index = segment_reader.inverted_index(int_field)?;
let term_info = inverted_index.get_term_info(&term)?.unwrap();
block_segments = inverted_index
.read_block_postings_from_terminfo(&term_info, IndexRecordOption::Basic)?;
}
assert_eq!(block_segments.docs(), &[0, 2, 4]);
{
let term = Term::from_field_u64(int_field, 1u64);
let inverted_index = segment_reader.inverted_index(int_field)?;
let term_info = inverted_index.get_term_info(&term)?.unwrap();
inverted_index.reset_block_postings_from_terminfo(&term_info, &mut block_segments)?;
}
assert_eq!(block_segments.docs(), &[1, 3, 5]);
Ok(())
}
}
@@ -397,7 +397,10 @@ mod bench {
let mut seed: [u8; 32] = [0; 32];
seed[31] = seed_val;
let mut rng = StdRng::from_seed(seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
(0u32..)
.filter(|_| rng.random_bool(ratio))
.take(n)
.collect()
}

pub fn generate_array(n: usize, ratio: f64) -> Vec<u32> {
@@ -22,6 +22,12 @@ pub(crate) struct JsonPostingsWriter<Rec: Recorder> {
non_str_posting_writer: SpecializedPostingsWriter<DocIdRecorder>,
}

impl<Rec: Recorder> From<JsonPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
fn from(json_postings_writer: JsonPostingsWriter<Rec>) -> Box<dyn PostingsWriter> {
Box::new(json_postings_writer)
}
}

impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
#[inline]
fn subscribe(
@@ -46,7 +46,7 @@ pub(crate) mod tests {
use super::{InvertedIndexSerializer, Postings};
use crate::docset::{DocSet, TERMINATED};
use crate::fieldnorm::FieldNormReader;
use crate::index::{Index, SegmentComponent, SegmentReader};
use crate::index::{Index, SegmentComponent, SegmentReader, TantivySegmentReader};
use crate::indexer::operation::AddOperation;
use crate::indexer::SegmentWriter;
use crate::query::Scorer;
@@ -258,9 +258,12 @@ pub(crate) mod tests {
segment_writer.finalize()?;
}
{
let segment_reader = SegmentReader::open(&segment)?;
let segment_reader = TantivySegmentReader::open(&segment)?;
{
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(text_field)?;
let fieldnorm_reader = segment_reader
.fieldnorms_readers()
.get_field(text_field)?
.unwrap();
assert_eq!(fieldnorm_reader.fieldnorm(0), 8 + 5);
assert_eq!(fieldnorm_reader.fieldnorm(1), 2);
for i in 2..1000 {
@@ -604,13 +607,13 @@ mod bench {
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
for _ in 0..posting_list_size {
let mut doc = TantivyDocument::default();
if rng.gen_bool(1f64 / 15f64) {
if rng.random_bool(1f64 / 15f64) {
doc.add_text(text_field, "a");
}
if rng.gen_bool(1f64 / 10f64) {
if rng.random_bool(1f64 / 10f64) {
doc.add_text(text_field, "b");
}
if rng.gen_bool(1f64 / 5f64) {
if rng.random_bool(1f64 / 5f64) {
doc.add_text(text_field, "c");
}
doc.add_text(text_field, "d");
@@ -1,15 +1,16 @@
use crate::postings::json_postings_writer::JsonPostingsWriter;
use crate::postings::postings_writer::{PostingsWriterEnum, SpecializedPostingsWriter};
use crate::postings::postings_writer::SpecializedPostingsWriter;
use crate::postings::recorder::{DocIdRecorder, TermFrequencyRecorder, TfAndPositionRecorder};
use crate::postings::PostingsWriter;
use crate::schema::{Field, FieldEntry, FieldType, IndexRecordOption, Schema};

pub(crate) struct PerFieldPostingsWriter {
per_field_postings_writers: Vec<PostingsWriterEnum>,
per_field_postings_writers: Vec<Box<dyn PostingsWriter>>,
}

impl PerFieldPostingsWriter {
pub fn for_schema(schema: &Schema) -> Self {
let per_field_postings_writers: Vec<PostingsWriterEnum> = schema
let per_field_postings_writers = schema
.fields()
.map(|(_, field_entry)| posting_writer_from_field_entry(field_entry))
.collect();
@@ -18,16 +19,16 @@ impl PerFieldPostingsWriter {
}
}

pub(crate) fn get_for_field(&self, field: Field) -> &PostingsWriterEnum {
&self.per_field_postings_writers[field.field_id() as usize]
pub(crate) fn get_for_field(&self, field: Field) -> &dyn PostingsWriter {
self.per_field_postings_writers[field.field_id() as usize].as_ref()
}

pub(crate) fn get_for_field_mut(&mut self, field: Field) -> &mut PostingsWriterEnum {
&mut self.per_field_postings_writers[field.field_id() as usize]
pub(crate) fn get_for_field_mut(&mut self, field: Field) -> &mut dyn PostingsWriter {
self.per_field_postings_writers[field.field_id() as usize].as_mut()
}
}

fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> PostingsWriterEnum {
fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter> {
match *field_entry.field_type() {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
@@ -50,7 +51,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> PostingsWriterEn
| FieldType::Date(_)
| FieldType::Bytes(_)
| FieldType::IpAddr(_)
| FieldType::Facet(_) => <SpecializedPostingsWriter<DocIdRecorder>>::default().into(),
| FieldType::Facet(_) => Box::<SpecializedPostingsWriter<DocIdRecorder>>::default(),
FieldType::JsonObject(ref json_object_options) => {
if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
match text_indexing_option.index_option() {
@@ -7,10 +7,7 @@ use stacker::Addr;
use crate::fieldnorm::FieldNormReaders;
use crate::indexer::indexing_term::IndexingTerm;
use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::json_postings_writer::JsonPostingsWriter;
use crate::postings::recorder::{
BufferLender, DocIdRecorder, Recorder, TermFrequencyRecorder, TfAndPositionRecorder,
};
use crate::postings::recorder::{BufferLender, Recorder};
use crate::postings::{
FieldSerializer, IndexingContext, InvertedIndexSerializer, PerFieldPostingsWriter,
};
@@ -103,141 +100,6 @@ pub(crate) struct IndexingPosition {
pub end_position: u32,
}

pub enum PostingsWriterEnum {
DocId(SpecializedPostingsWriter<DocIdRecorder>),
DocIdTf(SpecializedPostingsWriter<TermFrequencyRecorder>),
DocTfAndPosition(SpecializedPostingsWriter<TfAndPositionRecorder>),
JsonDocId(JsonPostingsWriter<DocIdRecorder>),
JsonDocIdTf(JsonPostingsWriter<TermFrequencyRecorder>),
JsonDocTfAndPosition(JsonPostingsWriter<TfAndPositionRecorder>),
}

impl From<SpecializedPostingsWriter<DocIdRecorder>> for PostingsWriterEnum {
fn from(doc_id_recorder_writer: SpecializedPostingsWriter<DocIdRecorder>) -> Self {
PostingsWriterEnum::DocId(doc_id_recorder_writer)
}
}

impl From<SpecializedPostingsWriter<TermFrequencyRecorder>> for PostingsWriterEnum {
fn from(doc_id_tf_recorder_writer: SpecializedPostingsWriter<TermFrequencyRecorder>) -> Self {
PostingsWriterEnum::DocIdTf(doc_id_tf_recorder_writer)
}
}

impl From<SpecializedPostingsWriter<TfAndPositionRecorder>> for PostingsWriterEnum {
fn from(
doc_id_tf_and_positions_recorder_writer: SpecializedPostingsWriter<TfAndPositionRecorder>,
) -> Self {
PostingsWriterEnum::DocTfAndPosition(doc_id_tf_and_positions_recorder_writer)
}
}

impl From<JsonPostingsWriter<DocIdRecorder>> for PostingsWriterEnum {
fn from(doc_id_recorder_writer: JsonPostingsWriter<DocIdRecorder>) -> Self {
PostingsWriterEnum::JsonDocId(doc_id_recorder_writer)
}
}

impl From<JsonPostingsWriter<TermFrequencyRecorder>> for PostingsWriterEnum {
fn from(doc_id_tf_recorder_writer: JsonPostingsWriter<TermFrequencyRecorder>) -> Self {
PostingsWriterEnum::JsonDocIdTf(doc_id_tf_recorder_writer)
}
}

impl From<JsonPostingsWriter<TfAndPositionRecorder>> for PostingsWriterEnum {
fn from(
doc_id_tf_and_positions_recorder_writer: JsonPostingsWriter<TfAndPositionRecorder>,
) -> Self {
PostingsWriterEnum::JsonDocTfAndPosition(doc_id_tf_and_positions_recorder_writer)
}
}

impl PostingsWriter for PostingsWriterEnum {
fn subscribe(&mut self, doc: DocId, pos: u32, term: &IndexingTerm, ctx: &mut IndexingContext) {
match self {
PostingsWriterEnum::DocId(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::DocIdTf(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::DocTfAndPosition(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::JsonDocId(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::JsonDocIdTf(writer) => writer.subscribe(doc, pos, term, ctx),
PostingsWriterEnum::JsonDocTfAndPosition(writer) => {
writer.subscribe(doc, pos, term, ctx)
}
}
}

fn serialize(
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
ordered_id_to_path: &[&str],
ctx: &IndexingContext,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
match self {
PostingsWriterEnum::DocId(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::DocIdTf(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::DocTfAndPosition(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::JsonDocId(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::JsonDocIdTf(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
PostingsWriterEnum::JsonDocTfAndPosition(writer) => {
writer.serialize(term_addrs, ordered_id_to_path, ctx, serializer)
}
}
}

/// Tokenize a text and subscribe all of its token.
fn index_text(
&mut self,
doc_id: DocId,
token_stream: &mut dyn TokenStream,
term_buffer: &mut IndexingTerm,
ctx: &mut IndexingContext,
indexing_position: &mut IndexingPosition,
) {
match self {
PostingsWriterEnum::DocId(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::DocIdTf(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::DocTfAndPosition(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::JsonDocId(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::JsonDocIdTf(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
PostingsWriterEnum::JsonDocTfAndPosition(writer) => {
writer.index_text(doc_id, token_stream, term_buffer, ctx, indexing_position)
}
}
}

fn total_num_tokens(&self) -> u64 {
match self {
PostingsWriterEnum::DocId(writer) => writer.total_num_tokens(),
PostingsWriterEnum::DocIdTf(writer) => writer.total_num_tokens(),
PostingsWriterEnum::DocTfAndPosition(writer) => writer.total_num_tokens(),
PostingsWriterEnum::JsonDocId(writer) => writer.total_num_tokens(),
PostingsWriterEnum::JsonDocIdTf(writer) => writer.total_num_tokens(),
PostingsWriterEnum::JsonDocTfAndPosition(writer) => writer.total_num_tokens(),
}
}
}

/// The `PostingsWriter` is in charge of receiving documenting
/// and building a `Segment` in anonymous memory.
///
@@ -309,6 +171,14 @@ pub(crate) struct SpecializedPostingsWriter<Rec: Recorder> {
_recorder_type: PhantomData<Rec>,
}

impl<Rec: Recorder> From<SpecializedPostingsWriter<Rec>> for Box<dyn PostingsWriter> {
fn from(
specialized_postings_writer: SpecializedPostingsWriter<Rec>,
) -> Box<dyn PostingsWriter> {
Box::new(specialized_postings_writer)
}
}

impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
#[inline]
pub(crate) fn serialize_one_term(
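The large removal above deletes `PostingsWriterEnum`, whose six variants and hand-written match arms existed only to avoid virtual dispatch; the per-field writers become plain `Box<dyn PostingsWriter>` (see the `From` impls added for `SpecializedPostingsWriter` and `JsonPostingsWriter`). A condensed sketch of the two dispatch styles, with illustrative types that are not tantivy's:

```rust
trait Writer {
    fn write(&mut self, doc: u32);
}

struct DocIdWriter;
impl Writer for DocIdWriter {
    fn write(&mut self, _doc: u32) { /* record the doc id only */ }
}

struct TfWriter;
impl Writer for TfWriter {
    fn write(&mut self, _doc: u32) { /* record doc id + term frequency */ }
}

// Enum dispatch: no heap allocation per writer and calls can be inlined,
// but every new variant must be threaded through every match arm.
enum WriterEnum {
    DocId(DocIdWriter),
    Tf(TfWriter),
}

impl Writer for WriterEnum {
    fn write(&mut self, doc: u32) {
        match self {
            WriterEnum::DocId(w) => w.write(doc),
            WriterEnum::Tf(w) => w.write(doc),
        }
    }
}

// Trait-object dispatch: one vtable call, no per-variant boilerplate.
fn make_writer(with_tf: bool) -> Box<dyn Writer> {
    if with_tf {
        Box::new(TfWriter)
    } else {
        Box::new(DocIdWriter)
    }
}

fn main() {
    let mut w = make_writer(true);
    w.write(42);
    let mut e = WriterEnum::DocId(DocIdWriter);
    e.write(7);
}
```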
@@ -55,9 +55,7 @@ pub struct InvertedIndexSerializer {

impl InvertedIndexSerializer {
/// Open a new `InvertedIndexSerializer` for the given segment
pub fn open<C: crate::codec::Codec>(
segment: &mut Segment<C>,
) -> crate::Result<InvertedIndexSerializer> {
pub fn open(segment: &mut Segment) -> crate::Result<InvertedIndexSerializer> {
use crate::index::SegmentComponent::{Positions, Postings, Terms};
let inv_index_serializer = InvertedIndexSerializer {
terms_write: CompositeWrite::wrap(segment.open_write(Terms)?),
@@ -142,23 +142,6 @@ impl SkipReader {
skip_reader
}

pub fn reset(&mut self, data: OwnedBytes, doc_freq: u32) {
self.last_doc_in_block = if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
0
} else {
TERMINATED
};
self.last_doc_in_previous_block = 0u32;
self.owned_read = data;
self.block_info = BlockInfo::VInt { num_docs: doc_freq };
self.byte_offset = 0;
self.remaining_docs = doc_freq;
self.position_offset = 0u64;
if doc_freq >= COMPRESSION_BLOCK_SIZE as u32 {
self.read_block_info();
}
}

// Returns the block max score for this block if available.
//
// The block max score is available for all full bitpacked block,
@@ -21,7 +21,7 @@ impl Query for AllQuery {
pub struct AllWeight;

impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer::new(reader.max_doc());
if boost != 1.0 {
Ok(Box::new(BoostScorer::new(all_scorer, boost)))
@@ -30,7 +30,7 @@ impl Weight for AllWeight {
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
if doc >= reader.max_doc() {
return Err(does_not_match(doc));
}
@@ -67,7 +67,7 @@ where
}

/// Returns the term infos that match the automaton
pub fn get_match_term_infos(&self, reader: &SegmentReader) -> crate::Result<Vec<TermInfo>> {
pub fn get_match_term_infos(&self, reader: &dyn SegmentReader) -> crate::Result<Vec<TermInfo>> {
let inverted_index = reader.inverted_index(self.field)?;
let term_dict = inverted_index.terms();
let mut term_stream = self.automaton_stream(term_dict)?;
@@ -84,7 +84,7 @@ where
A: Automaton + Send + Sync + 'static,
A::State: Clone,
{
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?;
@@ -110,7 +110,7 @@ where
Ok(Box::new(const_scorer))
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0))
@@ -205,7 +205,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {

fn per_occur_scorers(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
boost: Score,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
@@ -221,7 +221,7 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {

fn complex_scorer<TComplexScoreCombiner: ScoreCombiner>(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
boost: Score,
score_combiner_fn: impl Fn() -> TComplexScoreCombiner,
) -> crate::Result<SpecializedScorer> {
@@ -418,7 +418,7 @@ fn remove_and_count_all_and_empty_scorers(
}

impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombiner> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let num_docs = reader.num_docs();
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
@@ -442,7 +442,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
@@ -464,7 +464,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin

fn for_each(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
@@ -486,7 +486,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin

fn for_each_no_score(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
@@ -521,7 +521,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
fn for_each_pruning(
&self,
threshold: Score,
reader: &SegmentReader,
reader: &dyn SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
@@ -1,6 +1,6 @@
use std::fmt;

use crate::docset::COLLECT_BLOCK_BUFFER_LEN;
use crate::docset::{SeekDangerResult, COLLECT_BLOCK_BUFFER_LEN};
use crate::fastfield::AliveBitSet;
use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
use crate::{DocId, DocSet, Score, SegmentReader, Term};
@@ -67,11 +67,11 @@ impl BoostWeight {
}

impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
}

fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: u32) -> crate::Result<Explanation> {
let underlying_explanation = self.weight.explain(reader, doc)?;
let score = underlying_explanation.value() * self.boost;
let mut explanation =
@@ -80,7 +80,7 @@ impl Weight for BoostWeight {
Ok(explanation)
}

fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
fn count(&self, reader: &dyn SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
@@ -104,8 +104,8 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
self.underlying.seek_into_the_danger_zone(target)
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
self.underlying.seek_danger(target)
}

fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {
@@ -63,12 +63,12 @@ impl ConstWeight {
}

impl Weight for ConstWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost)?;
Ok(Box::new(ConstScorer::new(inner_scorer, boost * self.score)))
}

fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
@@ -81,7 +81,7 @@ impl Weight for ConstWeight {
Ok(explanation)
}

fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
fn count(&self, reader: &dyn SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
}
@@ -1,6 +1,7 @@
use std::cmp::Ordering;
use std::collections::BinaryHeap;

use crate::docset::SeekDangerResult;
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ScoreCombiner, Scorer};
use crate::{DocId, DocSet, Score, TERMINATED};
@@ -67,10 +68,12 @@ impl<T: Scorer> DocSet for ScorerWrapper<T> {
self.current_doc = doc_id;
doc_id
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
let found = self.scorer.seek_into_the_danger_zone(target);
self.current_doc = self.scorer.doc();
found
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
let result = self.scorer.seek_danger(target);
if result == SeekDangerResult::Found {
self.current_doc = target;
}
result
}

fn doc(&self) -> DocId {
@@ -26,11 +26,11 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, _reader: &dyn SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}

fn explain(&self, _reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, _reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
Err(does_not_match(doc))
}
}
@@ -98,7 +98,7 @@ pub struct ExistsWeight {
}

impl Weight for ExistsWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let fast_field_reader = reader.fast_fields();
let mut column_handles = fast_field_reader.dynamic_column_handles(&self.field_name)?;
if self.field_type == Type::Json && self.json_subpaths {
@@ -165,7 +165,7 @@ impl Weight for ExistsWeight {
Ok(Box::new(ConstScorer::new(docset, boost)))
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
@@ -1,5 +1,5 @@
use super::size_hint::estimate_intersection;
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::query::term_query::TermScorer;
use crate::query::{EmptyScorer, Scorer};
use crate::{DocId, Score};
@@ -108,46 +108,63 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
#[inline]
fn advance(&mut self) -> DocId {
let (left, right) = (&mut self.left, &mut self.right);
let mut candidate = left.advance();
if candidate == TERMINATED {
return TERMINATED;
}

loop {
// In the first part we look for a document in the intersection
// of the two rarest `DocSet` in the intersection.
// Invariant:
// - candidate is always <= to the next document in the intersection.
// - candidate strictly increases at every occurence of the loop.
let mut candidate = 0;

loop {
if right.seek_into_the_danger_zone(candidate) {
break;
}
let right_doc = right.doc();
// TODO: Think about which value would make sense here
// It depends on the DocSet implementation, when a seek would outweigh an advance.
if right_doc > candidate.wrapping_add(100) {
candidate = left.seek(right_doc);
} else {
candidate = left.advance();
}
if candidate == TERMINATED {
return TERMINATED;
}
}
// Termination: candidate strictly increases.
'outer: while candidate < TERMINATED {
// As we enter the loop, we should always have candidate < next_doc.

debug_assert_eq!(left.doc(), right.doc());
// test the remaining scorers
if self
.others
.iter_mut()
.all(|docset| docset.seek_into_the_danger_zone(candidate))
// This step always increases candidate.
//
// TODO: Think about which value would make sense here
// It depends on the DocSet implementation, when a seek would outweigh an advance.
candidate = if candidate > left.doc().wrapping_add(100) {
left.seek(candidate)
} else {
left.advance()
};

// Left is positionned on `candidate`.
debug_assert_eq!(left.doc(), candidate);

if let SeekDangerResult::SeekLowerBound(seek_lower_bound) = right.seek_danger(candidate)
{
debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
// The max is technically useless but it makes the invariant
// easier to proofread.
debug_assert!(seek_lower_bound >= candidate);
candidate = seek_lower_bound;
continue;
}
candidate = left.advance();

// Left and right are positionned on `candidate`.
debug_assert_eq!(right.doc(), candidate);

for other in &mut self.others {
if let SeekDangerResult::SeekLowerBound(seek_lower_bound) =
other.seek_danger(candidate)
{
// One of the scorer does not match, let's restart at the top of the loop.
debug_assert!(seek_lower_bound >= candidate);
candidate = seek_lower_bound;
continue 'outer;
}
}

// At this point all scorers are in a valid state, aligned on the next document in the
// intersection.
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
}

// We make sure our docset is in a valid state.
// In particular, we want .doc() to return TERMINATED.
left.seek(TERMINATED);

TERMINATED
}

fn seek(&mut self, target: DocId) -> DocId {
@@ -166,13 +183,19 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
///
/// Some implementations may choose to advance past the target if beneficial for performance.
/// The return value is `true` if the target is in the docset, and `false` otherwise.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
self.left.seek_into_the_danger_zone(target)
&& self.right.seek_into_the_danger_zone(target)
&& self
.others
.iter_mut()
.all(|docset| docset.seek_into_the_danger_zone(target))
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
if let SeekDangerResult::SeekLowerBound(new_target) = self.left.seek_danger(target) {
return SeekDangerResult::SeekLowerBound(new_target);
}
if let SeekDangerResult::SeekLowerBound(new_target) = self.right.seek_danger(target) {
return SeekDangerResult::SeekLowerBound(new_target);
}
for docset in &mut self.others {
if let SeekDangerResult::SeekLowerBound(new_target) = docset.seek_danger(target) {
return SeekDangerResult::SeekLowerBound(new_target);
}
}
SeekDangerResult::Found
}

#[inline]
@@ -304,6 +327,58 @@ mod tests {
assert_eq!(intersection.doc(), TERMINATED);
}

#[test]
fn test_intersection_abc() {
let a = VecDocSet::from(vec![2, 3, 6]);
let b = VecDocSet::from(vec![1, 3, 5]);
let c = VecDocSet::from(vec![1, 3, 5]);
let mut intersection = Intersection::new(vec![c, b, a], 10);
let mut docs = Vec::new();
use crate::DocSet;
while intersection.doc() != TERMINATED {
docs.push(intersection.doc());
intersection.advance();
}
assert_eq!(&docs, &[3]);
}

#[test]
fn test_intersection_termination() {
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{BufferedUnionScorer, ConstScorer, VecDocSet};

let a1 = ConstScorer::new(VecDocSet::from(vec![0u32, 10000]), 1.0);
let a2 = ConstScorer::new(VecDocSet::from(vec![0u32, 10000]), 1.0);

let mut b_scorers = vec![];
for _ in 0..2 {
// Union matches 0 and 10000.
b_scorers.push(ConstScorer::new(VecDocSet::from(vec![0, 10000]), 1.0));
}
// That's the union of two scores matching 0, and 10_000.
let union = BufferedUnionScorer::build(b_scorers, DoNothingCombiner::default, 30000);

// Mismatching scorer: matches 0 and 20000. We then append more docs at the end to ensure it
// is last.
let mut m_docs = vec![0, 20000];
for i in 30000..30100 {
m_docs.push(i);
}
let m = ConstScorer::new(VecDocSet::from(m_docs), 1.0);

// Costs: A1=2, A2=2, Union=4, M=102.
// Sorted: A1, A2, Union, M.
// Left=A1, Right=A2, Others=[Union, M].
let mut intersection = crate::query::intersect_scorers(
vec![Box::new(a1), Box::new(a2), Box::new(union), Box::new(m)],
40000,
);

while intersection.doc() != TERMINATED {
intersection.advance();
}
}

// Strategy to generate sorted and deduplicated vectors of u32 document IDs
fn sorted_deduped_vec(max_val: u32, max_size: usize) -> impl Strategy<Value = Vec<u32>> {
prop::collection::vec(0..max_val, 0..max_size).prop_map(|mut vec| {
@@ -335,6 +410,5 @@ mod tests {
}
assert_eq!(intersection.doc(), TERMINATED);
}

}
}
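The rewritten intersection replaces `seek_into_the_danger_zone`, which only reported a bool and forced the caller to re-advance blindly, with `seek_danger`, which on a miss hands back a lower bound for the next possible match so the driving loop can jump straight there. A simplified model of the contract and the restart loop (stand-in types, not tantivy's):

```rust
const TERMINATED: u32 = u32::MAX;

// On a miss, the docset may have advanced past `target` and reports a
// lower bound for the next doc that could possibly match.
#[derive(Debug, PartialEq, Eq)]
enum SeekDangerResult {
    Found,
    SeekLowerBound(u32),
}

trait DocSet {
    fn seek_danger(&mut self, target: u32) -> SeekDangerResult;
}

struct VecDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl DocSet for VecDocSet {
    fn seek_danger(&mut self, target: u32) -> SeekDangerResult {
        while self.cursor < self.docs.len() && self.docs[self.cursor] < target {
            self.cursor += 1;
        }
        match self.docs.get(self.cursor) {
            Some(&doc) if doc == target => SeekDangerResult::Found,
            Some(&doc) => SeekDangerResult::SeekLowerBound(doc),
            None => SeekDangerResult::SeekLowerBound(TERMINATED),
        }
    }
}

// Find the first doc >= `candidate` present in every docset: whenever one
// docset misses, restart the probe from the lower bound it reported.
fn next_in_intersection(docsets: &mut [Box<dyn DocSet>], mut candidate: u32) -> u32 {
    'outer: while candidate < TERMINATED {
        for docset in docsets.iter_mut() {
            if let SeekDangerResult::SeekLowerBound(lower_bound) = docset.seek_danger(candidate) {
                // A miss at `candidate` means we can move at least one past it.
                candidate = lower_bound.max(candidate + 1);
                continue 'outer;
            }
        }
        return candidate; // all docsets aligned on `candidate`
    }
    TERMINATED
}

fn main() {
    let a: Box<dyn DocSet> = Box::new(VecDocSet { docs: vec![2, 3, 6], cursor: 0 });
    let b: Box<dyn DocSet> = Box::new(VecDocSet { docs: vec![1, 3, 5], cursor: 0 });
    let mut docsets = [a, b];
    assert_eq!(next_in_intersection(&mut docsets, 0), 3);
}
```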
@@ -1,4 +1,4 @@
use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::bm25::Bm25Weight;
@@ -194,11 +194,16 @@ impl<TPostings: Postings> DocSet for PhrasePrefixScorer<TPostings> {
self.advance()
}

fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
if self.phrase_scorer.seek_into_the_danger_zone(target) {
self.matches_prefix()
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
let seek_res = self.phrase_scorer.seek_danger(target);
if seek_res != SeekDangerResult::Found {
return seek_res;
}
// The intersection matched. Now let's see if we match the prefix.
if self.matches_prefix() {
SeekDangerResult::Found
} else {
false
SeekDangerResult::SeekLowerBound(target + 1)
}
}

@@ -32,7 +32,7 @@ impl PhrasePrefixWeight {
}
}

fn fieldnorm_reader(&self, reader: &SegmentReader) -> crate::Result<FieldNormReader> {
fn fieldnorm_reader(&self, reader: &dyn SegmentReader) -> crate::Result<FieldNormReader> {
let field = self.phrase_terms[0].1.field();
if self.similarity_weight_opt.is_some() {
if let Some(fieldnorm_reader) = reader.fieldnorms_readers().get_field(field)? {
@@ -44,7 +44,7 @@ impl PhrasePrefixWeight {

pub(crate) fn phrase_scorer(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
boost: Score,
) -> crate::Result<Option<PhrasePrefixScorer<SegmentPostings>>> {
let similarity_weight_opt = self
@@ -114,7 +114,7 @@ impl PhrasePrefixWeight {
}

impl Weight for PhrasePrefixWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
@@ -122,7 +122,7 @@ impl Weight for PhrasePrefixWeight {
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
@@ -1,6 +1,6 @@
use std::cmp::Ordering;

use crate::docset::{DocSet, TERMINATED};
use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
use crate::fieldnorm::FieldNormReader;
use crate::postings::Postings;
use crate::query::bm25::Bm25Weight;
@@ -530,12 +530,18 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
self.advance()
}

fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
debug_assert!(target >= self.doc());
if self.intersection_docset.seek_into_the_danger_zone(target) && self.phrase_match() {
return true;
let seek_res = self.intersection_docset.seek_danger(target);
if seek_res != SeekDangerResult::Found {
return seek_res;
}
// The intersection matched. Now let's see if we match the phrase.
if self.phrase_match() {
SeekDangerResult::Found
} else {
SeekDangerResult::SeekLowerBound(target + 1)
}
false
}

fn doc(&self) -> DocId {
@@ -29,7 +29,7 @@ impl PhraseWeight {
}
}

fn fieldnorm_reader(&self, reader: &SegmentReader) -> crate::Result<FieldNormReader> {
fn fieldnorm_reader(&self, reader: &dyn SegmentReader) -> crate::Result<FieldNormReader> {
let field = self.phrase_terms[0].1.field();
if self.similarity_weight_opt.is_some() {
if let Some(fieldnorm_reader) = reader.fieldnorms_readers().get_field(field)? {
@@ -41,7 +41,7 @@ impl PhraseWeight {

pub(crate) fn phrase_scorer(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
boost: Score,
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight_opt = self
@@ -74,7 +74,7 @@ impl PhraseWeight {
}

impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
@@ -82,7 +82,7 @@ impl Weight for PhraseWeight {
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
@@ -45,7 +45,7 @@ impl RegexPhraseWeight {
}
}

fn fieldnorm_reader(&self, reader: &SegmentReader) -> crate::Result<FieldNormReader> {
fn fieldnorm_reader(&self, reader: &dyn SegmentReader) -> crate::Result<FieldNormReader> {
if self.similarity_weight_opt.is_some() {
if let Some(fieldnorm_reader) = reader.fieldnorms_readers().get_field(self.field)? {
return Ok(fieldnorm_reader);
@@ -56,7 +56,7 @@ impl RegexPhraseWeight {

pub(crate) fn phrase_scorer(
&self,
reader: &SegmentReader,
reader: &dyn SegmentReader,
boost: Score,
) -> crate::Result<Option<PhraseScorer<UnionType>>> {
let similarity_weight_opt = self
@@ -84,7 +84,8 @@ impl RegexPhraseWeight {
"Phrase query exceeded max expansions {num_terms}"
)));
}
let union = Self::get_union_from_term_infos(&term_infos, reader, &inverted_index)?;
let union =
Self::get_union_from_term_infos(&term_infos, reader, inverted_index.as_ref())?;

posting_lists.push((offset, union));
}
@@ -99,7 +100,7 @@ impl RegexPhraseWeight {

/// Add all docs of the term to the docset
fn add_to_bitset(
inverted_index: &InvertedIndexReader,
inverted_index: &dyn InvertedIndexReader,
term_info: &TermInfo,
doc_bitset: &mut BitSet,
) -> crate::Result<()> {
@@ -174,8 +175,8 @@ impl RegexPhraseWeight {
/// Use Roaring Bitmaps for sparse terms. The full bitvec is main memory consumer currently.
pub(crate) fn get_union_from_term_infos(
term_infos: &[TermInfo],
reader: &SegmentReader,
inverted_index: &InvertedIndexReader,
reader: &dyn SegmentReader,
inverted_index: &dyn InvertedIndexReader,
) -> crate::Result<UnionType> {
let max_doc = reader.max_doc();

@@ -269,7 +270,7 @@ impl RegexPhraseWeight {
}

impl Weight for RegexPhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {
@@ -277,7 +278,7 @@ impl Weight for RegexPhraseWeight {
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
@@ -311,7 +312,7 @@ mod tests {
#![proptest_config(ProptestConfig::with_cases(50))]
#[test]
fn test_phrase_regex_with_random_strings(mut random_strings in proptest::collection::vec("[c-z ]{0,10}", 1..100), num_occurrences in 1..150_usize) {
let mut rng = rand::thread_rng();
let mut rng = rand::rng();

// Insert "aaa ccc" the specified number of times into the list
for _ in 0..num_occurrences {
@@ -146,7 +146,7 @@ pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
let weight = self.weight(EnableScoring::disabled_from_searcher(searcher))?;
let mut result = 0;
for reader in searcher.segment_readers() {
result += weight.count(reader)? as usize;
result += weight.count(reader.as_ref())? as usize;
}
Ok(result)
}
@@ -212,7 +212,7 @@ impl InvertedIndexRangeWeight {
}

impl Weight for InvertedIndexRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);

@@ -245,7 +245,7 @@ impl Weight for InvertedIndexRangeWeight {
Ok(Box::new(ConstScorer::new(doc_bitset, boost)))
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
@@ -429,7 +429,7 @@ mod tests {
docs.push(doc);
}

docs.shuffle(&mut rand::thread_rng());
docs.shuffle(&mut rand::rng());
let mut docs_it = docs.into_iter();
for doc in (&mut docs_it).take(50) {
index_writer.add_document(doc)?;
@@ -686,7 +686,7 @@ mod tests {
.weight(EnableScoring::disabled_from_schema(&schema))
.unwrap();
let range_scorer = range_weight
.scorer(&searcher.segment_readers()[0], 1.0f32)
.scorer(searcher.segment_readers()[0].as_ref(), 1.0f32)
.unwrap();
range_scorer
};
@@ -52,7 +52,7 @@ impl FastFieldRangeWeight {
}

impl Weight for FastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
// Check if both bounds are Bound::Unbounded
if self.bounds.is_unbounded() {
return Ok(Box::new(AllScorer::new(reader.max_doc())));
@@ -219,7 +219,7 @@ impl Weight for FastFieldRangeWeight {
}
}

fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
@@ -236,7 +236,7 @@ impl Weight for FastFieldRangeWeight {
///
/// Convert into fast field value space and search.
fn search_on_json_numerical_field(
reader: &SegmentReader,
reader: &dyn SegmentReader,
field_name: &str,
typ: Type,
bounds: BoundsRange<ValueBytes<Vec<u8>>>,
@@ -491,7 +491,7 @@ mod tests {
use common::DateTime;
use proptest::prelude::*;
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::seq::IndexedRandom;
use rand::SeedableRng;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
@@ -1,6 +1,6 @@
 use std::marker::PhantomData;

-use crate::docset::DocSet;
+use crate::docset::{DocSet, SeekDangerResult};
 use crate::query::score_combiner::ScoreCombiner;
 use crate::query::Scorer;
 use crate::{DocId, Score};
@@ -56,9 +56,9 @@ where
         self.req_scorer.seek(target)
     }

-    fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
+    fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
         self.score_cache = None;
-        self.req_scorer.seek_into_the_danger_zone(target)
+        self.req_scorer.seek_danger(target)
     }

     fn doc(&self) -> DocId {
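This hunk is the core rename of the branch: `seek_into_the_danger_zone(target) -> bool` becomes `seek_danger(target) -> SeekDangerResult`, so a miss no longer collapses to a bare `false` but can carry a lower bound for the next candidate document. The enum itself lives in `src/docset.rs`, which this truncated diff does not show; the sketch below infers its shape from the call sites (`Found`, `SeekLowerBound(DocId)`, plus the `Debug`/`PartialEq` that the new test's `assert_eq!` requires):

```rust
type DocId = u32;
const TERMINATED: DocId = DocId::MAX;

// Inferred from the call sites in this diff, not copied from src/docset.rs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SeekDangerResult {
    // The docset is positioned exactly on the target document.
    Found,
    // The target was missed; no hit exists below the reported doc id.
    SeekLowerBound(DocId),
}

// The old boolean API maps onto the new one by forgetting the lower bound.
fn as_old_bool(result: SeekDangerResult) -> bool {
    matches!(result, SeekDangerResult::Found)
}

fn main() {
    assert!(as_old_bool(SeekDangerResult::Found));
    // A miss now says where the next hit could be instead of just `false`.
    assert!(!as_old_bool(SeekDangerResult::SeekLowerBound(TERMINATED)));
}
```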
@@ -263,7 +263,9 @@ mod tests {
         let mut block_max_scores_b = vec![];
         let mut docs = vec![];
         {
-            let mut term_scorer = term_weight.term_scorer_for_test(reader, 1.0)?.unwrap();
+            let mut term_scorer = term_weight
+                .term_scorer_for_test(reader.as_ref(), 1.0)?
+                .unwrap();
             while term_scorer.doc() != TERMINATED {
                 let mut score = term_scorer.score();
                 docs.push(term_scorer.doc());
@@ -277,7 +279,9 @@ mod tests {
             }
         }
         {
-            let mut term_scorer = term_weight.term_scorer_for_test(reader, 1.0)?.unwrap();
+            let mut term_scorer = term_weight
+                .term_scorer_for_test(reader.as_ref(), 1.0)?
+                .unwrap();
             for d in docs {
                 term_scorer.seek_block(d);
                 block_max_scores_b.push(term_scorer.block_max_score());
@@ -304,10 +308,10 @@ mod tests {
         let mut writer: IndexWriter =
             index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
         use rand::Rng;
-        let mut rng = rand::thread_rng();
+        let mut rng = rand::rng();
         writer.set_merge_policy(Box::new(NoMergePolicy));
         for _ in 0..3_000 {
-            let term_freq = rng.gen_range(1..10000);
+            let term_freq = rng.random_range(1..10000);
             let words: Vec<&str> = std::iter::repeat_n("bbbb", term_freq).collect();
             let text = words.join(" ");
             writer.add_document(doc!(text_field=>text))?;
@@ -34,11 +34,11 @@ impl TermOrEmptyOrAllScorer {
 }

 impl Weight for TermWeight {
-    fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
+    fn scorer(&self, reader: &dyn SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
         Ok(self.specialized_scorer(reader, boost)?.into_boxed_scorer())
     }

-    fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
+    fn explain(&self, reader: &dyn SegmentReader, doc: DocId) -> crate::Result<Explanation> {
         match self.specialized_scorer(reader, 1.0)? {
             TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
                 if term_scorer.doc() > doc || term_scorer.seek(doc) != doc {
@@ -53,7 +53,7 @@ impl Weight for TermWeight {
         }
     }

-    fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
+    fn count(&self, reader: &dyn SegmentReader) -> crate::Result<u32> {
         if let Some(alive_bitset) = reader.alive_bitset() {
             Ok(self.scorer(reader, 1.0)?.count(alive_bitset))
         } else {
@@ -68,7 +68,7 @@ impl Weight for TermWeight {
     /// `DocSet` and push the scored documents to the collector.
     fn for_each(
         &self,
-        reader: &SegmentReader,
+        reader: &dyn SegmentReader,
         callback: &mut dyn FnMut(DocId, Score),
     ) -> crate::Result<()> {
         match self.specialized_scorer(reader, 1.0)? {
@@ -87,7 +87,7 @@ impl Weight for TermWeight {
     /// `DocSet` and push the scored documents to the collector.
     fn for_each_no_score(
         &self,
-        reader: &SegmentReader,
+        reader: &dyn SegmentReader,
         callback: &mut dyn FnMut(&[DocId]),
     ) -> crate::Result<()> {
         match self.specialized_scorer(reader, 1.0)? {
@@ -118,7 +118,7 @@ impl Weight for TermWeight {
     fn for_each_pruning(
         &self,
         threshold: Score,
-        reader: &SegmentReader,
+        reader: &dyn SegmentReader,
         callback: &mut dyn FnMut(DocId, Score) -> Score,
     ) -> crate::Result<()> {
         let specialized_scorer = self.specialized_scorer(reader, 1.0)?;
@@ -166,7 +166,7 @@ impl TermWeight {
     #[cfg(test)]
     pub(crate) fn term_scorer_for_test(
         &self,
-        reader: &SegmentReader,
+        reader: &dyn SegmentReader,
         boost: Score,
     ) -> crate::Result<Option<TermScorer>> {
         let scorer = self.specialized_scorer(reader, boost)?;
@@ -178,7 +178,7 @@ impl TermWeight {

     fn specialized_scorer(
         &self,
-        reader: &SegmentReader,
+        reader: &dyn SegmentReader,
         boost: Score,
     ) -> crate::Result<TermOrEmptyOrAllScorer> {
         let field = self.term.field();
@@ -206,7 +206,10 @@ impl TermWeight {
         )))
     }

-    fn fieldnorm_reader(&self, segment_reader: &SegmentReader) -> crate::Result<FieldNormReader> {
+    fn fieldnorm_reader(
+        &self,
+        segment_reader: &dyn SegmentReader,
+    ) -> crate::Result<FieldNormReader> {
         if self.scoring_enabled {
             if let Some(field_norm_reader) = segment_reader
                 .fieldnorms_readers()
@@ -1,6 +1,6 @@
 use common::TinySet;

-use crate::docset::{DocSet, TERMINATED};
+use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
 use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
 use crate::query::size_hint::estimate_union;
 use crate::query::Scorer;
@@ -225,25 +225,47 @@ where
         }
     }

-    fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
+    fn seek_danger(&mut self, target: DocId) -> SeekDangerResult {
+        if target >= TERMINATED {
+            return SeekDangerResult::SeekLowerBound(TERMINATED);
+        }
         if self.is_in_horizon(target) {
             // Our value is within the buffered horizon and the docset may already have been
             // processed and removed, so we need to use seek, which uses the regular advance.
-            self.seek(target) == target
-        } else {
-            // The docsets are not in the buffered range, so we can use seek_into_the_danger_zone
-            // of the underlying docsets
-            let is_hit = self
-                .docsets
-                .iter_mut()
-                .any(|docset| docset.seek_into_the_danger_zone(target));
-
-            // The API requires the DocSet to be in a valid state when `seek_into_the_danger_zone`
-            // returns true.
-            if is_hit {
-                self.seek(target);
-            }
-            is_hit
-        }
+            let seek_doc = self.seek(target);
+            if seek_doc == target {
+                return SeekDangerResult::Found;
+            } else {
+                return SeekDangerResult::SeekLowerBound(seek_doc);
+            };
+        }
+
+        // The docsets are not in the buffered range, so we can use seek_into_the_danger_zone
+        // of the underlying docsets
+        let mut is_hit = false;
+        let mut min_new_target = TERMINATED;
+
+        for docset in self.docsets.iter_mut() {
+            match docset.seek_danger(target) {
+                SeekDangerResult::Found => {
+                    is_hit = true;
+                    break;
+                }
+                SeekDangerResult::SeekLowerBound(new_target) => {
+                    min_new_target = min_new_target.min(new_target);
+                }
+            }
+        }
+
+        // The API requires the DocSet to be in a valid state when `seek_into_the_danger_zone`
+        // returns Found.
+        if is_hit {
+            // The doc is found. Let's make sure we position the union on the target
+            // to bring it back to a valid state.
+            self.seek(target);
+            SeekDangerResult::Found
+        } else {
+            SeekDangerResult::SeekLowerBound(min_new_target)
+        }
     }
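The rewrite above boils down to one combination rule: short-circuit to `Found` as soon as any child docset hits the target (then re-seek the union to restore a valid position), otherwise report the minimum of the children's lower bounds. A standalone sketch of that fold, repeating the enum inferred earlier so the block compiles on its own:

```rust
type DocId = u32;
const TERMINATED: DocId = DocId::MAX;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SeekDangerResult {
    Found,
    SeekLowerBound(DocId),
}

// Combines per-child results the way the new BufferedUnionScorer::seek_danger
// does: the first hit wins; otherwise the union's next candidate is the
// smallest lower bound any child reported.
fn combine(results: impl IntoIterator<Item = SeekDangerResult>) -> SeekDangerResult {
    let mut min_lower_bound = TERMINATED;
    for result in results {
        match result {
            SeekDangerResult::Found => return SeekDangerResult::Found,
            SeekDangerResult::SeekLowerBound(lower_bound) => {
                min_lower_bound = min_lower_bound.min(lower_bound);
            }
        }
    }
    SeekDangerResult::SeekLowerBound(min_lower_bound)
}

fn main() {
    use SeekDangerResult::*;
    assert_eq!(combine([SeekLowerBound(7), Found]), Found);
    assert_eq!(combine([SeekLowerBound(7), SeekLowerBound(3)]), SeekLowerBound(3));
    // No hits anywhere reports TERMINATED, matching the new unit test below.
    let no_children: [SeekDangerResult; 0] = [];
    assert_eq!(combine(no_children), SeekLowerBound(TERMINATED));
}
```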
@@ -14,7 +14,7 @@ mod tests {
     use common::BitSet;

     use super::{SimpleUnion, *};
-    use crate::docset::{DocSet, TERMINATED};
+    use crate::docset::{DocSet, SeekDangerResult, TERMINATED};
     use crate::postings::tests::test_skip_against_unoptimized;
     use crate::query::score_combiner::DoNothingCombiner;
     use crate::query::union::bitset_union::BitSetPostingUnion;
@@ -254,6 +254,27 @@ mod tests {
             vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
         );
     }
+
+    #[test]
+    fn test_buffered_union_seek_into_danger_zone_terminated() {
+        let scorer1 = ConstScorer::new(VecDocSet::from(vec![1, 2]), 1.0);
+        let scorer2 = ConstScorer::new(VecDocSet::from(vec![2, 3]), 1.0);
+
+        let mut union_scorer =
+            BufferedUnionScorer::build(vec![scorer1, scorer2], DoNothingCombiner::default, 100);
+
+        // Advance to end
+        while union_scorer.doc() != TERMINATED {
+            union_scorer.advance();
+        }
+
+        assert_eq!(union_scorer.doc(), TERMINATED);
+
+        assert_eq!(
+            union_scorer.seek_danger(TERMINATED),
+            SeekDangerResult::SeekLowerBound(TERMINATED)
+        );
+    }
 }

 #[cfg(all(test, feature = "unstable"))]
Some files were not shown because too many files have changed in this diff.