Compare commits

...

9 Commits

Author SHA1 Message Date
Paul Masurel
a5dc888cb9 Removed BlockNotLoaded 2026-01-06 14:37:10 +01:00
Paul Masurel
f62a806f47 Explicit doc for the meaning of intersection_priority 2026-01-06 14:09:52 +01:00
Paul Masurel
601541e9ae Added unit tests 2026-01-02 13:07:00 +01:00
Paul Masurel
98be1a5423 Added seek_doc to intersections.
tantivy requires Scorer to be positioned on a DocId at all time.
This decision is not performance neutral.

When we have an intersection of a heavy DocSet with a lighter one
forcing the positioning of the first doc is needlessly expensive.

This PR fixes this by introducing a seek_doc parameter in the weight function.
Weights may skip over documents when they create the Scorer.
2025-12-31 18:20:28 +01:00
Paul Masurel
b11605f045 Addressing clippy comments (#2789)
Co-authored-by: Paul Masurel <paul.masurel@datadoghq.com>
2025-12-31 18:02:00 +01:00
ChangRui-Ryan
75d7989cc6 add benchmark for boolean query with range sub query (#2787) 2025-12-31 12:00:53 +01:00
PSeitz
923f0508f2 seek_exact + cost based intersection (#2538)
* seek_exact + cost based intersection

Adds `seek_exact` and `cost` to `DocSet` for a more efficient intersection.
Unlike `seek`, `seek_exact` does not require the DocSet to advance to the next hit, if the target does not exist.

`cost` allows to address the different DocSet types and their cost
model and is used to determine the DocSet that drives the intersection.
E.g. fast field range queries may do a full scan. Phrase queries load the positions to check if we have a hit.
They both have a higher cost than their size_hint would suggest.

Improves `size_hint` estimation for intersection and union, by having an
estimation based on random distribution with a co-location factor.

Refactor range query benchmark.

Closes #2531

*Future Work*

Implement `seek_exact` for BufferedUnionScorer and RangeDocSet (fast field range queries)
Evaluate replacing `seek` with `seek_exact` to reduce code complexity

* Apply suggestions from code review

Co-authored-by: Paul Masurel <paul@quickwit.io>

* add API contract verification

* impl seek_exact on union

* rename seek_exact

* add mixed AND OR test, fix buffered_union

* Add a proptest of BooleanQuery. (#2690)

* fix build

* Increase the document count.

* fix merge conflict

* fix debug assert

* Fix compilation errors after rebase

- Remove duplicate proptest_boolean_query module
- Remove duplicate cost() method implementations
- Fix TopDocs API usage (add .order_by_score())
- Remove duplicate imports
- Remove unused variable assignments

---------

Co-authored-by: Paul Masurel <paul@quickwit.io>
Co-authored-by: Pascal Seitz <pascal.seitz@datadoghq.com>
Co-authored-by: Stu Hood <stuhood@gmail.com>
2025-12-30 14:43:25 +01:00
ChangRui-Ryan
e0b62e00ac optimize RangeDocSet for non-overlapping query ranges (#2783) 2025-12-29 16:55:28 +01:00
Stu Hood
ce97beb86f Add support for natural-order-with-none-highest in TopDocs::order_by (#2780)
* Add `ComparatorEnum::NaturalNoneHigher`.

* Fix comments.
2025-12-23 09:22:20 +01:00
47 changed files with 2100 additions and 678 deletions

View File

@@ -75,12 +75,12 @@ typetag = "0.2.21"
winapi = "0.3.9"
[dev-dependencies]
binggan = "0.14.0"
binggan = "0.14.2"
rand = "0.8.5"
maplit = "1.0.2"
matches = "0.1.9"
pretty_assertions = "1.2.1"
proptest = "1.0.0"
proptest = "1.7.0"
test-log = "0.2.10"
futures = "0.3.21"
paste = "1.0.11"
@@ -173,6 +173,18 @@ harness = false
name = "exists_json"
harness = false
[[bench]]
name = "range_query"
harness = false
[[bench]]
name = "and_or_queries"
harness = false
[[bench]]
name = "range_queries"
harness = false
[[bench]]
name = "bool_queries_with_range"
harness = false

View File

@@ -0,0 +1,288 @@
use binggan::{black_box, BenchGroup, BenchRunner};
use rand::prelude::*;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tantivy::collector::{Collector, Count, DocSetCollector, TopDocs};
use tantivy::query::{Query, QueryParser};
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Order, ReloadPolicy, Searcher};
/// Shared benchmark fixture: an in-RAM index plus the reusable searcher
/// and query parser derived from it.
#[derive(Clone)]
struct BenchIndex {
    // Kept alive alongside the searcher; never read directly by the benches.
    #[allow(dead_code)]
    index: Index,
    // Searcher over the committed segments; cloned into each bench task.
    searcher: Searcher,
    // Parser configured with `title` as the default field.
    query_parser: QueryParser,
}
/// Builds an in-RAM index with `num_docs` documents and returns the shared
/// fixture (index + searcher + query parser).
///
/// * `p_title_a` — probability that a document's title is "a" (else "b").
/// * `distribution` — "dense": `num_rand` in 0..1000 and `num_asc` increments
///   every 10_000 docs; "sparse": `num_rand` in 0..10_000_000 and
///   `num_asc == doc_id`. Panics on any other value.
fn build_shared_indices(num_docs: usize, p_title_a: f32, distribution: &str) -> BenchIndex {
    // Unified schema: one text field plus inverted-index and fast-field
    // variants of the numeric columns.
    let mut schema_builder = Schema::builder();
    let f_title = schema_builder.add_text_field("title", TEXT);
    let f_num_rand = schema_builder.add_u64_field("num_rand", INDEXED);
    let f_num_asc = schema_builder.add_u64_field("num_asc", INDEXED);
    let f_num_rand_fast = schema_builder.add_u64_field("num_rand_fast", INDEXED | FAST);
    let f_num_asc_fast = schema_builder.add_u64_field("num_asc_fast", INDEXED | FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());
    // Reject unknown distributions up front, before any documents are written.
    assert!(
        matches!(distribution, "dense" | "sparse"),
        "Unsupported distribution type"
    );
    // Populate index with stable RNG for reproducibility.
    let mut rng = StdRng::from_seed([7u8; 32]);
    {
        let mut writer = index.writer_with_num_threads(1, 4_000_000_000).unwrap();
        for doc_id in 0..num_docs {
            // Always add title to avoid empty documents.
            let title_token = if rng.gen_bool(p_title_a as f64) { "a" } else { "b" };
            // The two distributions only differ in how the numeric columns
            // are drawn; the RNG call order matches the original per-arm loops.
            let (num_rand, num_asc) = match distribution {
                "dense" => (rng.gen_range(0u64..1000u64), (doc_id / 10000) as u64),
                "sparse" => (rng.gen_range(0u64..10000000u64), doc_id as u64),
                _ => unreachable!(),
            };
            writer
                .add_document(doc!(
                    f_title=>title_token,
                    f_num_rand=>num_rand,
                    f_num_asc=>num_asc,
                    f_num_rand_fast=>num_rand,
                    f_num_asc_fast=>num_asc,
                ))
                .unwrap();
        }
        writer.commit().unwrap();
    }
    // Prepare reader/searcher once.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .try_into()
        .unwrap();
    let searcher = reader.searcher();
    // Build query parser for title field.
    let qp_title = QueryParser::for_index(&index, vec![f_title]);
    BenchIndex {
        index,
        searcher,
        query_parser: qp_title,
    }
}
/// Entry point: builds one index per scenario and registers count /
/// doc-set / top-100 benchmarks for every (term, field) combination.
fn main() {
    // (group name, num docs, P(title == "a"), distribution, range lo, range hi)
    let scenarios = vec![
        ("dense and 99% a".to_string(), 10_000_000, 0.99, "dense", 0, 9),
        ("dense and 99% a".to_string(), 10_000_000, 0.99, "dense", 990, 999),
        ("sparse and 99% a".to_string(), 10_000_000, 0.99, "sparse", 0, 9),
        (
            "sparse and 99% a".to_string(),
            10_000_000,
            0.99,
            "sparse",
            9_999_990,
            9_999_999,
        ),
    ];
    let mut runner = BenchRunner::new();
    for (scenario_id, n, p_title_a, num_rand_distribution, range_low, range_high) in scenarios {
        // Build index for this scenario.
        let bench_index = build_shared_indices(n, p_title_a, num_rand_distribution);
        // Create benchmark group.
        let mut group = runner.new_group();
        // Include the range bounds in the group name: several scenarios share
        // the same `scenario_id`, and duplicate group names would make the
        // reported results ambiguous.
        group.set_name(format!("{} [{}..={}]", scenario_id, range_low, range_high));
        // Index field variants to combine with each term.
        let field_names = ["num_rand", "num_asc", "num_rand_fast", "num_asc_fast"];
        // Terms of very different selectivity ("z" is never indexed, so it
        // exercises the zero-hit path).
        let terms = ["a", "b", "z"];
        // Cartesian product: term AND range-over-field.
        let mut queries = Vec::new();
        for &term in &terms {
            for &field_name in &field_names {
                let query_str = format!(
                    "{} AND {}:[{} TO {}]",
                    term, field_name, range_low, range_high
                );
                queries.push((query_str, field_name.to_string()));
            }
        }
        // One extra case: range AND range on the two fast fields.
        let query_str = format!(
            "{}:[{} TO {}] AND {}:[{} TO {}]",
            "num_rand_fast", range_low, range_high, "num_asc_fast", range_low, range_high
        );
        queries.push((query_str, "num_asc_fast".to_string()));
        // Run all benchmark tasks for each query and its corresponding field name.
        for (query_str, field_name) in queries {
            run_benchmark_tasks(&mut group, &bench_index, &query_str, &field_name);
        }
        group.run();
    }
}
/// Registers every benchmark task for one query string: hit counting, full
/// doc-set materialization, and — for fast fields only — top-100 sorts in
/// both directions.
fn run_benchmark_tasks(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query_str: &str,
    field_name: &str,
) {
    // Hit counting.
    add_bench_task(bench_group, bench_index, query_str, Count, "count");
    // Materializing the full matching doc set.
    add_bench_task(
        bench_group,
        bench_index,
        query_str,
        DocSetCollector,
        "all results",
    );
    // Top-100 sorts only apply to fast fields.
    if !field_name.ends_with("_fast") {
        return;
    }
    for (suffix, order) in [("asc", Order::Asc), ("desc", Order::Desc)] {
        let collector_name = format!("top100_by_{}_{}", field_name, suffix);
        add_bench_task(
            bench_group,
            bench_index,
            query_str,
            TopDocs::with_limit(100).order_by_fast_field::<u64>(field_name.to_string(), order),
            &collector_name,
        );
    }
}
/// Parses `query_str`, bundles it with `collector` into a search task, and
/// registers the task under "<query>_<collector_name>" (spaces become '_').
fn add_bench_task<C: Collector + 'static>(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query_str: &str,
    collector: C,
    collector_name: &str,
) {
    let search_task = SearchTask {
        searcher: bench_index.searcher.clone(),
        collector,
        query: bench_index.query_parser.parse_query(query_str).unwrap(),
    };
    let task_name = format!("{}_{}", query_str.replace(" ", "_"), collector_name);
    bench_group.register(task_name, move |_| black_box(search_task.run()));
}
/// A prepared search: a cloned searcher, a collector, and a parsed query,
/// bundled so the registered benchmark closure owns everything it needs.
struct SearchTask<C: Collector> {
    searcher: Searcher,
    collector: C,
    query: Box<dyn Query>,
}
impl<C: Collector> SearchTask<C> {
    /// Executes the search and reduces the collector's fruit to a hit count.
    ///
    /// The fruit type differs per collector, so the result is probed via
    /// `Any` downcasts: `usize` (Count), `Vec<(Option<u64>, DocAddress)>` and
    /// `Vec<(u64, DocAddress)>` (TopDocs by fast field), and
    /// `HashSet<DocAddress>` (DocSetCollector). Any other type logs a warning
    /// and counts as 0.
    // `inline(never)` presumably keeps the measured work in its own stack
    // frame for profiling — confirm intent.
    #[inline(never)]
    pub fn run(&self) -> usize {
        let result = self.searcher.search(&self.query, &self.collector).unwrap();
        if let Some(count) = (&result as &dyn std::any::Any).downcast_ref::<usize>() {
            *count
        } else if let Some(top_docs) = (&result as &dyn std::any::Any)
            .downcast_ref::<Vec<(Option<u64>, tantivy::DocAddress)>>()
        {
            top_docs.len()
        } else if let Some(top_docs) =
            (&result as &dyn std::any::Any).downcast_ref::<Vec<(u64, tantivy::DocAddress)>>()
        {
            top_docs.len()
        } else if let Some(doc_set) = (&result as &dyn std::any::Any)
            .downcast_ref::<std::collections::HashSet<tantivy::DocAddress>>()
        {
            doc_set.len()
        } else {
            eprintln!(
                "Unknown collector result type: {:?}",
                std::any::type_name::<C::Fruit>()
            );
            0
        }
    }
}

365
benches/range_queries.rs Normal file
View File

@@ -0,0 +1,365 @@
use std::ops::Bound;
use binggan::{black_box, BenchGroup, BenchRunner};
use rand::prelude::*;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tantivy::collector::{Count, DocSetCollector, TopDocs};
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, FAST, INDEXED};
use tantivy::{doc, Index, Order, ReloadPolicy, Searcher, Term};
/// Shared benchmark fixture: an in-RAM index plus its reusable searcher.
#[derive(Clone)]
struct BenchIndex {
    // Kept alive alongside the searcher; never read directly by the benches.
    #[allow(dead_code)]
    index: Index,
    searcher: Searcher,
}
/// Builds an in-RAM index with `num_docs` documents carrying two u64
/// fast fields and returns the shared fixture.
///
/// * `distribution` — "dense": `num_rand` in 0..1000 and `num_asc` increments
///   every 10_000 docs; "sparse": `num_rand` in 0..10_000_000 and
///   `num_asc == doc_id`. Panics on any other value.
fn build_shared_indices(num_docs: usize, distribution: &str) -> BenchIndex {
    // Schema with fast fields only.
    let mut schema_builder = Schema::builder();
    let f_num_rand_fast = schema_builder.add_u64_field("num_rand_fast", INDEXED | FAST);
    let f_num_asc_fast = schema_builder.add_u64_field("num_asc_fast", INDEXED | FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema.clone());
    // Reject unknown distributions up front, before any documents are written.
    assert!(
        matches!(distribution, "dense" | "sparse"),
        "Unsupported distribution type"
    );
    // Populate index with stable RNG for reproducibility.
    let mut rng = StdRng::from_seed([7u8; 32]);
    {
        let mut writer = index.writer_with_num_threads(1, 4_000_000_000).unwrap();
        for doc_id in 0..num_docs {
            // The two distributions only differ in how the columns are drawn;
            // the RNG call order matches the original per-arm loops.
            let (num_rand, num_asc) = match distribution {
                "dense" => (rng.gen_range(0u64..1000u64), (doc_id / 10000) as u64),
                "sparse" => (rng.gen_range(0u64..10000000u64), doc_id as u64),
                _ => unreachable!(),
            };
            writer
                .add_document(doc!(
                    f_num_rand_fast=>num_rand,
                    f_num_asc_fast=>num_asc,
                ))
                .unwrap();
        }
        writer.commit().unwrap();
    }
    // Prepare reader/searcher once.
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .try_into()
        .unwrap();
    let searcher = reader.searcher();
    BenchIndex { index, searcher }
}
/// Entry point: builds one index per scenario and registers range-query
/// benchmarks (count + top-100 asc/desc) for each fast field.
fn main() {
    // Prepare corpora with varying scenarios:
    // (group name, num docs, distribution, range lo, range hi).
    let scenarios = vec![
        // Dense distribution - random values in small range (0-999)
        (
            "dense_values_search_low_value_range".to_string(),
            10_000_000,
            "dense",
            0,
            9,
        ),
        (
            "dense_values_search_high_value_range".to_string(),
            10_000_000,
            "dense",
            990,
            999,
        ),
        // Bounds beyond any indexed value: exercises the zero-hit path.
        (
            "dense_values_search_out_of_range".to_string(),
            10_000_000,
            "dense",
            1000,
            1002,
        ),
        (
            "sparse_values_search_low_value_range".to_string(),
            10_000_000,
            "sparse",
            0,
            9,
        ),
        (
            "sparse_values_search_high_value_range".to_string(),
            10_000_000,
            "sparse",
            9_999_990,
            9_999_999,
        ),
        (
            "sparse_values_search_out_of_range".to_string(),
            10_000_000,
            "sparse",
            10_000_000,
            10_000_002,
        ),
    ];
    let mut runner = BenchRunner::new();
    for (scenario_id, n, num_rand_distribution, range_low, range_high) in scenarios {
        // Build index for this scenario.
        let bench_index = build_shared_indices(n, num_rand_distribution);
        // Create benchmark group.
        let mut group = runner.new_group();
        // Now set the name (this moves scenario_id).
        group.set_name(scenario_id);
        // Define fast field types.
        let field_names = ["num_rand_fast", "num_asc_fast"];
        // Generate range queries for fast fields.
        for &field_name in &field_names {
            // Build an inclusive [range_low, range_high] term range query.
            let field = bench_index.searcher.schema().get_field(field_name).unwrap();
            let lower_term = Term::from_field_u64(field, range_low);
            let upper_term = Term::from_field_u64(field, range_high);
            let query = RangeQuery::new(Bound::Included(lower_term), Bound::Included(upper_term));
            run_benchmark_tasks(
                &mut group,
                &bench_index,
                query,
                field_name,
                range_low,
                range_high,
            );
        }
        group.run();
    }
}
/// Run all benchmark tasks for a given range query and field name:
/// hit counting plus top-100 sorts in both directions.
///
/// NOTE(review): `add_bench_task_docset` exists in this file but is not
/// wired in here, so the doc-set collector is currently never benchmarked.
fn run_benchmark_tasks(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query: RangeQuery,
    field_name: &str,
    range_low: u64,
    range_high: u64,
) {
    // Test count.
    add_bench_task_count(
        bench_group,
        bench_index,
        query.clone(),
        "count",
        field_name,
        range_low,
        range_high,
    );
    // Test top 100 by the field (ascending order).
    {
        let collector_name = format!("top100_by_{}_asc", field_name);
        let field_name_owned = field_name.to_string();
        add_bench_task_top100_asc(
            bench_group,
            bench_index,
            query.clone(),
            &collector_name,
            field_name,
            range_low,
            range_high,
            field_name_owned,
        );
    }
    // Test top 100 by the field (descending order); `query` is moved here
    // since this is its last use.
    {
        let collector_name = format!("top100_by_{}_desc", field_name);
        let field_name_owned = field_name.to_string();
        add_bench_task_top100_desc(
            bench_group,
            bench_index,
            query,
            &collector_name,
            field_name,
            range_low,
            range_high,
            field_name_owned,
        );
    }
}
/// Registers a `Count`-collector benchmark for `query` under the name
/// "range_<field>_[lo TO hi]_<collector_name>".
fn add_bench_task_count(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query: RangeQuery,
    collector_name: &str,
    field_name: &str,
    range_low: u64,
    range_high: u64,
) {
    let search_task = CountSearchTask {
        searcher: bench_index.searcher.clone(),
        query,
    };
    let task_name = format!(
        "range_{}_[{} TO {}]_{}",
        field_name, range_low, range_high, collector_name
    );
    bench_group.register(task_name, move |_| black_box(search_task.run()));
}
/// Registers a `DocSetCollector` benchmark for `query` under the name
/// "range_<field>_[lo TO hi]_<collector_name>".
///
/// Currently not called by `run_benchmark_tasks`; the attribute silences the
/// dead-code warning while keeping the helper available for ad-hoc use.
#[allow(dead_code)]
fn add_bench_task_docset(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query: RangeQuery,
    collector_name: &str,
    field_name: &str,
    range_low: u64,
    range_high: u64,
) {
    let task_name = format!(
        "range_{}_[{} TO {}]_{}",
        field_name, range_low, range_high, collector_name
    );
    let search_task = DocSetSearchTask {
        searcher: bench_index.searcher.clone(),
        query,
    };
    bench_group.register(task_name, move |_| black_box(search_task.run()));
}
/// Registers a top-100 ascending-sort benchmark for `query` under the name
/// "range_<field>_[lo TO hi]_<collector_name>".
///
/// NOTE(review): `field_name` and `field_name_owned` carry the same value in
/// two forms — the former names the task, the latter is moved into the task.
/// Consider collapsing them into a single parameter.
fn add_bench_task_top100_asc(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query: RangeQuery,
    collector_name: &str,
    field_name: &str,
    range_low: u64,
    range_high: u64,
    field_name_owned: String,
) {
    let task_name = format!(
        "range_{}_[{} TO {}]_{}",
        field_name, range_low, range_high, collector_name
    );
    let search_task = Top100AscSearchTask {
        searcher: bench_index.searcher.clone(),
        query,
        field_name: field_name_owned,
    };
    bench_group.register(task_name, move |_| black_box(search_task.run()));
}
/// Registers a top-100 descending-sort benchmark for `query` under the name
/// "range_<field>_[lo TO hi]_<collector_name>".
///
/// NOTE(review): `field_name` and `field_name_owned` carry the same value in
/// two forms — the former names the task, the latter is moved into the task.
/// Consider collapsing them into a single parameter.
fn add_bench_task_top100_desc(
    bench_group: &mut BenchGroup,
    bench_index: &BenchIndex,
    query: RangeQuery,
    collector_name: &str,
    field_name: &str,
    range_low: u64,
    range_high: u64,
    field_name_owned: String,
) {
    let task_name = format!(
        "range_{}_[{} TO {}]_{}",
        field_name, range_low, range_high, collector_name
    );
    let search_task = Top100DescSearchTask {
        searcher: bench_index.searcher.clone(),
        query,
        field_name: field_name_owned,
    };
    bench_group.register(task_name, move |_| black_box(search_task.run()));
}
/// Owns everything needed to repeatedly run a counting search.
struct CountSearchTask {
    searcher: Searcher,
    query: RangeQuery,
}
impl CountSearchTask {
    /// Runs the query with the `Count` collector and returns the hit count.
    // `inline(never)` presumably keeps the measured work in its own frame.
    #[inline(never)]
    pub fn run(&self) -> usize {
        self.searcher.search(&self.query, &Count).unwrap()
    }
}
/// Owns everything needed to repeatedly materialize the full matching
/// doc set. Only reachable through `add_bench_task_docset`, which is
/// currently unused.
struct DocSetSearchTask {
    searcher: Searcher,
    query: RangeQuery,
}
impl DocSetSearchTask {
    /// Runs the query with `DocSetCollector` and returns the number of
    /// matching documents.
    #[inline(never)]
    pub fn run(&self) -> usize {
        let result = self.searcher.search(&self.query, &DocSetCollector).unwrap();
        result.len()
    }
}
/// Owns everything needed to repeatedly run a top-100 ascending sort.
struct Top100AscSearchTask {
    searcher: Searcher,
    query: RangeQuery,
    field_name: String,
}
impl Top100AscSearchTask {
    /// Runs the query, sorts hits ascending by `field_name`, fetches each of
    /// the (up to 100) matching documents from the store, and returns the
    /// number of hits. Collector construction and doc retrieval both happen
    /// inside this call, so they are part of the measured time.
    #[inline(never)]
    pub fn run(&self) -> usize {
        let collector =
            TopDocs::with_limit(100).order_by_fast_field::<u64>(&self.field_name, Order::Asc);
        let result = self.searcher.search(&self.query, &collector).unwrap();
        // Touch every returned document; store access is included in the timing.
        for (_score, doc_address) in &result {
            let _doc: tantivy::TantivyDocument = self.searcher.doc(*doc_address).unwrap();
        }
        result.len()
    }
}
/// Owns everything needed to repeatedly run a top-100 descending sort.
struct Top100DescSearchTask {
    searcher: Searcher,
    query: RangeQuery,
    field_name: String,
}
impl Top100DescSearchTask {
    /// Runs the query, sorts hits descending by `field_name`, fetches each of
    /// the (up to 100) matching documents from the store, and returns the
    /// number of hits. Collector construction and doc retrieval both happen
    /// inside this call, so they are part of the measured time.
    #[inline(never)]
    pub fn run(&self) -> usize {
        let collector =
            TopDocs::with_limit(100).order_by_fast_field::<u64>(&self.field_name, Order::Desc);
        let result = self.searcher.search(&self.query, &collector).unwrap();
        // Touch every returned document; store access is included in the timing.
        for (_score, doc_address) in &result {
            let _doc: tantivy::TantivyDocument = self.searcher.doc(*doc_address).unwrap();
        }
        result.len()
    }
}

260
benches/range_query.rs Normal file
View File

@@ -0,0 +1,260 @@
use std::fmt::Display;
use std::net::Ipv6Addr;
use std::ops::RangeInclusive;
use binggan::plugins::PeakMemAllocPlugin;
use binggan::{black_box, BenchRunner, OutputValue, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use columnar::MonotonicallyMappableToU128;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use tantivy::collector::{Count, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::{doc, Index};
// Instrumented allocator so binggan can report peak memory per bench.
#[global_allocator]
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;
/// Entry point: delegates to the benchmark definition below.
fn main() {
    bench_range_query();
}
/// Registers the two benchmark suites — u64 range queries and ip (u128)
/// range queries — each over a single-valued and a multi-valued field.
fn bench_range_query() {
    let index = get_index_0_to_100();
    let mut runner = BenchRunner::new();
    // Report peak memory via the instrumented global allocator.
    runner.add_plugin(PeakMemAllocPlugin::new(GLOBAL));
    runner.set_name("range_query on u64");
    let field_name_and_descr: Vec<_> = vec![
        ("id", "Single Valued Range Field"),
        ("ids", "Multi Valued Range Field"),
    ];
    // Ranges chosen to match roughly 90% / 10% / 1% of documents
    // (see the get_*_percent helpers below).
    let range_num_hits = vec![
        ("90_percent", get_90_percent()),
        ("10_percent", get_10_percent()),
        ("1_percent", get_1_percent()),
    ];
    test_range(&mut runner, &index, &field_name_and_descr, range_num_hits);
    runner.set_name("range_query on ip");
    let field_name_and_descr: Vec<_> = vec![
        ("ip", "Single Valued Range Field"),
        ("ips", "Multi Valued Range Field"),
    ];
    let range_num_hits = vec![
        ("90_percent", get_90_percent_ip()),
        ("10_percent", get_10_percent_ip()),
        ("1_percent", get_1_percent_ip()),
    ];
    test_range(&mut runner, &index, &field_name_and_descr, range_num_hits);
}
/// Registers one bench group per field, crossing every range selectivity
/// with every term selectivity (plus a range-only variant).
fn test_range<T: Display>(
    runner: &mut BenchRunner,
    index: &Index,
    field_name_and_descr: &[(&str, &str)],
    range_num_hits: Vec<(&str, RangeInclusive<T>)>,
) {
    for (field, suffix) in field_name_and_descr {
        // ("", "") is the range-only case without a term intersection.
        let term_num_hits = vec![
            ("", ""),
            ("1_percent", "veryfew"),
            ("10_percent", "few"),
            ("90_percent", "most"),
        ];
        let mut group = runner.new_group();
        group.set_name(suffix);
        // all intersect combinations
        for (range_name, range) in &range_num_hits {
            for (term_name, term) in &term_num_hits {
                // Reborrow so the `move` closure captures a plain `&Index`.
                let index = &index;
                let test_name = if term_name.is_empty() {
                    format!("id_range_hit_{}", range_name)
                } else {
                    format!(
                        "id_range_hit_{}_intersect_with_term_{}",
                        range_name, term_name
                    )
                };
                group.register(test_name, move |_| {
                    // NOTE(review): the query suffix is formatted inside the
                    // registered closure, so this string building is part of
                    // the measured time on every invocation.
                    let query = if term_name.is_empty() {
                        "".to_string()
                    } else {
                        format!("AND id_name:{}", term)
                    };
                    black_box(execute_query(field, range, &query, index));
                });
            }
        }
        group.run();
    }
}
/// Builds the shared 100k-document index. Each document gets a term whose
/// frequency class is ~1% ("veryfew"), ~10% ("few"), or the remainder
/// ("most"), plus a random id in 0..100 mirrored into an ip field.
fn get_index_0_to_100() -> Index {
    // Stable seed so every run indexes the same corpus.
    let mut rng = StdRng::from_seed([1u8; 32]);
    let num_vals = 100_000;
    let docs: Vec<_> = (0..num_vals)
        .map(|_i| {
            let id_name = if rng.gen_bool(0.01) {
                "veryfew".to_string() // ~1%
            } else if rng.gen_bool(0.1) {
                "few".to_string() // ~9.9% (0.99 * 0.1)
            } else {
                "most".to_string() // ~89.1% (the remainder)
            };
            Doc {
                id_name,
                id: rng.gen_range(0..100),
                // Multiply by 1000, so that we create most buckets in the compact space
                // The benches depend on this range to select n-percent of elements with the
                // methods below.
                ip: Ipv6Addr::from_u128(rng.gen_range(0..100) * 1000),
            }
        })
        .collect();
    create_index_from_docs(&docs)
}
/// A benchmark document: a frequency-class term, a u64 id in 0..100, and
/// the same id encoded (scaled by 1000) as an ip address.
#[derive(Clone, Debug)]
pub struct Doc {
    pub id_name: String,
    pub id: u64,
    pub ip: Ipv6Addr,
}
/// Builds an in-RAM index from `docs`.
///
/// Each `doc.id` is written once to the single-valued fields ("id" and the
/// f64/i64 variants) and twice to the multi-valued "ids*" fields; the ip
/// value likewise goes once to "ip" and twice to "ips".
/// NOTE(review): the f64/i64 variants are indexed but appear unused by the
/// benches in this file — confirm before removing.
pub fn create_index_from_docs(docs: &[Doc]) -> Index {
    let mut schema_builder = Schema::builder();
    let id_u64_field = schema_builder.add_u64_field("id", INDEXED | STORED | FAST);
    let ids_u64_field =
        schema_builder.add_u64_field("ids", NumericOptions::default().set_fast().set_indexed());
    let id_f64_field = schema_builder.add_f64_field("id_f64", INDEXED | STORED | FAST);
    let ids_f64_field = schema_builder.add_f64_field(
        "ids_f64",
        NumericOptions::default().set_fast().set_indexed(),
    );
    let id_i64_field = schema_builder.add_i64_field("id_i64", INDEXED | STORED | FAST);
    let ids_i64_field = schema_builder.add_i64_field(
        "ids_i64",
        NumericOptions::default().set_fast().set_indexed(),
    );
    let text_field = schema_builder.add_text_field("id_name", STRING | STORED);
    let text_field2 = schema_builder.add_text_field("id_name_fast", STRING | STORED | FAST);
    let ip_field = schema_builder.add_ip_addr_field("ip", FAST);
    let ips_field = schema_builder.add_ip_addr_field("ips", FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    {
        let mut index_writer = index.writer_with_num_threads(1, 50_000_000).unwrap();
        for doc in docs.iter() {
            // Multi-valued fields receive the same value twice — presumably
            // to make the columns genuinely multi-valued; confirm intent.
            index_writer
                .add_document(doc!(
                    ids_i64_field => doc.id as i64,
                    ids_i64_field => doc.id as i64,
                    ids_f64_field => doc.id as f64,
                    ids_f64_field => doc.id as f64,
                    ids_u64_field => doc.id,
                    ids_u64_field => doc.id,
                    id_u64_field => doc.id,
                    id_f64_field => doc.id as f64,
                    id_i64_field => doc.id as i64,
                    text_field => doc.id_name.to_string(),
                    text_field2 => doc.id_name.to_string(),
                    ips_field => doc.ip,
                    ips_field => doc.ip,
                    ip_field => doc.ip,
                ))
                .unwrap();
        }
        index_writer.commit().unwrap();
    }
    index
}
/// Inclusive id range covering roughly 90% of the 0..100 id space
/// (91 of the 100 possible values).
fn get_90_percent() -> RangeInclusive<u64> {
    RangeInclusive::new(0, 90)
}
/// Inclusive id range covering roughly 10% of the id space
/// (11 of the 100 possible values).
fn get_10_percent() -> RangeInclusive<u64> {
    RangeInclusive::new(0, 10)
}
/// Inclusive id range matching a single id value (~1% of the id space).
fn get_1_percent() -> RangeInclusive<u64> {
    RangeInclusive::new(10, 10)
}
/// Ip range [0, 90_000]: ids are scaled by 1000 when encoded (see
/// `get_index_0_to_100`), so this covers ids 0..=90 (~90% of docs).
// `from_u128` is provided by the imported `MonotonicallyMappableToU128` trait.
fn get_90_percent_ip() -> RangeInclusive<Ipv6Addr> {
    let start = Ipv6Addr::from_u128(0);
    let end = Ipv6Addr::from_u128(90 * 1000);
    start..=end
}
/// Ip range [0, 10_000]: covers scaled ids 0..=10 (~10% of docs).
fn get_10_percent_ip() -> RangeInclusive<Ipv6Addr> {
    let start = Ipv6Addr::from_u128(0);
    let end = Ipv6Addr::from_u128(10 * 1000);
    start..=end
}
/// Ip range containing only the scaled id 10 (~1% of docs).
fn get_1_percent_ip() -> RangeInclusive<Ipv6Addr> {
    let start = Ipv6Addr::from_u128(10 * 1000);
    let end = Ipv6Addr::from_u128(10 * 1000);
    start..=end
}
/// Hit count reported by binggan as an extra output column.
struct NumHits {
    count: usize,
}
impl OutputValue for NumHits {
    fn column_title() -> &'static str {
        "NumHits"
    }
    fn format(&self) -> Option<String> {
        Some(self.count.to_string())
    }
}
/// Formats the range query `field:[start TO end] <suffix>` and executes it
/// against `index`, returning the hit count.
fn execute_query<T: Display>(
    field: &str,
    id_range: &RangeInclusive<T>,
    suffix: &str,
    index: &Index,
) -> NumHits {
    let query = format!(
        "{}:[{} TO {}] {}",
        field,
        id_range.start(),
        id_range.end(),
        suffix
    );
    execute_query_(&query, index)
}
/// Parses `query` (with no default fields) and returns its hit count.
///
/// NOTE(review): a fresh reader is acquired on every call, and this function
/// runs inside the registered bench closures, so reader acquisition is part
/// of the measured time — confirm this is intended.
fn execute_query_(query: &str, index: &Index) -> NumHits {
    let query_from_text = |text: &str| {
        QueryParser::for_index(index, vec![])
            .parse_query(text)
            .unwrap()
    };
    let query = query_from_text(query);
    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    // Top-10 scored docs and a count collector run together; only the count
    // is reported.
    let num_hits = searcher
        .search(&query, &(TopDocs::with_limit(10).order_by_score(), Count))
        .unwrap()
        .1;
    NumHits { count: num_hits }
}

View File

@@ -41,12 +41,6 @@ fn transform_range_before_linear_transformation(
if range.is_empty() {
return None;
}
if stats.min_value > *range.end() {
return None;
}
if stats.max_value < *range.start() {
return None;
}
let shifted_range =
range.start().saturating_sub(stats.min_value)..=range.end().saturating_sub(stats.min_value);
let start_before_gcd_multiplication: u64 = div_ceil(*shifted_range.start(), stats.gcd);

View File

@@ -446,7 +446,7 @@ impl DocumentQueryEvaluator {
let weight = query.weight(EnableScoring::disabled_from_schema(&schema))?;
// Get a scorer that iterates over matching documents
let mut scorer = weight.scorer(segment_reader, 1.0)?;
let mut scorer = weight.scorer(segment_reader, 1.0, 0)?;
// Create a BitSet to hold all matching documents
let mut bitset = BitSet::with_max_value(max_doc);

View File

@@ -12,8 +12,13 @@ pub trait Comparator<T>: Send + Sync + std::fmt::Debug + Default {
fn compare(&self, lhs: &T, rhs: &T) -> Ordering;
}
/// With the natural comparator, the top k collector will return
/// the top documents in decreasing order.
/// Compare values naturally (e.g. 1 < 2).
///
/// When used with `TopDocs`, which reverses the order, this results in a
/// "Descending" sort (Greatest values first).
///
/// `None` (or Null for `OwnedValue`) values are considered to be smaller than any other value,
/// and will therefore appear last in a descending sort (e.g. `[Some(20), Some(10), None]`).
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct NaturalComparator;
@@ -24,14 +29,18 @@ impl<T: PartialOrd> Comparator<T> for NaturalComparator {
}
}
/// Sorts document in reverse order.
/// Compare values in reverse (e.g. 2 < 1).
///
/// If the sort key is None, it will considered as the lowest value, and will therefore appear
/// first.
/// When used with `TopDocs`, which reverses the order, this results in an
/// "Ascending" sort (Smallest values first).
///
/// `None` is considered smaller than `Some` in the underlying comparator, but because the
/// comparison is reversed, `None` is effectively treated as the lowest value in the resulting
/// Ascending sort (e.g. `[None, Some(10), Some(20)]`).
///
/// The ReverseComparator does not necessarily imply that the sort order is reversed compared
/// to the NaturalComparator. In presence of a tie on the sort key, documents will always be
/// sorted by ascending `DocId`/`DocAddress` in TopN results, regardless of the comparator.
/// sorted by ascending `DocId`/`DocAddress` in TopN results, regardless of the sort key's order.
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct ReverseComparator;
@@ -44,11 +53,15 @@ where NaturalComparator: Comparator<T>
}
}
/// Sorts document in reverse order, but considers None as having the lowest value.
/// Compare values in reverse, but treating `None` as lower than `Some`.
///
/// When used with `TopDocs`, which reverses the order, this results in an
/// "Ascending" sort (Smallest values first), but with `None` values appearing last
/// (e.g. `[Some(10), Some(20), None]`).
///
/// This is usually what is wanted when sorting by a field in an ascending order.
/// For instance, in a e-commerce website, if I sort by price ascending, I most likely want the
/// cheapest items first, and the items without a price at last.
/// For instance, in an e-commerce website, if sorting by price ascending,
/// the cheapest items would appear first, and items without a price would appear last.
#[derive(Debug, Copy, Clone, Default)]
pub struct ReverseNoneIsLowerComparator;
@@ -108,6 +121,70 @@ impl Comparator<String> for ReverseNoneIsLowerComparator {
}
}
/// Compare values naturally, but treating `None` as higher than `Some`.
///
/// When used with `TopDocs`, which reverses the order, this results in a
/// "Descending" sort (Greatest values first), but with `None` values appearing first
/// (e.g. `[None, Some(20), Some(10)]`).
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct NaturalNoneIsHigherComparator;
impl<T> Comparator<Option<T>> for NaturalNoneIsHigherComparator
where NaturalComparator: Comparator<T>
{
    #[inline(always)]
    fn compare(&self, lhs_opt: &Option<T>, rhs_opt: &Option<T>) -> Ordering {
        match (lhs_opt, rhs_opt) {
            (None, None) => Ordering::Equal,
            // `None` ranks above any present value.
            (None, Some(_)) => Ordering::Greater,
            (Some(_), None) => Ordering::Less,
            // Both present: defer to the natural ordering.
            (Some(lhs), Some(rhs)) => NaturalComparator.compare(lhs, rhs),
        }
    }
}
// The impls below for plain (non-Option) key types delegate directly to
// `NaturalComparator`: with no `None` to position, the special case is moot.
// They appear to exist to satisfy `Comparator<T>` bounds used elsewhere
// (e.g. by `ComparatorEnum`) — confirm against the full file.
impl Comparator<u32> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &u32, rhs: &u32) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }
}
impl Comparator<u64> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &u64, rhs: &u64) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }
}
impl Comparator<f64> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &f64, rhs: &f64) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }
}
impl Comparator<f32> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &f32, rhs: &f32) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }
}
impl Comparator<i64> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &i64, rhs: &i64) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }
}
impl Comparator<String> for NaturalNoneIsHigherComparator {
    #[inline(always)]
    fn compare(&self, lhs: &String, rhs: &String) -> Ordering {
        NaturalComparator.compare(lhs, rhs)
    }
}
/// An enum representing the different sort orders.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)]
pub enum ComparatorEnum {
@@ -116,8 +193,10 @@ pub enum ComparatorEnum {
Natural,
/// Reverse order (See [ReverseComparator])
Reverse,
/// Reverse order by treating None as the lowest value.(See [ReverseNoneLowerComparator])
/// Reverse order by treating None as the lowest value. (See [ReverseNoneLowerComparator])
ReverseNoneLower,
/// Natural order but treating None as the highest value. (See [NaturalNoneIsHigherComparator])
NaturalNoneHigher,
}
impl From<Order> for ComparatorEnum {
@@ -134,6 +213,7 @@ where
ReverseNoneIsLowerComparator: Comparator<T>,
NaturalComparator: Comparator<T>,
ReverseComparator: Comparator<T>,
NaturalNoneIsHigherComparator: Comparator<T>,
{
#[inline(always)]
fn compare(&self, lhs: &T, rhs: &T) -> Ordering {
@@ -141,6 +221,7 @@ where
ComparatorEnum::Natural => NaturalComparator.compare(lhs, rhs),
ComparatorEnum::Reverse => ReverseComparator.compare(lhs, rhs),
ComparatorEnum::ReverseNoneLower => ReverseNoneIsLowerComparator.compare(lhs, rhs),
ComparatorEnum::NaturalNoneHigher => NaturalNoneIsHigherComparator.compare(lhs, rhs),
}
}
}
@@ -347,3 +428,31 @@ where
.convert_segment_sort_key(sort_key)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_natural_none_is_higher() {
        let comp = NaturalNoneIsHigherComparator;
        let null = None;
        let v1 = Some(1_u64);
        let v2 = Some(2_u64);
        // NaturalNoneIsHigherComparator logic:
        // 1. Delegates to NaturalComparator for non-nulls.
        //    NaturalComparator compare(2, 1) -> 2.cmp(1) -> Greater.
        assert_eq!(comp.compare(&v2, &v1), Ordering::Greater);
        // 2. Treats None (Null) as Greater than any value.
        //    compare(None, Some(2)) should be Greater.
        assert_eq!(comp.compare(&null, &v2), Ordering::Greater);
        // compare(Some(1), None) should be Less.
        assert_eq!(comp.compare(&v1, &null), Ordering::Less);
        // compare(None, None) should be Equal.
        assert_eq!(comp.compare(&null, &null), Ordering::Equal);
    }
}

View File

@@ -40,6 +40,9 @@ pub trait DocSet: Send {
/// of `DocSet` should support it.
///
/// Calling `seek(TERMINATED)` is also legal and is the normal way to consume a `DocSet`.
///
/// `target` has to be larger or equal to `.doc()` when calling `seek`.
/// If `target` is equal to `.doc()` then the DocSet should not advance.
fn seek(&mut self, target: DocId) -> DocId {
let mut doc = self.doc();
debug_assert!(doc <= target);
@@ -49,6 +52,33 @@ pub trait DocSet: Send {
doc
}
/// Seeks to `target` if possible and returns true if `target` is in the DocSet.
///
/// DocSets that already have an efficient `seek` method don't need to implement
/// `seek_into_the_danger_zone`. All wrapper DocSets should forward
/// `seek_into_the_danger_zone` to the underlying DocSet.
///
/// ## API Behaviour
/// If `seek_into_the_danger_zone` returns true, a call to `doc()` has to return `target`.
/// If it returns false, a call to `doc()` may return any doc between the last doc that
/// matched and `target`, or a doc that is a valid next hit after `target`. The DocSet is
/// considered to be in an invalid state until `seek_into_the_danger_zone` returns true
/// again.
///
/// `target` needs to be equal to or larger than `doc` when in a valid state.
///
/// Consecutive calls are not allowed to have decreasing `target` values.
///
/// # Warning
/// This is an advanced API used by intersection. The API contract is tricky, avoid using it.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    // Only move when strictly behind the target: `seek` requires
    // `target >= self.doc()`, and seeking to the current doc is a no-op anyway.
    if self.doc() < target {
        self.seek(target);
    }
    // Either we landed exactly on `target` (a hit), or we jumped past it.
    self.doc() == target
}
/// Fills a given mutable buffer with the next doc ids from the
/// `DocSet`
///
@@ -94,6 +124,15 @@ pub trait DocSet: Send {
/// which would be the number of documents in the DocSet.
///
/// By default this returns `size_hint()`.
///
/// DocSets may have vastly different cost depending on their type,
/// e.g. an intersection with 10 hits is much cheaper than
/// a phrase search with 10 hits, since it needs to load positions.
///
/// ### Future Work
/// We may want to differentiate `DocSet` costs in a more granular way, e.g.
/// creation_cost, advance_cost, seek_cost, to get a good estimation of
/// what query types to choose.
fn cost(&self) -> u64 {
    self.size_hint() as u64
}
@@ -128,6 +167,19 @@ pub trait DocSet: Send {
}
}
/// Drains the `DocSet` into a `Vec` holding every remaining doc id,
/// starting with (and including) the doc it is currently positioned on.
#[cfg(test)]
pub fn docset_to_doc_vec(mut doc_set: Box<dyn DocSet>) -> Vec<DocId> {
    std::iter::from_fn(|| {
        let doc = doc_set.doc();
        if doc == TERMINATED {
            None
        } else {
            doc_set.advance();
            Some(doc)
        }
    })
    .collect()
}
impl DocSet for &mut dyn DocSet {
fn advance(&mut self) -> u32 {
(**self).advance()
@@ -137,6 +189,10 @@ impl DocSet for &mut dyn DocSet {
(**self).seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
(**self).seek_into_the_danger_zone(target)
}
fn doc(&self) -> u32 {
(**self).doc()
}
@@ -169,6 +225,11 @@ impl<TDocSet: DocSet + ?Sized> DocSet for Box<TDocSet> {
unboxed.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.seek_into_the_danger_zone(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {
let unboxed: &mut TDocSet = self.borrow_mut();
unboxed.fill_buffer(buffer)

View File

@@ -113,7 +113,7 @@ mod tests {
IndexRecordOption::WithFreqs,
);
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32)?;
let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32, 0)?;
assert_eq!(scorer.doc(), 0);
assert!((scorer.score() - 0.22920431).abs() < 0.001f32);
assert_eq!(scorer.advance(), 1);
@@ -142,7 +142,7 @@ mod tests {
IndexRecordOption::WithFreqs,
);
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32)?;
let mut scorer = weight.scorer(searcher.segment_reader(0), 1.0f32, 0)?;
assert_eq!(scorer.doc(), 0);
assert!((scorer.score() - 0.22920431).abs() < 0.001f32);
assert_eq!(scorer.advance(), 1);

View File

@@ -14,6 +14,7 @@ use crate::positions::PositionReader;
use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo};
use crate::schema::{IndexRecordOption, Term, Type};
use crate::termdict::TermDictionary;
use crate::DocId;
/// The inverted index reader is in charge of accessing
/// the inverted index associated with a specific field.
@@ -192,9 +193,34 @@ impl InvertedIndexReader {
term: &Term,
option: IndexRecordOption,
) -> io::Result<Option<BlockSegmentPostings>> {
self.get_term_info(term)?
.map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
.transpose()
let Some(term_info) = self.get_term_info(term)? else {
return Ok(None);
};
let block_postings_not_loaded =
self.read_block_postings_from_terminfo(&term_info, option)?;
Ok(Some(block_postings_not_loaded))
}
/// Returns a block postings given a `term_info`, positioned on the block
/// that may contain `seek_doc`.
/// This method is for an advanced usage only.
///
/// The returned `usize` is the position within the loaded block produced by
/// the seek (0 when `seek_doc` is 0, i.e. no seek performed).
///
/// Most users should prefer using [`Self::read_postings()`] instead.
pub(crate) fn read_block_postings_from_terminfo_with_seek(
    &self,
    term_info: &TermInfo,
    requested_option: IndexRecordOption,
    seek_doc: DocId,
) -> io::Result<(BlockSegmentPostings, usize)> {
    // Restrict the postings file to the byte range owned by this term.
    let postings_data = self
        .postings_file_slice
        .slice(term_info.postings_range.clone());
    BlockSegmentPostings::open(
        term_info.doc_freq,
        postings_data,
        self.record_option,
        requested_option,
        seek_doc,
    )
}
/// Returns a block postings given a `term_info`.
@@ -206,15 +232,9 @@ impl InvertedIndexReader {
term_info: &TermInfo,
requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> {
let postings_data = self
.postings_file_slice
.slice(term_info.postings_range.clone());
BlockSegmentPostings::open(
term_info.doc_freq,
postings_data,
self.record_option,
requested_option,
)
let (block_segment_postings, _) =
self.read_block_postings_from_terminfo_with_seek(term_info, requested_option, 0)?;
Ok(block_segment_postings)
}
/// Returns a posting object given a `term_info`.
@@ -224,13 +244,13 @@ impl InvertedIndexReader {
pub fn read_postings_from_terminfo(
&self,
term_info: &TermInfo,
option: IndexRecordOption,
record_option: IndexRecordOption,
seek_doc: DocId,
) -> io::Result<SegmentPostings> {
let option = option.downgrade(self.record_option);
let block_postings = self.read_block_postings_from_terminfo(term_info, option)?;
let (block_segment_postings, position_within_block) =
self.read_block_postings_from_terminfo_with_seek(term_info, record_option, seek_doc)?;
let position_reader = {
if option.has_positions() {
if record_option.has_positions() {
let positions_data = self
.positions_file_slice
.read_bytes_slice(term_info.positions_range.clone())?;
@@ -241,8 +261,9 @@ impl InvertedIndexReader {
}
};
Ok(SegmentPostings::from_block_postings(
block_postings,
block_segment_postings,
position_reader,
position_within_block,
))
}
@@ -268,7 +289,7 @@ impl InvertedIndexReader {
option: IndexRecordOption,
) -> io::Result<Option<SegmentPostings>> {
self.get_term_info(term)?
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option))
.map(move |term_info| self.read_postings_from_terminfo(&term_info, option, 0u32))
.transpose()
}

View File

@@ -28,19 +28,12 @@ struct InnerDeleteQueue {
/// Several consumers can hold a reference to it. Delete operations
/// get dropped/gc'ed when no more consumers are holding a reference
/// to them.
#[derive(Clone)]
#[derive(Clone, Default)]
pub struct DeleteQueue {
inner: Arc<RwLock<InnerDeleteQueue>>,
}
impl DeleteQueue {
/// Creates a new empty delete queue.
pub fn new() -> DeleteQueue {
DeleteQueue {
inner: Arc::default(),
}
}
fn get_last_block(&self) -> Arc<Block> {
{
// try get the last block with simply acquiring the read lock.
@@ -256,7 +249,12 @@ mod tests {
struct DummyWeight;
impl Weight for DummyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
_reader: &SegmentReader,
_boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
Err(crate::TantivyError::InternalError("dummy impl".to_owned()))
}
@@ -267,7 +265,7 @@ mod tests {
#[test]
fn test_deletequeue() {
let delete_queue = DeleteQueue::new();
let delete_queue = DeleteQueue::default();
let make_op = |i: usize| DeleteOperation {
opstamp: i as u64,

View File

@@ -303,7 +303,7 @@ impl<D: Document> IndexWriter<D> {
let (document_sender, document_receiver) =
crossbeam_channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
let delete_queue = DeleteQueue::new();
let delete_queue = DeleteQueue::default();
let current_opstamp = index.load_metas()?.opstamp;

View File

@@ -367,8 +367,11 @@ impl IndexMerger {
for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
let segment_reader = &self.readers[segment_ord];
let inverted_index: &InvertedIndexReader = &field_readers[segment_ord];
let segment_postings = inverted_index
.read_postings_from_terminfo(&term_info, segment_postings_option)?;
let segment_postings = inverted_index.read_postings_from_terminfo(
&term_info,
segment_postings_option,
0u32,
)?;
let alive_bitset_opt = segment_reader.alive_bitset();
let doc_freq = if let Some(alive_bitset) = alive_bitset_opt {
segment_postings.doc_freq_given_deletes(alive_bitset)

View File

@@ -4,7 +4,7 @@
//! `IndexWriter` is the main entry point for that, which created from
//! [`Index::writer`](crate::Index::writer).
pub mod delete_queue;
pub(crate) mod delete_queue;
pub(crate) mod path_to_unordered_id;
pub(crate) mod doc_id_mapping;

View File

@@ -117,7 +117,7 @@ mod tests {
#[test]
fn test_segment_register() {
let inventory = SegmentMetaInventory::default();
let delete_queue = DeleteQueue::new();
let delete_queue = DeleteQueue::default();
let mut segment_register = SegmentRegister::default();
let segment_id_a = SegmentId::generate_random();

View File

@@ -99,7 +99,8 @@ impl BlockSegmentPostings {
data: FileSlice,
mut record_option: IndexRecordOption,
requested_option: IndexRecordOption,
) -> io::Result<BlockSegmentPostings> {
seek_doc: DocId,
) -> io::Result<(BlockSegmentPostings, usize)> {
let bytes = data.read_bytes()?;
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, bytes)?;
let skip_reader = match skip_data_opt {
@@ -125,7 +126,7 @@ impl BlockSegmentPostings {
(_, _) => FreqReadingOption::ReadFreq,
};
let mut block_segment_postings = BlockSegmentPostings {
let mut block_segment_postings: BlockSegmentPostings = BlockSegmentPostings {
doc_decoder: BlockDecoder::with_val(TERMINATED),
block_loaded: false,
freq_decoder: BlockDecoder::with_val(1),
@@ -135,8 +136,13 @@ impl BlockSegmentPostings {
data: postings_data,
skip_reader,
};
block_segment_postings.load_block();
Ok(block_segment_postings)
let inner_pos = if seek_doc == 0 {
block_segment_postings.load_block();
0
} else {
block_segment_postings.seek(seek_doc)
};
Ok((block_segment_postings, inner_pos))
}
/// Returns the block_max_score for the current block.
@@ -258,7 +264,9 @@ impl BlockSegmentPostings {
self.doc_decoder.output_len
}
/// Position on a block that may contains `target_doc`.
/// Position on a block that may contain `target_doc`, and return the
/// position of the first document greater than or equal to `target_doc`
/// within that block.
///
/// If all docs are smaller than target, the block loaded may be empty,
/// or be the last, incomplete VInt block.
@@ -453,7 +461,7 @@ mod tests {
doc_ids.push(130);
{
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
let mut docset = SegmentPostings::from_block_postings(block_segments, None, 0);
assert_eq!(docset.seek(128), 129);
assert_eq!(docset.doc(), 129);
assert_eq!(docset.advance(), 130);
@@ -461,8 +469,8 @@ mod tests {
assert_eq!(docset.advance(), TERMINATED);
}
{
let block_segments = build_block_postings(&doc_ids).unwrap();
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None, 0);
assert_eq!(docset.seek(129), 129);
assert_eq!(docset.doc(), 129);
assert_eq!(docset.advance(), 130);
@@ -471,7 +479,7 @@ mod tests {
}
{
let block_segments = build_block_postings(&doc_ids)?;
let mut docset = SegmentPostings::from_block_postings(block_segments, None);
let mut docset = SegmentPostings::from_block_postings(block_segments, None, 0);
assert_eq!(docset.doc(), 0);
assert_eq!(docset.seek(131), TERMINATED);
assert_eq!(docset.doc(), TERMINATED);

View File

@@ -9,6 +9,7 @@ const COMPRESSED_BLOCK_MAX_SIZE: usize = COMPRESSION_BLOCK_SIZE * MAX_VINT_SIZE;
mod vint;
/// Returns the size in bytes of a compressed block, given `num_bits`.
#[inline]
pub fn compressed_block_size(num_bits: u8) -> usize {
    usize::from(num_bits) * COMPRESSION_BLOCK_SIZE / 8
}

View File

@@ -79,14 +79,15 @@ impl SegmentPostings {
.close_term(docs.len() as u32)
.expect("In memory Serialization should never fail.");
}
let block_segment_postings = BlockSegmentPostings::open(
let (block_segment_postings, position_within_block) = BlockSegmentPostings::open(
docs.len() as u32,
FileSlice::from(buffer),
IndexRecordOption::Basic,
IndexRecordOption::Basic,
0u32,
)
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None)
SegmentPostings::from_block_postings(block_segment_postings, None, position_within_block)
}
/// Helper functions to create `SegmentPostings` for tests.
@@ -127,28 +128,29 @@ impl SegmentPostings {
postings_serializer
.close_term(doc_and_tfs.len() as u32)
.unwrap();
let block_segment_postings = BlockSegmentPostings::open(
let (block_segment_postings, position_within_block) = BlockSegmentPostings::open(
doc_and_tfs.len() as u32,
FileSlice::from(buffer),
IndexRecordOption::WithFreqs,
IndexRecordOption::WithFreqs,
0u32,
)
.unwrap();
SegmentPostings::from_block_postings(block_segment_postings, None)
SegmentPostings::from_block_postings(block_segment_postings, None, position_within_block)
}
/// Reads a Segment postings from an &[u8]
///
/// * `len` - number of document in the posting lists.
/// * `data` - data array. The complete data is not necessarily used.
/// * `freq_handler` - the freq handler is in charge of decoding frequencies and/or positions
/// Creates a Segment Postings from a
/// - `BlockSegmentPostings`,
/// - a position reader
/// - a target document to seek to
pub(crate) fn from_block_postings(
segment_block_postings: BlockSegmentPostings,
position_reader: Option<PositionReader>,
position_within_block: usize,
) -> SegmentPostings {
SegmentPostings {
block_cursor: segment_block_postings,
cur: 0, // cursor within the block
cur: position_within_block,
position_reader,
}
}

View File

@@ -21,7 +21,12 @@ impl Query for AllQuery {
pub struct AllWeight;
impl Weight for AllWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let all_scorer = AllScorer::new(reader.max_doc());
if boost != 1.0 {
Ok(Box::new(BoostScorer::new(all_scorer, boost)))
@@ -62,6 +67,15 @@ impl DocSet for AllScorer {
self.doc
}
fn seek(&mut self, target: DocId) -> DocId {
    debug_assert!(target >= self.doc);
    // Jump directly to `target`; anything at or beyond `max_doc` is exhausted.
    self.doc = if target >= self.max_doc {
        TERMINATED
    } else {
        target
    };
    self.doc
}
fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {
if self.doc() == TERMINATED {
return 0;
@@ -131,7 +145,7 @@ mod tests {
let weight = AllQuery.weight(EnableScoring::disabled_from_schema(&index.schema()))?;
{
let reader = searcher.segment_reader(0);
let mut scorer = weight.scorer(reader, 1.0)?;
let mut scorer = weight.scorer(reader, 1.0, 0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), 1u32);
assert_eq!(scorer.doc(), 1u32);
@@ -139,7 +153,7 @@ mod tests {
}
{
let reader = searcher.segment_reader(1);
let mut scorer = weight.scorer(reader, 1.0)?;
let mut scorer = weight.scorer(reader, 1.0, 0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.advance(), TERMINATED);
}
@@ -154,12 +168,12 @@ mod tests {
let weight = AllQuery.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let reader = searcher.segment_reader(0);
{
let mut scorer = weight.scorer(reader, 2.0)?;
let mut scorer = weight.scorer(reader, 2.0, 0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 2.0);
}
{
let mut scorer = weight.scorer(reader, 1.5)?;
let mut scorer = weight.scorer(reader, 1.5, 0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.5);
}

View File

@@ -84,7 +84,12 @@ where
A: Automaton + Send + Sync + 'static,
A::State: Clone,
{
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
let inverted_index = reader.inverted_index(self.field)?;
@@ -92,8 +97,12 @@ where
let mut term_stream = self.automaton_stream(term_dict)?;
while term_stream.advance() {
let term_info = term_stream.value();
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
let (mut block_segment_postings, _) = inverted_index
.read_block_postings_from_terminfo_with_seek(
term_info,
IndexRecordOption::Basic,
seek_doc,
)?;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
@@ -111,7 +120,7 @@ where
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if scorer.seek(doc) == doc {
Ok(Explanation::new("AutomatonScorer", 1.0))
} else {
@@ -186,7 +195,7 @@ mod tests {
let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
let reader = index.reader()?;
let searcher = reader.searcher();
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.0);
assert_eq!(scorer.advance(), 2u32);
@@ -203,7 +212,7 @@ mod tests {
let automaton_weight = AutomatonWeight::new(field, PrefixedByA);
let reader = index.reader()?;
let searcher = reader.searcher();
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.32)?;
let mut scorer = automaton_weight.scorer(searcher.segment_reader(0u32), 1.32, 0)?;
assert_eq!(scorer.doc(), 0u32);
assert_eq!(scorer.score(), 1.32);
Ok(())

View File

@@ -483,7 +483,7 @@ mod tests {
let checkpoints_for_each_pruning =
compute_checkpoints_for_each_pruning(term_scorers.clone(), top_k);
let checkpoints_manual =
compute_checkpoints_manual(term_scorers.clone(), top_k, 100_000);
compute_checkpoints_manual(term_scorers.clone(), top_k, max_doc as u32);
assert_eq!(checkpoints_for_each_pruning.len(), checkpoints_manual.len());
for (&(left_doc, left_score), &(right_doc, right_score)) in checkpoints_for_each_pruning
.iter()

View File

@@ -12,7 +12,7 @@ use crate::query::{
intersect_scorers, AllScorer, BufferedUnionScorer, EmptyScorer, Exclude, Explanation, Occur,
RequiredOptionalScorer, Scorer, Weight,
};
use crate::{DocId, Score};
use crate::{DocId, Score, TERMINATED};
enum SpecializedScorer {
TermUnion(Vec<TermScorer>),
@@ -156,6 +156,19 @@ fn effective_should_scorer_for_union<TScoreCombiner: ScoreCombiner>(
}
}
/// Builds a scorer for `weight`, positioned at or after `target_doc`.
///
/// When `target_doc` is already past the segment's `max_doc`, no document can
/// match, so we short-circuit with an `EmptyScorer` instead of building one.
fn create_scorer(
    weight: &dyn Weight,
    reader: &SegmentReader,
    boost: Score,
    target_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
    if target_doc < reader.max_doc() {
        weight.scorer(reader, boost, target_doc)
    } else {
        Ok(Box::new(EmptyScorer))
    }
}
enum ShouldScorersCombinationMethod {
// Should scorers are irrelevant.
Ignored,
@@ -207,10 +220,29 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
&self,
reader: &SegmentReader,
boost: Score,
mut seek_first_doc: DocId,
) -> crate::Result<HashMap<Occur, Vec<Box<dyn Scorer>>>> {
let mut per_occur_scorers: HashMap<Occur, Vec<Box<dyn Scorer>>> = HashMap::new();
for (occur, subweight) in &self.weights {
let sub_scorer: Box<dyn Scorer> = subweight.scorer(reader, boost)?;
let (mut must_weights, other_weights): (Vec<(Occur, _)>, Vec<(Occur, _)>) = self
.weights
.iter()
.map(|(occur, weight)| (*occur, weight))
.partition(|(occur, _weight)| *occur == Occur::Must);
// We start by must weights in order to get the best "seek_first_doc" so that we
// can skip the first few documents of the other scorers.
must_weights.sort_by_key(|weight| weight.1.intersection_priority());
for (_, must_sub_weight) in must_weights {
let sub_scorer: Box<dyn Scorer> =
create_scorer(must_sub_weight.as_ref(), reader, boost, seek_first_doc)?;
seek_first_doc = seek_first_doc.max(sub_scorer.doc());
per_occur_scorers
.entry(Occur::Must)
.or_default()
.push(sub_scorer);
}
for (occur, sub_weight) in &other_weights {
let sub_scorer: Box<dyn Scorer> =
create_scorer(sub_weight.as_ref(), reader, boost, seek_first_doc)?;
per_occur_scorers
.entry(*occur)
.or_default()
@@ -224,9 +256,10 @@ impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
reader: &SegmentReader,
boost: Score,
score_combiner_fn: impl Fn() -> TComplexScoreCombiner,
seek_doc: u32,
) -> crate::Result<SpecializedScorer> {
let num_docs = reader.num_docs();
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
let mut per_occur_scorers = self.per_occur_scorers(reader, boost, seek_doc)?;
// Indicate how should clauses are combined with must clauses.
let mut must_scorers: Vec<Box<dyn Scorer>> =
@@ -407,7 +440,7 @@ fn remove_and_count_all_and_empty_scorers(
if scorer.is::<AllScorer>() {
counts.num_all_scorers += 1;
false
} else if scorer.is::<EmptyScorer>() {
} else if scorer.doc() == TERMINATED {
counts.num_empty_scorers += 1;
false
} else {
@@ -418,7 +451,12 @@ fn remove_and_count_all_and_empty_scorers(
}
impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombiner> {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let num_docs = reader.num_docs();
if self.weights.is_empty() {
Ok(Box::new(EmptyScorer))
@@ -427,15 +465,15 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
if occur == Occur::MustNot {
Ok(Box::new(EmptyScorer))
} else {
weight.scorer(reader, boost)
weight.scorer(reader, boost, seek_doc)
}
} else if self.scoring_enabled {
self.complex_scorer(reader, boost, &self.score_combiner_fn)
self.complex_scorer(reader, boost, &self.score_combiner_fn, seek_doc)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, &self.score_combiner_fn, num_docs)
})
} else {
self.complex_scorer(reader, boost, DoNothingCombiner::default)
self.complex_scorer(reader, boost, DoNothingCombiner::default, seek_doc)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, DoNothingCombiner::default, num_docs)
})
@@ -443,7 +481,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -467,7 +505,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn, 0)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
let mut union_scorer = BufferedUnionScorer::build(
@@ -489,7 +527,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner)?;
let scorer = self.complex_scorer(reader, 1.0, || DoNothingCombiner, 0u32)?;
let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];
match scorer {
@@ -524,7 +562,7 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn, 0u32)?;
match scorer {
SpecializedScorer::TermUnion(term_scorers) => {
super::block_wand(term_scorers, threshold, callback);

View File

@@ -57,7 +57,7 @@ mod tests {
let query = query_parser.parse_query("+a")?;
let searcher = index.reader()?.searcher();
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert!(scorer.is::<TermScorer>());
Ok(())
}
@@ -70,13 +70,13 @@ mod tests {
{
let query = query_parser.parse_query("+a +b +c")?;
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert!(scorer.is::<Intersection<TermScorer>>());
}
{
let query = query_parser.parse_query("+a +(b c)")?;
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert!(scorer.is::<Intersection<Box<dyn Scorer>>>());
}
Ok(())
@@ -90,14 +90,14 @@ mod tests {
{
let query = query_parser.parse_query("+a b")?;
let weight = query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert!(scorer
.is::<RequiredOptionalScorer<Box<dyn Scorer>, Box<dyn Scorer>, SumCombiner>>());
}
{
let query = query_parser.parse_query("+a b")?;
let weight = query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert!(scorer.is::<TermScorer>());
}
Ok(())
@@ -244,12 +244,14 @@ mod tests {
.weight(EnableScoring::enabled_from_searcher(&searcher))
.unwrap();
{
let mut boolean_scorer = boolean_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let mut boolean_scorer =
boolean_weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 0.84163445);
}
{
let mut boolean_scorer = boolean_weight.scorer(searcher.segment_reader(0u32), 2.0)?;
let mut boolean_scorer =
boolean_weight.scorer(searcher.segment_reader(0u32), 2.0, 0)?;
assert_eq!(boolean_scorer.doc(), 0u32);
assert_nearly_equals!(boolean_scorer.score(), 1.6832689);
}
@@ -343,7 +345,7 @@ mod tests {
(Occur::Must, term_match_some.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
assert!(scorer.is::<TermScorer>());
}
{
@@ -353,7 +355,7 @@ mod tests {
(Occur::Must, term_match_none.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
assert!(scorer.is::<EmptyScorer>());
}
{
@@ -362,7 +364,7 @@ mod tests {
(Occur::Should, term_match_none.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
assert!(scorer.is::<AllScorer>());
}
{
@@ -371,7 +373,7 @@ mod tests {
(Occur::Should, term_match_none.box_clone()),
]);
let weight = query.weight(EnableScoring::disabled_from_searcher(&searcher))?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32)?;
let scorer = weight.scorer(searcher.segment_reader(0u32), 1.0f32, 0)?;
assert!(scorer.is::<TermScorer>());
}
Ok(())
@@ -611,6 +613,134 @@ mod tests {
Ok(())
}
/// Test that the seek_doc parameter correctly skips documents in BooleanWeight::scorer.
///
/// When seek_doc is provided, the scorer should start from that document (or the first
/// matching document >= seek_doc), skipping earlier documents.
#[test]
pub fn test_boolean_weight_seek_doc() -> crate::Result<()> {
    let mut schema_builder = Schema::builder();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let value_field = schema_builder.add_u64_field("value", FAST);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer: IndexWriter = index.writer_for_tests()?;
    // Docs 0..=9 carry value = doc_id * 10 (0, 10, ..., 90);
    // doc 10 carries value 50, so it also falls inside the range [30, 70].
    for doc_id in 0u64..10u64 {
        index_writer.add_document(doc!(
            text_field => "hello",
            value_field => doc_id * 10
        ))?;
    }
    index_writer.add_document(doc!(
        text_field => "hello",
        value_field => 50u64
    ))?;
    index_writer.commit()?;
    let searcher = index.reader()?.searcher();
    let segment_reader = searcher.segment_reader(0);
    // Boolean query: MUST(term "hello") AND MUST(range 30..=70).
    // Matching docs (values 30, 40, 50, 60, 70, 50): 3, 4, 5, 6, 7, 10.
    let term_query: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(text_field, "hello"),
        IndexRecordOption::Basic,
    ));
    let range_query: Box<dyn Query> = Box::new(RangeQuery::new(
        Bound::Included(Term::from_field_u64(value_field, 30)),
        Bound::Included(Term::from_field_u64(value_field, 70)),
    ));
    let boolean_query =
        BooleanQuery::new(vec![(Occur::Must, term_query), (Occur::Must, range_query)]);
    let weight = boolean_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
    let docs_from = |seek_from: DocId| {
        let scorer = weight.scorer(segment_reader, 1.0f32, seek_from).unwrap();
        crate::docset::docset_to_doc_vec(scorer)
    };
    assert_eq!(docs_from(0), vec![3, 4, 5, 6, 7, 10]);
    assert_eq!(docs_from(1), vec![3, 4, 5, 6, 7, 10]);
    assert_eq!(docs_from(3), vec![3, 4, 5, 6, 7, 10]);
    assert_eq!(docs_from(4), vec![4, 5, 6, 7, 10]);
    assert_eq!(docs_from(7), vec![7, 10]);
    assert_eq!(docs_from(8), vec![10]);
    assert_eq!(docs_from(10), vec![10]);
    assert_eq!(docs_from(11), Vec::<DocId>::new());
    Ok(())
}
/// Test that the seek_doc parameter works correctly with SHOULD clauses.
#[test]
pub fn test_boolean_weight_seek_doc_with_should() -> crate::Result<()> {
    let mut schema_builder = Schema::builder();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer: IndexWriter = index.writer_for_tests()?;
    // doc 0: "a b", doc 1: "a", doc 2: "b", doc 3: "c", doc 4: "a b c"
    for text in ["a b", "a", "b", "c", "a b c"] {
        index_writer.add_document(doc!(text_field => text))?;
    }
    index_writer.commit()?;
    let searcher = index.reader()?.searcher();
    let segment_reader = searcher.segment_reader(0);
    // Boolean query: SHOULD(term "a") OR SHOULD(term "b") -> docs 0, 1, 2, 4.
    let make_term_query = |token: &str| -> Box<dyn Query> {
        Box::new(TermQuery::new(
            Term::from_field_text(text_field, token),
            IndexRecordOption::Basic,
        ))
    };
    let boolean_query = BooleanQuery::new(vec![
        (Occur::Should, make_term_query("a")),
        (Occur::Should, make_term_query("b")),
    ]);
    let weight = boolean_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
    let docs_from = |seek_from: DocId| {
        let scorer = weight.scorer(segment_reader, 1.0f32, seek_from).unwrap();
        crate::docset::docset_to_doc_vec(scorer)
    };
    assert_eq!(docs_from(0), vec![0, 1, 2, 4]);
    assert_eq!(docs_from(1), vec![1, 2, 4]);
    assert_eq!(docs_from(2), vec![2, 4]);
    assert_eq!(docs_from(3), vec![4]);
    assert_eq!(docs_from(4), vec![4]);
    assert_eq!(docs_from(5), Vec::<DocId>::new());
    Ok(())
}
/// Test multiple AllScorer instances in different clause types.
///
/// Verifies correct behavior when AllScorers appear in multiple positions.
@@ -678,7 +808,7 @@ mod tests {
#[cfg(test)]
mod proptest_boolean_query {
use std::collections::{BTreeMap, HashSet};
use std::ops::Bound;
use std::ops::{Bound, Range};
use proptest::collection::vec;
use proptest::prelude::*;
@@ -755,10 +885,10 @@ mod proptest_boolean_query {
}
}
fn doc_ids(num_docs: usize, num_fields: usize) -> impl Iterator<Item = DocId> {
fn doc_ids(num_docs: usize, num_fields: usize) -> Range<DocId> {
let permutations = 1 << num_fields;
let copies = (num_docs as f32 / permutations as f32).ceil() as u32;
(0..(permutations * copies)).into_iter()
0..(permutations * copies)
}
fn create_index_with_boolean_permutations(
@@ -817,7 +947,6 @@ mod proptest_boolean_query {
fn proptest_boolean_query() {
// In the presence of optimizations around buffering, it can take large numbers of
// documents to uncover some issues.
let num_docs = 10000;
let num_fields = 8;
let num_docs = 1 << num_fields;
let (index, fields, range_field) =

View File

@@ -67,8 +67,13 @@ impl BoostWeight {
}
impl Weight for BoostWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost)
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
self.weight.scorer(reader, boost * self.boost, seek_doc)
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
@@ -83,6 +88,10 @@ impl Weight for BoostWeight {
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
fn intersection_priority(&self) -> u32 {
self.weight.intersection_priority()
}
}
pub(crate) struct BoostScorer<S: Scorer> {
@@ -104,6 +113,9 @@ impl<S: Scorer> DocSet for BoostScorer<S> {
fn seek(&mut self, target: DocId) -> DocId {
self.underlying.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
self.underlying.seek_into_the_danger_zone(target)
}
fn fill_buffer(&mut self, buffer: &mut [DocId; COLLECT_BLOCK_BUFFER_LEN]) -> usize {
self.underlying.fill_buffer(buffer)

View File

@@ -63,13 +63,18 @@ impl ConstWeight {
}
impl Weight for ConstWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost)?;
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let inner_scorer = self.weight.scorer(reader, boost, seek_doc)?;
Ok(Box::new(ConstScorer::new(inner_scorer, boost * self.score)))
}
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
"Document #({doc}) does not match"
@@ -84,6 +89,10 @@ impl Weight for ConstWeight {
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
self.weight.count(reader)
}
fn intersection_priority(&self) -> u32 {
self.weight.intersection_priority()
}
}
/// Wraps a `DocSet` and simply returns a constant `Scorer`.

View File

@@ -62,6 +62,16 @@ impl<T: Scorer> DocSet for ScorerWrapper<T> {
self.current_doc = doc_id;
doc_id
}
fn seek(&mut self, target: DocId) -> DocId {
    // Delegate to the wrapped scorer and mirror its new position in our cache.
    self.current_doc = self.scorer.seek(target);
    self.current_doc
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    let is_hit = self.scorer.seek_into_the_danger_zone(target);
    // The underlying scorer may have moved on or past `target`; cache wherever
    // it actually landed so `doc()` stays consistent.
    self.current_doc = self.scorer.doc();
    is_hit
}
fn doc(&self) -> DocId {
self.current_doc

View File

@@ -26,13 +26,24 @@ impl Query for EmptyQuery {
/// It is useful for tests and handling edge cases.
pub struct EmptyWeight;
impl Weight for EmptyWeight {
fn scorer(&self, _reader: &SegmentReader, _boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
_reader: &SegmentReader,
_boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
Ok(Box::new(EmptyScorer))
}
fn explain(&self, _reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
Err(does_not_match(doc))
}
/// Returns a priority number used to sort weights when running an
/// intersection.
fn intersection_priority(&self) -> u32 {
0u32
}
}
/// `EmptyScorer` is a dummy `Scorer` in which no document matches.

View File

@@ -98,7 +98,12 @@ pub struct ExistsWeight {
}
impl Weight for ExistsWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let fast_field_reader = reader.fast_fields();
let mut column_handles = fast_field_reader.dynamic_column_handles(&self.field_name)?;
if self.field_type == Type::Json && self.json_subpaths {
@@ -166,7 +171,7 @@ impl Weight for ExistsWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}

View File

@@ -1,5 +1,5 @@
use super::size_hint::estimate_intersection;
use crate::docset::{DocSet, TERMINATED};
use crate::query::size_hint::estimate_intersection;
use crate::query::term_query::TermScorer;
use crate::query::{EmptyScorer, Scorer};
use crate::{DocId, Score};
@@ -12,6 +12,9 @@ use crate::{DocId, Score};
/// For better performance, the function uses a
/// specialized implementation if the two
/// shortest scorers are `TermScorer`s.
///
/// num_docs_segment is the number of documents in the segment. It is used for estimating the
/// `size_hint` of the intersection.
pub fn intersect_scorers(
mut scorers: Vec<Box<dyn Scorer>>,
num_docs_segment: u32,
@@ -105,32 +108,44 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
fn advance(&mut self) -> DocId {
let (left, right) = (&mut self.left, &mut self.right);
let mut candidate = left.advance();
if candidate == TERMINATED {
return TERMINATED;
}
'outer: loop {
loop {
// In the first part we look for a document in the intersection
// of the two rarest `DocSet` in the intersection.
loop {
let right_doc = right.seek(candidate);
candidate = left.seek(right_doc);
if candidate == right_doc {
if right.seek_into_the_danger_zone(candidate) {
break;
}
let right_doc = right.doc();
// TODO: Think about which value would make sense here
// It depends on the DocSet implementation, when a seek would outweigh an advance.
if right_doc > candidate.wrapping_add(100) {
candidate = left.seek(right_doc);
} else {
candidate = left.advance();
}
if candidate == TERMINATED {
return TERMINATED;
}
}
debug_assert_eq!(left.doc(), right.doc());
// test the remaining scorers;
for docset in self.others.iter_mut() {
let seek_doc = docset.seek(candidate);
if seek_doc > candidate {
candidate = left.seek(seek_doc);
continue 'outer;
}
// test the remaining scorers
if self
.others
.iter_mut()
.all(|docset| docset.seek_into_the_danger_zone(candidate))
{
debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
}
debug_assert_eq!(candidate, self.left.doc());
debug_assert_eq!(candidate, self.right.doc());
debug_assert!(self.others.iter().all(|docset| docset.doc() == candidate));
return candidate;
candidate = left.advance();
}
}
@@ -146,6 +161,19 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
doc
}
/// Seeks to `target` if necessary and reports whether `target` is a member of
/// this intersection.
///
/// Implementations are allowed to advance past `target` when that is cheaper;
/// the returned boolean only answers membership of `target` itself.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    // Short-circuit exactly like `a && b && all(others)`: later docsets are
    // only consulted if every earlier one contains `target`.
    if !self.left.seek_into_the_danger_zone(target) {
        return false;
    }
    if !self.right.seek_into_the_danger_zone(target) {
        return false;
    }
    self.others
        .iter_mut()
        .all(|docset| docset.seek_into_the_danger_zone(target))
}
fn doc(&self) -> DocId {
self.left.doc()
}
@@ -181,6 +209,8 @@ where
#[cfg(test)]
mod tests {
use proptest::prelude::*;
use super::Intersection;
use crate::docset::{DocSet, TERMINATED};
use crate::postings::tests::test_skip_against_unoptimized;
@@ -270,4 +300,38 @@ mod tests {
let intersection = Intersection::new(vec![a, b, c], 10);
assert_eq!(intersection.doc(), TERMINATED);
}
// Proptest strategy producing strictly increasing doc-id vectors:
// random values in [0, max_val), sorted and deduplicated.
fn sorted_deduped_vec(max_val: u32, max_size: usize) -> impl Strategy<Value = Vec<u32>> {
    prop::collection::vec(0..max_val, 0..max_size).prop_map(|mut ids| {
        // u32 keys: unstable sort yields the same result and avoids allocation.
        ids.sort_unstable();
        ids.dedup();
        ids
    })
}
proptest! {
    #[test]
    fn prop_test_intersection_consistency(
        a in sorted_deduped_vec(100, 10),
        b in sorted_deduped_vec(100, 10),
        num_docs in 100u32..500u32
    ) {
        // Drive an `Intersection` over two random sorted id lists and compare
        // it against a naive reference intersection.
        let left = VecDocSet::from(a.clone());
        let right = VecDocSet::from(b.clone());
        let mut intersection = Intersection::new(vec![left, right], num_docs);
        // Reference: ids present in both `a` and `b` (both already sorted).
        let expected: Vec<u32> = a.iter()
            .cloned()
            .filter(|doc| b.contains(doc))
            .collect();
        // The docset must yield exactly the expected ids, in order...
        for expected_doc in expected {
            assert_eq!(intersection.doc(), expected_doc);
            intersection.advance();
        }
        // ...and then be exhausted.
        assert_eq!(intersection.doc(), TERMINATED);
    }
}
}

View File

@@ -70,9 +70,83 @@ pub use self::weight::Weight;
#[cfg(test)]
mod tests {
use crate::collector::TopDocs;
use crate::query::phrase_query::tests::create_index;
use crate::query::QueryParser;
use crate::schema::{Schema, TEXT};
use crate::{Index, Term};
use crate::{DocAddress, Index, Term};
/// Checks boolean queries mixing AND (intersection) with OR (union) sub-queries.
#[test]
pub fn test_mixed_intersection_and_union() -> crate::Result<()> {
    let index = create_index(&["a b", "a c", "a b c", "b"])?;
    let schema = index.schema();
    let text_field = schema.get_field("text").unwrap();
    let searcher = index.reader()?.searcher();
    // Parses `term` and returns the matching doc ids, ordered by descending score.
    let do_search = |term: &str| {
        let query = QueryParser::for_index(&index, vec![text_field])
            .parse_query(term)
            .unwrap();
        let top_docs: Vec<(f32, DocAddress)> = searcher
            .search(&query, &TopDocs::with_limit(10).order_by_score())
            .unwrap();
        top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
    };
    assert_eq!(do_search("a AND b"), vec![0, 2]);
    assert_eq!(do_search("(a OR b) AND C"), vec![2, 1]);
    // The intersection code has a specialized path for more than 2 docsets
    // (left, right + others). This places a union in the "others" slot so
    // that `seek_into_the_danger_zone` is exercised on the union scorer.
    assert_eq!(
        do_search("(a OR b) AND (c OR a) AND (b OR c)"),
        vec![2, 1, 0]
    );
    Ok(())
}
/// Same scenario as `test_mixed_intersection_and_union`, but padded with
/// thousands of filler documents so unions must skip across buffered blocks.
#[test]
pub fn test_mixed_intersection_and_union_with_skip() -> crate::Result<()> {
    // Test 4096 skip in BufferedUnionScorer
    let mut data: Vec<&str> = Vec::new();
    data.push("a b");
    // 5000 "z z" filler docs between interesting docs force large skips.
    let zz_data = vec!["z z"; 5000];
    data.extend_from_slice(&zz_data);
    data.extend_from_slice(&["a c"]);
    data.extend_from_slice(&zz_data);
    data.extend_from_slice(&["a b c", "b"]);
    let index = create_index(&data)?;
    let schema = index.schema();
    let text_field = schema.get_field("text").unwrap();
    let searcher = index.reader()?.searcher();
    // Parses `term` and returns the matching doc ids, ordered by descending score.
    let do_search = |term: &str| {
        let query = QueryParser::for_index(&index, vec![text_field])
            .parse_query(term)
            .unwrap();
        let top_docs: Vec<(f32, DocAddress)> = searcher
            .search(&query, &TopDocs::with_limit(10).order_by_score())
            .unwrap();
        top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
    };
    assert_eq!(do_search("a AND b"), vec![0, 10002]);
    assert_eq!(do_search("(a OR b) AND C"), vec![10002, 5001]);
    // The intersection code has a specialized path for more than 2 docsets
    // (left, right + others). This places a union in the "others" slot so
    // that `seek_into_the_danger_zone` is exercised on the union scorer.
    assert_eq!(
        do_search("(a OR b) AND (c OR a) AND (b OR c)"),
        vec![10002, 5001, 0]
    );
    Ok(())
}
#[test]
fn test_query_terms() {

View File

@@ -193,6 +193,14 @@ impl<TPostings: Postings> DocSet for PhrasePrefixScorer<TPostings> {
self.advance()
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    // A candidate from the underlying phrase scorer still has to end with a
    // matching prefix term before it counts as a hit.
    self.phrase_scorer.seek_into_the_danger_zone(target) && self.matches_prefix()
}
fn doc(&self) -> DocId {
self.phrase_scorer.doc()
}

View File

@@ -42,10 +42,11 @@ impl PhrasePrefixWeight {
Ok(FieldNormReader::constant(reader.max_doc(), 1))
}
pub(crate) fn phrase_scorer(
pub(crate) fn prefix_phrase_scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Option<PhrasePrefixScorer<SegmentPostings>>> {
let similarity_weight_opt = self
.similarity_weight_opt
@@ -54,14 +55,16 @@ impl PhrasePrefixWeight {
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
let inverted_index = reader.inverted_index(term.field())?;
let Some(term_info) = inverted_index.get_term_info(term)? else {
return Ok(None);
}
};
let postings = inverted_index.read_postings_from_terminfo(
&term_info,
IndexRecordOption::WithFreqsAndPositions,
seek_doc,
)?;
term_postings_list.push((offset, postings));
}
let inv_index = reader.inverted_index(self.prefix.1.field())?;
@@ -114,8 +117,13 @@ impl PhrasePrefixWeight {
}
impl Weight for PhrasePrefixWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.prefix_phrase_scorer(reader, boost, seek_doc)? {
Ok(Box::new(scorer))
} else {
Ok(Box::new(EmptyScorer))
@@ -123,7 +131,7 @@ impl Weight for PhrasePrefixWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
let scorer_opt = self.prefix_phrase_scorer(reader, 1.0, doc)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
@@ -140,6 +148,10 @@ impl Weight for PhrasePrefixWeight {
}
Ok(explanation)
}
fn intersection_priority(&self) -> u32 {
50u32
}
}
#[cfg(test)]
@@ -187,7 +199,7 @@ mod tests {
.unwrap()
.unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.prefix_phrase_scorer(searcher.segment_reader(0u32), 1.0, 0u32)?
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
@@ -214,7 +226,7 @@ mod tests {
.unwrap()
.unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.prefix_phrase_scorer(searcher.segment_reader(0u32), 1.0, 0u32)?
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);
@@ -238,7 +250,7 @@ mod tests {
.unwrap()
.is_none());
let weight = phrase_query.weight(enable_scoring).unwrap();
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.advance(), 2);
assert_eq!(phrase_scorer.doc(), 2);
@@ -259,7 +271,7 @@ mod tests {
]);
let enable_scoring = EnableScoring::enabled_from_searcher(&searcher);
let weight = phrase_query.weight(enable_scoring).unwrap();
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let mut phrase_scorer = weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
}

View File

@@ -84,7 +84,7 @@ pub(crate) mod tests {
let phrase_query = PhraseQuery::new(terms);
let phrase_weight =
phrase_query.phrase_weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0)?;
let mut phrase_scorer = phrase_weight.scorer(searcher.segment_reader(0), 1.0, 0)?;
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.advance(), TERMINATED);
Ok(())
@@ -343,6 +343,43 @@ pub(crate) mod tests {
Ok(())
}
/// Verifies that the `seek_doc` parameter used when building a phrase scorer
/// skips straight to the first phrase hit at or after the target.
#[test]
pub fn test_phrase_weight_seek_doc() -> crate::Result<()> {
    // Docs: 0 "c d", 1 "a b", 2 "e f", 3 "a b c", 4 "g h", 5 "a b", 6 "i j".
    // The phrase "a b" occurs in docs 1, 3 and 5.
    let index = create_index(&["c d", "a b", "e f", "a b c", "g h", "a b", "i j"])?;
    let text_field = index.schema().get_field("text").unwrap();
    let searcher = index.reader()?.searcher();
    let segment_reader = searcher.segment_reader(0);
    let phrase_query = PhraseQuery::new(vec![
        Term::from_field_text(text_field, "a"),
        Term::from_field_text(text_field, "b"),
    ]);
    let phrase_weight =
        phrase_query.phrase_weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
    // Collects every doc produced by a scorer created with the given seek target.
    let docs_from = |start: DocId| {
        let scorer = phrase_weight
            .scorer(segment_reader, 1.0f32, start)
            .unwrap();
        crate::docset::docset_to_doc_vec(scorer)
    };
    // (seek target, expected remaining phrase hits)
    let cases: [(DocId, &[DocId]); 8] = [
        (0, &[1, 3, 5]),
        (1, &[1, 3, 5]),
        (2, &[3, 5]),
        (3, &[3, 5]),
        (4, &[5]),
        (5, &[5]),
        (6, &[]),
        (7, &[]),
    ];
    for (start, expected) in cases {
        assert_eq!(docs_from(start), expected);
    }
    Ok(())
}
#[test]
pub fn test_phrase_query_on_json() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
@@ -373,7 +410,7 @@ pub(crate) mod tests {
.weight(EnableScoring::disabled_from_schema(searcher.schema()))
.unwrap();
let mut phrase_scorer = phrase_weight
.scorer(searcher.segment_reader(0), 1.0f32)
.scorer(searcher.segment_reader(0), 1.0f32, 0)
.unwrap();
let mut docs = Vec::new();
loop {

View File

@@ -382,8 +382,9 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
PostingsWithOffset::new(postings, (max_offset - offset) as u32)
})
.collect::<Vec<_>>();
let intersection_docset = Intersection::new(postings_with_offsets, num_docs);
let mut scorer = PhraseScorer {
intersection_docset: Intersection::new(postings_with_offsets, num_docs),
intersection_docset,
num_terms: num_docsets,
left_positions: Vec::with_capacity(100),
right_positions: Vec::with_capacity(100),
@@ -529,20 +530,34 @@ impl<TPostings: Postings> DocSet for PhraseScorer<TPostings> {
self.advance()
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    debug_assert!(target >= self.doc());
    // Only candidates where all terms co-occur get the positional
    // `phrase_match` check.
    self.intersection_docset.seek_into_the_danger_zone(target) && self.phrase_match()
}
fn doc(&self) -> DocId {
self.intersection_docset.doc()
}
fn size_hint(&self) -> u32 {
self.intersection_docset.size_hint()
// We adjust the intersection estimate, since actual phrase hits are much lower than where
// the all appear.
// The estimate should depend on average field length, e.g. if the field is really short
// a phrase hit is more likely
self.intersection_docset.size_hint() / (10 * self.num_terms as u32)
}
/// Returns a best-effort hint of the
/// cost to drive the docset.
fn cost(&self) -> u64 {
    // Evaluating phrase matches is generally more expensive than simple term
    // matches: while determining a *potential* hit (all terms co-occurring in
    // a doc) is cheap, confirming an actual hit requires loading the positions
    // for the doc and checking that the terms are adjacent.
    // So the cost estimate is the number of candidates to check (the
    // intersection's size hint) scaled by a conservative 10 * num_terms.
    self.intersection_docset.size_hint() as u64 * 10 * self.num_terms as u64
}
}

View File

@@ -43,6 +43,7 @@ impl PhraseWeight {
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight_opt = self
.similarity_weight_opt
@@ -51,14 +52,16 @@ impl PhraseWeight {
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
let inverted_index = reader.inverted_index(term.field())?;
let Some(term_info) = inverted_index.get_term_info(term)? else {
return Ok(None);
}
};
let postings = inverted_index.read_postings_from_terminfo(
&term_info,
IndexRecordOption::WithFreqsAndPositions,
seek_doc,
)?;
term_postings_list.push((offset, postings));
}
Ok(Some(PhraseScorer::new(
term_postings_list,
@@ -74,8 +77,13 @@ impl PhraseWeight {
}
impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost, seek_doc)? {
Ok(Box::new(scorer))
} else {
Ok(Box::new(EmptyScorer))
@@ -83,12 +91,12 @@ impl Weight for PhraseWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let scorer_opt = self.phrase_scorer(reader, 1.0)?;
let scorer_opt = self.phrase_scorer(reader, 1.0, doc)?;
if scorer_opt.is_none() {
return Err(does_not_match(doc));
}
let mut scorer = scorer_opt.unwrap();
if scorer.seek(doc) != doc {
if scorer.doc() != doc {
return Err(does_not_match(doc));
}
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
@@ -100,6 +108,10 @@ impl Weight for PhraseWeight {
}
Ok(explanation)
}
fn intersection_priority(&self) -> u32 {
40u32
}
}
#[cfg(test)]
@@ -122,7 +134,7 @@ mod tests {
let enable_scoring = EnableScoring::enabled_from_searcher(&searcher);
let phrase_weight = phrase_query.phrase_weight(enable_scoring).unwrap();
let mut phrase_scorer = phrase_weight
.phrase_scorer(searcher.segment_reader(0u32), 1.0)?
.phrase_scorer(searcher.segment_reader(0u32), 1.0, 0)?
.unwrap();
assert_eq!(phrase_scorer.doc(), 1);
assert_eq!(phrase_scorer.phrase_count(), 2);

View File

@@ -195,8 +195,11 @@ impl RegexPhraseWeight {
const SPARSE_TERM_DOC_THRESHOLD: u32 = 100;
for term_info in term_infos {
let mut term_posting = inverted_index
.read_postings_from_terminfo(term_info, IndexRecordOption::WithFreqsAndPositions)?;
let mut term_posting = inverted_index.read_postings_from_terminfo(
term_info,
IndexRecordOption::WithFreqsAndPositions,
0u32,
)?;
let num_docs = term_posting.doc_freq();
if num_docs < SPARSE_TERM_DOC_THRESHOLD {
@@ -269,7 +272,12 @@ impl RegexPhraseWeight {
}
impl Weight for RegexPhraseWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
_seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
if let Some(scorer) = self.phrase_scorer(reader, boost)? {
Ok(Box::new(scorer))
} else {

View File

@@ -61,12 +61,27 @@ pub(crate) struct RangeDocSet<T> {
const DEFAULT_FETCH_HORIZON: u32 = 128;
impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
pub(crate) fn new(value_range: RangeInclusive<T>, column: Column<T>) -> Self {
pub(crate) fn new(
value_range: RangeInclusive<T>,
column: Column<T>,
seek_first_doc: DocId,
) -> Self {
if *value_range.start() > column.max_value() || *value_range.end() < column.min_value() {
return Self {
value_range,
column,
loaded_docs: VecCursor::new(),
next_fetch_start: TERMINATED,
fetch_horizon: DEFAULT_FETCH_HORIZON,
last_seek_pos_opt: None,
};
}
let mut range_docset = Self {
value_range,
column,
loaded_docs: VecCursor::new(),
next_fetch_start: 0,
next_fetch_start: seek_first_doc,
fetch_horizon: DEFAULT_FETCH_HORIZON,
last_seek_pos_opt: None,
};
@@ -81,6 +96,9 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
/// Returns true if more data could be fetched
fn fetch_block(&mut self) {
if self.next_fetch_start >= self.column.num_docs() {
return;
}
const MAX_HORIZON: u32 = 100_000;
while self.loaded_docs.is_empty() {
let finished_to_end = self.fetch_horizon(self.fetch_horizon);
@@ -105,10 +123,10 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
fn fetch_horizon(&mut self, horizon: u32) -> bool {
let mut finished_to_end = false;
let limit = self.column.num_docs();
let mut end = self.next_fetch_start + horizon;
if end >= limit {
end = limit;
let num_docs = self.column.num_docs();
let mut fetch_end = self.next_fetch_start + horizon;
if fetch_end >= num_docs {
fetch_end = num_docs;
finished_to_end = true;
}
@@ -116,7 +134,7 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
let doc_buffer: &mut Vec<DocId> = self.loaded_docs.get_cleared_data();
self.column.get_docids_for_value_range(
self.value_range.clone(),
self.next_fetch_start..end,
self.next_fetch_start..fetch_end,
doc_buffer,
);
if let Some(last_doc) = last_doc {
@@ -124,7 +142,7 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> RangeDocSet<T> {
self.loaded_docs.next();
}
}
self.next_fetch_start = end;
self.next_fetch_start = fetch_end;
finished_to_end
}
@@ -136,9 +154,6 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> DocSet for RangeDocSe
if let Some(docid) = self.loaded_docs.next() {
return docid;
}
if self.next_fetch_start >= self.column.num_docs() {
return TERMINATED;
}
self.fetch_block();
self.loaded_docs.current().unwrap_or(TERMINATED)
}
@@ -174,15 +189,25 @@ impl<T: Send + Sync + PartialOrd + Copy + Debug + 'static> DocSet for RangeDocSe
}
fn size_hint(&self) -> u32 {
self.column.num_docs()
// TODO: Implement a better size hint
self.column.num_docs() / 10
}
/// Returns a best-effort hint of the
/// cost to drive the docset.
fn cost(&self) -> u64 {
// Advancing the docset is relatively expensive since it scans the column.
// Keep cost relative to a term query driver; use num_docs as baseline.
self.column.num_docs() as u64
// Advancing the docset is pretty expensive since it scans the whole column, there is no
// index currently (will change with an kd-tree)
// Since we use SIMD to scan the fast field range query we lower the cost a little bit,
// assuming that we hit 10% of the docs like in size_hint.
//
// If we would return a cost higher than num_docs, we would never choose ff range query as
// the driver in a DocSet, when intersecting a term query with a fast field. But
// it's the faster choice when the term query has a lot of docids and the range
// query has not.
//
// Ideally this would take the fast field codec into account
(self.column.num_docs() as f64 * 0.8) as u64
}
}
@@ -236,4 +261,52 @@ mod tests {
let count = searcher.search(&query, &Count).unwrap();
assert_eq!(count, 500);
}
/// Exercises the fast path where the requested value range does not overlap
/// the column's [min_value, max_value] range: the range docset should yield
/// no hits without scanning.
#[test]
fn range_query_no_overlap_optimization() {
    let mut schema_builder = schema::SchemaBuilder::new();
    let id_field = schema_builder.add_text_field("id", schema::STRING);
    let value_field = schema_builder.add_u64_field("value", schema::FAST | schema::INDEXED);
    let dir = RamDirectory::default();
    let index = IndexBuilder::new()
        .schema(schema_builder.build())
        .open_or_create(dir)
        .unwrap();
    {
        let mut writer = index.writer(15_000_000).unwrap();
        // Add documents with values in the range [10, 20]
        for i in 0..100 {
            let mut doc = TantivyDocument::new();
            doc.add_text(id_field, format!("doc{i}"));
            doc.add_u64(value_field, 10 + (i % 11) as u64); // values in range 10-20
            writer.add_document(doc).unwrap();
        }
        writer.commit().unwrap();
    }
    let reader = index.reader().unwrap();
    let searcher = reader.searcher();
    // Test a range query [100, 200] that has no overlap with data range [10, 20]
    let query = RangeQuery::new(
        Bound::Included(Term::from_field_u64(value_field, 100)),
        Bound::Included(Term::from_field_u64(value_field, 200)),
    );
    let count = searcher.search(&query, &Count).unwrap();
    assert_eq!(count, 0); // should return 0 results since there's no overlap
    // Test another non-overlapping range: [0, 5] while data range is [10, 20]
    let query2 = RangeQuery::new(
        Bound::Included(Term::from_field_u64(value_field, 0)),
        Bound::Included(Term::from_field_u64(value_field, 5)),
    );
    let count2 = searcher.search(&query2, &Count).unwrap();
    assert_eq!(count2, 0); // should return 0 results since there's no overlap
}
}

View File

@@ -212,7 +212,12 @@ impl InvertedIndexRangeWeight {
}
impl Weight for InvertedIndexRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let max_doc = reader.max_doc();
let mut doc_bitset = BitSet::with_max_value(max_doc);
@@ -229,7 +234,12 @@ impl Weight for InvertedIndexRangeWeight {
processed_count += 1;
let term_info = term_range.value();
let mut block_segment_postings = inverted_index
.read_block_postings_from_terminfo(term_info, IndexRecordOption::Basic)?;
.read_block_postings_from_terminfo_with_seek(
term_info,
IndexRecordOption::Basic,
seek_doc,
)?
.0;
loop {
let docs = block_segment_postings.docs();
if docs.is_empty() {
@@ -246,7 +256,7 @@ impl Weight for InvertedIndexRangeWeight {
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if scorer.seek(doc) != doc {
return Err(does_not_match(doc));
}
@@ -686,7 +696,7 @@ mod tests {
.weight(EnableScoring::disabled_from_schema(&schema))
.unwrap();
let range_scorer = range_weight
.scorer(&searcher.segment_readers()[0], 1.0f32)
.scorer(&searcher.segment_readers()[0], 1.0f32, 0)
.unwrap();
range_scorer
};

View File

@@ -52,7 +52,12 @@ impl FastFieldRangeWeight {
}
impl Weight for FastFieldRangeWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
// Check if both bounds are Bound::Unbounded
if self.bounds.is_unbounded() {
return Ok(Box::new(AllScorer::new(reader.max_doc())));
@@ -109,11 +114,21 @@ impl Weight for FastFieldRangeWeight {
else {
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(column, boost, BoundsRange::new(lower_bound, upper_bound))
}
Type::U64 | Type::I64 | Type::F64 => {
search_on_json_numerical_field(reader, &field_name, typ, bounds, boost)
search_on_u64_ff(
column,
boost,
BoundsRange::new(lower_bound, upper_bound),
seek_doc,
)
}
Type::U64 | Type::I64 | Type::F64 => search_on_json_numerical_field(
reader,
&field_name,
typ,
bounds,
boost,
seek_doc,
),
Type::Date => {
let fast_field_reader = reader.fast_fields();
let Some((column, _col_type)) = fast_field_reader
@@ -126,6 +141,7 @@ impl Weight for FastFieldRangeWeight {
column,
boost,
BoundsRange::new(bounds.lower_bound, bounds.upper_bound),
seek_doc,
)
}
Type::Bool | Type::Facet | Type::Bytes | Type::Json | Type::IpAddr => {
@@ -154,7 +170,7 @@ impl Weight for FastFieldRangeWeight {
ip_addr_column.min_value(),
ip_addr_column.max_value(),
);
let docset = RangeDocSet::new(value_range, ip_addr_column);
let docset = RangeDocSet::new(value_range, ip_addr_column, seek_doc);
Ok(Box::new(ConstScorer::new(docset, boost)))
} else if field_type.is_str() {
let Some(str_dict_column): Option<StrColumn> = reader.fast_fields().str(&field_name)?
@@ -173,7 +189,12 @@ impl Weight for FastFieldRangeWeight {
else {
return Ok(Box::new(EmptyScorer));
};
search_on_u64_ff(column, boost, BoundsRange::new(lower_bound, upper_bound))
search_on_u64_ff(
column,
boost,
BoundsRange::new(lower_bound, upper_bound),
seek_doc,
)
} else {
assert!(
maps_to_u64_fastfield(field_type.value_type()),
@@ -215,12 +236,13 @@ impl Weight for FastFieldRangeWeight {
column,
boost,
BoundsRange::new(bounds.lower_bound, bounds.upper_bound),
seek_doc,
)
}
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if scorer.seek(doc) != doc {
return Err(TantivyError::InvalidArgument(format!(
"Document #({doc}) does not match"
@@ -230,6 +252,10 @@ impl Weight for FastFieldRangeWeight {
Ok(explanation)
}
/// Priority used to order weights when building an intersection
/// (see `Weight::intersection_priority`, default 20).
/// 30 means this weight's scorer is created after lower-priority siblings and
/// receives a larger `seek_doc` — presumably because fast-field range scorers
/// are comparatively expensive to position. TODO confirm rationale.
fn intersection_priority(&self) -> u32 {
    30u32
}
}
/// On numerical fields the column type may not match the user provided one.
@@ -241,6 +267,7 @@ fn search_on_json_numerical_field(
typ: Type,
bounds: BoundsRange<ValueBytes<Vec<u8>>>,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
// Since we don't know which type was interpolated for the internal column we
// have to check for all numeric types (only one exists)
@@ -318,6 +345,7 @@ fn search_on_json_numerical_field(
column,
boost,
BoundsRange::new(bounds.lower_bound, bounds.upper_bound),
seek_doc,
)
}
@@ -396,6 +424,7 @@ fn search_on_u64_ff(
column: Column<u64>,
boost: Score,
bounds: BoundsRange<u64>,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
let col_min_value = column.min_value();
let col_max_value = column.max_value();
@@ -426,8 +455,8 @@ fn search_on_u64_ff(
}
}
let docset = RangeDocSet::new(value_range, column);
Ok(Box::new(ConstScorer::new(docset, boost)))
let doc_set = RangeDocSet::new(value_range, column, seek_doc);
Ok(Box::new(ConstScorer::new(doc_set, boost)))
}
/// Returns true if the type maps to a u64 fast field
@@ -504,7 +533,7 @@ mod tests {
DateOptions, Field, NumericOptions, Schema, SchemaBuilder, FAST, INDEXED, STORED, STRING,
TEXT,
};
use crate::{Index, IndexWriter, TantivyDocument, Term, TERMINATED};
use crate::{DocId, Index, IndexWriter, TantivyDocument, Term, TERMINATED};
#[test]
fn test_text_field_ff_range_query() -> crate::Result<()> {
@@ -1142,11 +1171,52 @@ mod tests {
Bound::Included(Term::from_field_u64(field, 50_002)),
));
let scorer = range_query
.scorer(searcher.segment_reader(0), 1.0f32)
.scorer(searcher.segment_reader(0), 1.0f32, 0)
.unwrap();
assert_eq!(scorer.doc(), TERMINATED);
}
#[test]
fn test_fastfield_range_weight_seek_doc() {
    let mut schema_builder = SchemaBuilder::new();
    let field = schema_builder.add_u64_field("value", FAST);
    let schema = schema_builder.build();
    let index = Index::create_in_ram(schema);
    let mut writer: IndexWriter = index.writer_for_tests().unwrap();
    // Create 11 documents with values
    // 0, 10, 20, ..., 90
    // and then 50 again.
    for i in 0..10 {
        writer.add_document(doc!(field => (i * 10) as u64)).unwrap();
    }
    writer.add_document(doc!(field => 50u64)).unwrap();
    writer.commit().unwrap();
    let searcher = index.reader().unwrap().searcher();
    let segment_reader = searcher.segment_reader(0);
    // [30, 70] matches docs 3..=7 (values 30..=70) and doc 10 (value 50).
    let range_weight = FastFieldRangeWeight::new(BoundsRange::new(
        Bound::Included(Term::from_field_u64(field, 30)),
        Bound::Included(Term::from_field_u64(field, 70)),
    ));
    // Builds a scorer with the given `seek_doc` hint and drains it into a Vec.
    let doc_when_seeking_from = |seek_from: DocId| {
        let doc_set = range_weight
            .scorer(segment_reader, 1.0f32, seek_from)
            .unwrap();
        crate::docset::docset_to_doc_vec(doc_set)
    };
    // A `seek_doc` at or below the first hit must not drop any hit...
    assert_eq!(doc_when_seeking_from(0), vec![3, 4, 5, 6, 7, 10]);
    assert_eq!(doc_when_seeking_from(1), vec![3, 4, 5, 6, 7, 10]);
    assert_eq!(doc_when_seeking_from(3), vec![3, 4, 5, 6, 7, 10]);
    // ...while hits strictly below `seek_doc` may be skipped.
    assert_eq!(doc_when_seeking_from(7), vec![7, 10]);
    assert_eq!(doc_when_seeking_from(8), vec![10]);
    assert_eq!(doc_when_seeking_from(10), vec![10]);
    // A `seek_doc` past the last hit yields an exhausted scorer.
    assert_eq!(doc_when_seeking_from(11), Vec::<DocId>::new());
}
#[test]
fn range_regression3_test() {
let ops = vec![doc_from_id_1(1), doc_from_id_1(2), doc_from_id_1(3)];
@@ -1598,449 +1668,3 @@ pub(crate) mod ip_range_tests {
Ok(())
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
    // Benchmarks for u64 range queries, alone and intersected with term queries
    // of known selectivity ("veryfew" ~1%, "few" ~9%, "many" ~90%).
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;
    use super::tests::*;
    use super::*;
    use crate::collector::Count;
    use crate::query::QueryParser;
    use crate::Index;
    // Builds a 100_000-doc index. `id` is uniform in [0, 100); `id_name` tags
    // each doc with a term of known selectivity. The fixed seed keeps runs
    // reproducible and comparable.
    fn get_index_0_to_100() -> Index {
        let mut rng = StdRng::from_seed([1u8; 32]);
        let num_vals = 100_000;
        let docs: Vec<_> = (0..num_vals)
            .map(|_i| {
                let id_name = if rng.gen_bool(0.01) {
                    "veryfew".to_string() // 1%
                } else if rng.gen_bool(0.1) {
                    "few".to_string() // 9%
                } else {
                    "many".to_string() // 90%
                };
                Doc {
                    id_name,
                    id: rng.gen_range(0..100),
                }
            })
            .collect();
        create_index_from_docs(&docs, false)
    }
    // Range selecting ~90% of documents.
    fn get_90_percent() -> RangeInclusive<u64> {
        0..=90
    }
    // Range selecting ~10% of documents.
    fn get_10_percent() -> RangeInclusive<u64> {
        0..=10
    }
    // Range selecting ~1% of documents (a single value).
    fn get_1_percent() -> RangeInclusive<u64> {
        10..=10
    }
    // Parses and runs `field:[from TO to] suffix` against `index`, returning the
    // hit count. `suffix` appends an intersecting clause (e.g. "AND id_name:few")
    // or is left empty.
    fn execute_query(
        field: &str,
        id_range: RangeInclusive<u64>,
        suffix: &str,
        index: &Index,
    ) -> usize {
        let gen_query_inclusive = |from: &u64, to: &u64| {
            format!(
                "{}:[{} TO {}] {}",
                field,
                &from.to_string(),
                &to.to_string(),
                suffix
            )
        };
        let query = gen_query_inclusive(id_range.start(), id_range.end());
        let query_from_text = |text: &str| {
            QueryParser::for_index(index, vec![])
                .parse_query(text)
                .unwrap()
        };
        let query = query_from_text(&query);
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        searcher.search(&query, &(Count)).unwrap()
    }
    // Bench names follow the pattern:
    // bench_id_range_hit_<range sel.>[_intersect_with_<term sel.>][_multi]
    // where `_multi` targets the multi-valued `ids` field.
    #[bench]
    fn bench_id_range_hit_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_90_percent(), "", &index));
    }
    #[bench]
    fn bench_id_range_hit_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_10_percent(), "", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_1_percent(), "", &index));
    }
    #[bench]
    fn bench_id_range_hit_10_percent_intersect_with_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_10_percent(), "AND id_name:few", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_intersect_with_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_1_percent(), "AND id_name:few", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_intersect_with_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_1_percent(), "AND id_name:many", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_intersect_with_1_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_1_percent(), "AND id_name:veryfew", &index));
    }
    #[bench]
    fn bench_id_range_hit_10_percent_intersect_with_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_10_percent(), "AND id_name:many", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_intersect_with_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_90_percent(), "AND id_name:many", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_intersect_with_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_90_percent(), "AND id_name:few", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_intersect_with_1_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("id", get_90_percent(), "AND id_name:veryfew", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_90_percent(), "", &index));
    }
    #[bench]
    fn bench_id_range_hit_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_10_percent(), "", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_1_percent(), "", &index));
    }
    #[bench]
    fn bench_id_range_hit_10_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_10_percent(), "AND id_name:few", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_1_percent(), "AND id_name:few", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_1_percent(), "AND id_name:many", &index));
    }
    #[bench]
    fn bench_id_range_hit_1_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_1_percent(), "AND id_name:veryfew", &index));
    }
    #[bench]
    fn bench_id_range_hit_10_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_10_percent(), "AND id_name:many", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_90_percent(), "AND id_name:many", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_90_percent(), "AND id_name:few", &index));
    }
    #[bench]
    fn bench_id_range_hit_90_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ids", get_90_percent(), "AND id_name:veryfew", &index));
    }
}
#[cfg(all(test, feature = "unstable"))]
mod bench_ip {
    // Same benchmark matrix as `mod bench`, but for IPv6 range queries.
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};
    use test::Bencher;
    use super::ip_range_tests::*;
    use super::*;
    use crate::collector::Count;
    use crate::query::QueryParser;
    use crate::Index;
    // Builds a 100_000-doc index. `ip` takes 100 distinct values spread 1000
    // apart; `id` tags each doc with a term of known selectivity.
    fn get_index_0_to_100() -> Index {
        let mut rng = StdRng::from_seed([1u8; 32]);
        let num_vals = 100_000;
        let docs: Vec<_> = (0..num_vals)
            .map(|_i| {
                let id = if rng.gen_bool(0.01) {
                    "veryfew".to_string() // 1%
                } else if rng.gen_bool(0.1) {
                    "few".to_string() // 9%
                } else {
                    "many".to_string() // 90%
                };
                Doc {
                    id,
                    // Multiply by 1000, so that we create many buckets in the compact space
                    // The benches depend on this range to select n-percent of elements with the
                    // methods below.
                    ip: Ipv6Addr::from_u128(rng.gen_range(0..100) * 1000),
                }
            })
            .collect();
        create_index_from_ip_docs(&docs)
    }
    // IP range selecting ~90% of documents.
    fn get_90_percent() -> RangeInclusive<Ipv6Addr> {
        let start = Ipv6Addr::from_u128(0);
        let end = Ipv6Addr::from_u128(90 * 1000);
        start..=end
    }
    // IP range selecting ~10% of documents.
    fn get_10_percent() -> RangeInclusive<Ipv6Addr> {
        let start = Ipv6Addr::from_u128(0);
        let end = Ipv6Addr::from_u128(10 * 1000);
        start..=end
    }
    // IP range selecting ~1% of documents (a single value).
    fn get_1_percent() -> RangeInclusive<Ipv6Addr> {
        let start = Ipv6Addr::from_u128(10 * 1000);
        let end = Ipv6Addr::from_u128(10 * 1000);
        start..=end
    }
    // Parses and runs `field:[from TO to] suffix` against `index`, returning the
    // hit count. `suffix` appends an intersecting clause (e.g. "AND id:few") or
    // is left empty.
    fn execute_query(
        field: &str,
        ip_range: RangeInclusive<Ipv6Addr>,
        suffix: &str,
        index: &Index,
    ) -> usize {
        let gen_query_inclusive = |from: &Ipv6Addr, to: &Ipv6Addr| {
            format!(
                "{}:[{} TO {}] {}",
                field,
                &from.to_string(),
                &to.to_string(),
                suffix
            )
        };
        let query = gen_query_inclusive(ip_range.start(), ip_range.end());
        let query_from_text = |text: &str| {
            QueryParser::for_index(index, vec![])
                .parse_query(text)
                .unwrap()
        };
        let query = query_from_text(&query);
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        searcher.search(&query, &(Count)).unwrap()
    }
    // Bench names follow the pattern:
    // bench_ip_range_hit_<range sel.>[_intersect_with_<term sel.>][_multi]
    // where `_multi` targets the multi-valued `ips` field.
    #[bench]
    fn bench_ip_range_hit_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_90_percent(), "", &index));
    }
    #[bench]
    fn bench_ip_range_hit_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_10_percent(), "", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_1_percent(), "", &index));
    }
    #[bench]
    fn bench_ip_range_hit_10_percent_intersect_with_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_10_percent(), "AND id:few", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_intersect_with_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_1_percent(), "AND id:few", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_intersect_with_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_1_percent(), "AND id:many", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_intersect_with_1_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_1_percent(), "AND id:veryfew", &index));
    }
    #[bench]
    fn bench_ip_range_hit_10_percent_intersect_with_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_10_percent(), "AND id:many", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_intersect_with_90_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_90_percent(), "AND id:many", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_intersect_with_10_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_90_percent(), "AND id:few", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_intersect_with_1_percent(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ip", get_90_percent(), "AND id:veryfew", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_90_percent(), "", &index));
    }
    #[bench]
    fn bench_ip_range_hit_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_10_percent(), "", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_1_percent(), "", &index));
    }
    #[bench]
    fn bench_ip_range_hit_10_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_10_percent(), "AND id:few", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_1_percent(), "AND id:few", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_1_percent(), "AND id:many", &index));
    }
    #[bench]
    fn bench_ip_range_hit_1_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_1_percent(), "AND id:veryfew", &index));
    }
    #[bench]
    fn bench_ip_range_hit_10_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_10_percent(), "AND id:many", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_intersect_with_90_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_90_percent(), "AND id:many", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_intersect_with_10_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_90_percent(), "AND id:few", &index));
    }
    #[bench]
    fn bench_ip_range_hit_90_percent_intersect_with_1_percent_multi(bench: &mut Bencher) {
        let index = get_index_0_to_100();
        bench.iter(|| execute_query("ips", get_90_percent(), "AND id:veryfew", &index));
    }
}

View File

@@ -56,6 +56,11 @@ where
self.req_scorer.seek(target)
}
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    // Invalidate the memoized score before moving the wrapped scorer: a cached
    // score would pertain to the previous position. (Mirrors `seek` above.)
    self.score_cache = None;
    self.req_scorer.seek_into_the_danger_zone(target)
}
// Current position is entirely delegated to the wrapped required scorer.
fn doc(&self) -> DocId {
    self.req_scorer.doc()
}

View File

@@ -37,7 +37,7 @@ mod tests {
);
let term_weight = term_query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
let mut term_scorer = term_weight.scorer(segment_reader, 1.0, 0)?;
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.score(), 0.28768212);
Ok(())
@@ -65,7 +65,7 @@ mod tests {
);
let term_weight = term_query.weight(EnableScoring::enabled_from_searcher(&searcher))?;
let segment_reader = searcher.segment_reader(0);
let mut term_scorer = term_weight.scorer(segment_reader, 1.0)?;
let mut term_scorer = term_weight.scorer(segment_reader, 1.0, 0)?;
for i in 0u32..COMPRESSION_BLOCK_SIZE as u32 {
assert_eq!(term_scorer.doc(), i);
if i == COMPRESSION_BLOCK_SIZE as u32 - 1u32 {
@@ -162,7 +162,7 @@ mod tests {
let searcher = index.reader()?.searcher();
let term_weight =
term_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0)?;
let mut term_scorer = term_weight.scorer(searcher.segment_reader(0u32), 1.0, 0)?;
assert_eq!(term_scorer.doc(), 0u32);
term_scorer.seek(1u32);
assert_eq!(term_scorer.doc(), 1u32);
@@ -470,7 +470,7 @@ mod tests {
.weight(EnableScoring::disabled_from_schema(&schema))
.unwrap();
term_weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.scorer(searcher.segment_reader(0u32), 1.0f32, 0)
.unwrap()
};
// Should be an allscorer
@@ -484,6 +484,53 @@ mod tests {
assert!(empty_scorer.is::<EmptyScorer>());
}
#[test]
fn test_term_weight_seek_doc() -> crate::Result<()> {
    let mut schema_builder = Schema::builder();
    let text_field = schema_builder.add_text_field("text", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer: IndexWriter = index.writer_for_tests()?;
    // Create 11 documents where docs 3, 4, 5, 6, 7, and 10 contain "target"
    // (similar pattern to test_fastfield_range_weight_seek_doc)
    for doc_id in 0u32..11u32 {
        let token = if matches!(doc_id, 3..=7 | 10) {
            "target"
        } else {
            "other"
        };
        index_writer.add_document(doc!(text_field => token))?;
    }
    index_writer.commit()?;
    let searcher = index.reader()?.searcher();
    let segment_reader = searcher.segment_reader(0);
    let term_query = TermQuery::new(
        Term::from_field_text(text_field, "target"),
        IndexRecordOption::Basic,
    );
    let term_weight =
        term_query.weight(EnableScoring::disabled_from_schema(searcher.schema()))?;
    // Builds a scorer with the given `seek_doc` hint and drains it into a Vec.
    let docs_from = |seek_from: crate::DocId| {
        let scorer = term_weight
            .scorer(segment_reader, 1.0f32, seek_from)
            .unwrap();
        crate::docset::docset_to_doc_vec(scorer)
    };
    let all_hits: Vec<crate::DocId> = vec![3, 4, 5, 6, 7, 10];
    // Seeking at or below the first hit must not drop any hit...
    assert_eq!(docs_from(0), all_hits);
    assert_eq!(docs_from(1), all_hits);
    assert_eq!(docs_from(3), all_hits);
    // ...while hits strictly below `seek_doc` may be skipped.
    assert_eq!(docs_from(7), vec![7, 10]);
    assert_eq!(docs_from(8), vec![10]);
    assert_eq!(docs_from(10), vec![10]);
    assert_eq!(docs_from(11), Vec::<crate::DocId>::new());
    Ok(())
}
#[test]
fn test_term_weight_all_query_optimization_disable_when_scoring_enabled() {
let mut schema_builder = Schema::builder();
@@ -509,7 +556,7 @@ mod tests {
.weight(EnableScoring::enabled_from_searcher(&searcher))
.unwrap();
term_weight
.scorer(searcher.segment_reader(0u32), 1.0f32)
.scorer(searcher.segment_reader(0u32), 1.0f32, 0)
.unwrap()
};
// Should be an allscorer

View File

@@ -98,14 +98,17 @@ impl TermScorer {
}
impl DocSet for TermScorer {
#[inline]
fn advance(&mut self) -> DocId {
    // Thin, inlined delegation to the underlying postings cursor.
    self.postings.advance()
}
#[inline]
fn seek(&mut self, target: DocId) -> DocId {
    self.postings.seek(target)
}
#[inline]
fn doc(&self) -> DocId {
    self.postings.doc()
}

View File

@@ -34,12 +34,19 @@ impl TermOrEmptyOrAllScorer {
}
impl Weight for TermWeight {
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
Ok(self.specialized_scorer(reader, boost)?.into_boxed_scorer())
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>> {
Ok(self
.specialized_scorer(reader, boost, seek_doc)?
.into_boxed_scorer())
}
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
match self.specialized_scorer(reader, 1.0)? {
match self.specialized_scorer(reader, 1.0, doc)? {
TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
if term_scorer.doc() > doc || term_scorer.seek(doc) != doc {
return Err(does_not_match(doc));
@@ -55,7 +62,7 @@ impl Weight for TermWeight {
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
if let Some(alive_bitset) = reader.alive_bitset() {
Ok(self.scorer(reader, 1.0)?.count(alive_bitset))
Ok(self.scorer(reader, 1.0, 0)?.count(alive_bitset))
} else {
let field = self.term.field();
let inv_index = reader.inverted_index(field)?;
@@ -71,7 +78,7 @@ impl Weight for TermWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
match self.specialized_scorer(reader, 1.0)? {
match self.specialized_scorer(reader, 1.0, 0u32)? {
TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
for_each_scorer(&mut *term_scorer, callback);
}
@@ -90,7 +97,7 @@ impl Weight for TermWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
) -> crate::Result<()> {
match self.specialized_scorer(reader, 1.0)? {
match self.specialized_scorer(reader, 1.0, 0u32)? {
TermOrEmptyOrAllScorer::TermScorer(mut term_scorer) => {
let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];
for_each_docset_buffered(&mut term_scorer, &mut buffer, callback);
@@ -121,7 +128,7 @@ impl Weight for TermWeight {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let specialized_scorer = self.specialized_scorer(reader, 1.0)?;
let specialized_scorer = self.specialized_scorer(reader, 1.0, 0u32)?;
match specialized_scorer {
TermOrEmptyOrAllScorer::TermScorer(term_scorer) => {
crate::query::boolean_query::block_wand_single_scorer(
@@ -139,6 +146,12 @@ impl Weight for TermWeight {
}
Ok(())
}
/// Returns a priority number used to sort weights when running an
/// intersection.
fn intersection_priority(&self) -> u32 {
10u32
}
}
impl TermWeight {
@@ -169,7 +182,7 @@ impl TermWeight {
reader: &SegmentReader,
boost: Score,
) -> crate::Result<Option<TermScorer>> {
let scorer = self.specialized_scorer(reader, boost)?;
let scorer = self.specialized_scorer(reader, boost, 0u32)?;
Ok(match scorer {
TermOrEmptyOrAllScorer::TermScorer(scorer) => Some(*scorer),
_ => None,
@@ -180,6 +193,7 @@ impl TermWeight {
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<TermOrEmptyOrAllScorer> {
let field = self.term.field();
let inverted_index = reader.inverted_index(field)?;
@@ -196,8 +210,11 @@ impl TermWeight {
)));
}
let segment_postings: SegmentPostings =
inverted_index.read_postings_from_terminfo(&term_info, self.index_record_option)?;
let segment_postings: SegmentPostings = inverted_index.read_postings_from_terminfo(
&term_info,
self.index_record_option,
seek_doc,
)?;
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let similarity_weight = self.similarity_weight.boost_by(boost);

View File

@@ -15,7 +15,7 @@ const HORIZON: u32 = 64u32 * 64u32;
// This function is similar except that it is not unstable, and
// it does not keep the original vector ordering.
//
// Also, it does not "yield" any elements.
// Elements are dropped and not yielded.
fn unordered_drain_filter<T, P>(v: &mut Vec<T>, mut predicate: P)
where P: FnMut(&mut T) -> bool {
let mut i = 0;
@@ -143,6 +143,12 @@ impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> BufferedUnionScorer<TScorer
}
false
}
/// Returns true if `target` falls inside the currently buffered window,
/// i.e. `[window_start_doc, window_start_doc + HORIZON)`.
fn is_in_horizon(&self, target: DocId) -> bool {
    // wrapping_sub, because target may be < window_start_doc
    // (the wrapped difference is then >= HORIZON, so the check returns false).
    let gap = target.wrapping_sub(self.window_start_doc);
    gap < HORIZON
}
}
impl<TScorer, TScoreCombiner> DocSet for BufferedUnionScorer<TScorer, TScoreCombiner>
@@ -217,7 +223,27 @@ where
}
}
// TODO Also implement `count` with deletes efficiently.
/// Union variant of `seek_into_the_danger_zone`: returns true iff at least one
/// underlying docset matches `target`.
fn seek_into_the_danger_zone(&mut self, target: DocId) -> bool {
    if self.is_in_horizon(target) {
        // Our value is within the buffered horizon and the docset may already have been
        // processed and removed, so we need to use seek, which uses the regular advance.
        self.seek(target) == target
    } else {
        // The docsets are not in the buffered range, so we can use seek_into_the_danger_zone
        // of the underlying docsets
        // NOTE: `any` short-circuits, leaving the remaining docsets unpositioned.
        let is_hit = self
            .docsets
            .iter_mut()
            .any(|docset| docset.seek_into_the_danger_zone(target));
        // The API requires the DocSet to be in a valid state when `seek_into_the_danger_zone`
        // returns true.
        if is_hit {
            self.seek(target);
        }
        is_hit
    }
}
fn doc(&self) -> DocId {
self.doc
@@ -231,6 +257,7 @@ where
self.docsets.iter().map(|docset| docset.cost()).sum()
}
// TODO Also implement `count` with deletes efficiently.
fn count_including_deleted(&mut self) -> u32 {
if self.doc == TERMINATED {
return 0;

View File

@@ -92,6 +92,7 @@ impl<TDocSet: DocSet> DocSet for SimpleUnion<TDocSet> {
}
fn size_hint(&self) -> u32 {
// TODO: use estimate_union
self.docsets
.iter()
.map(|docset| docset.size_hint())

View File

@@ -68,15 +68,28 @@ pub trait Weight: Send + Sync + 'static {
///
/// `boost` is a multiplier to apply to the score.
///
/// As an optimization, the scorer may skip over matching documents that are below
/// `seek_doc`: it only needs to be positioned on some matching document no greater
/// than the first match at or above `seek_doc`
/// (or on TERMINATED if no document matches at all).
///
/// Entirely ignoring that parameter and positioning the Scorer on the first document
/// is always correct.
///
/// See [`Query`](crate::query::Query).
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>>;
fn scorer(
&self,
reader: &SegmentReader,
boost: Score,
seek_doc: DocId,
) -> crate::Result<Box<dyn Scorer>>;
/// Returns an [`Explanation`] for the given document.
fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation>;
/// Returns the number of documents within the given [`SegmentReader`].
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
if let Some(alive_bitset) = reader.alive_bitset() {
Ok(scorer.count(alive_bitset))
} else {
@@ -91,7 +104,7 @@ pub trait Weight: Send + Sync + 'static {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score),
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
for_each_scorer(scorer.as_mut(), callback);
Ok(())
}
@@ -103,7 +116,7 @@ pub trait Weight: Send + Sync + 'static {
reader: &SegmentReader,
callback: &mut dyn FnMut(&[DocId]),
) -> crate::Result<()> {
let mut docset = self.scorer(reader, 1.0)?;
let mut docset = self.scorer(reader, 1.0, 0)?;
let mut buffer = [0u32; COLLECT_BLOCK_BUFFER_LEN];
for_each_docset_buffered(&mut docset, &mut buffer, callback);
@@ -126,8 +139,18 @@ pub trait Weight: Send + Sync + 'static {
reader: &SegmentReader,
callback: &mut dyn FnMut(DocId, Score) -> Score,
) -> crate::Result<()> {
let mut scorer = self.scorer(reader, 1.0)?;
let mut scorer = self.scorer(reader, 1.0, 0)?;
for_each_pruning_scorer(scorer.as_mut(), threshold, callback);
Ok(())
}
/// Returns a priority number used to sort weights when running an
/// intersection.
///
/// Tweaking this value only impacts performance.
/// A higher priority means that the `.scorer()` will be more likely to be evaluated
/// after the sibling weights, and be passed a higher `seek_doc` value as a result.
fn intersection_priority(&self) -> u32 {
20u32
}
}

View File

@@ -117,7 +117,7 @@ impl ExpUnrolledLinkedListWriter<'_> {
fn get_block_size(block_num: u32) -> u16 {
// Cap at 15 to prevent block sizes > 32KB
// block_num can now be much larger than 15, but block size maxes out
let exp = block_num.min(15) as u32;
let exp: u32 = block_num.min(15u32);
(1u32 << exp) as u16
}
@@ -309,6 +309,7 @@ mod tests {
}
#[test]
#[allow(clippy::needless_range_loop)]
fn test_large_dataset_simulation() {
// Simulate the scenario: large arrays requiring many blocks
// We write enough data to require thousands of blocks
@@ -452,8 +453,10 @@ mod tests {
fn test_increment_overflow_protection() {
// Test that we panic gracefully if we somehow hit u32::MAX
// This is extremely unlikely in practice (would require 128TB of data)
let mut eull = ExpUnrolledLinkedList::default();
eull.block_num = u32::MAX;
let mut eull = ExpUnrolledLinkedList {
block_num: u32::MAX,
..Default::default()
};
// This should panic with our custom error message
eull.increment_num_blocks();