Optimize ExistsQuery for a high number of dynamic columns (#2694)

* Optimize ExistsQuery for a high number of dynamic columns

The previous algorithm checked _each_ doc in _each_ column for
existence, which becomes very expensive on JSON fields with many dynamic
columns (e.g. 100k). Instead, when there are multiple columns (four or
more in the current heuristic), compute a bitset of all docs that have a
value in any column and score from that. A minimal sketch of the approach
follows this list.

Also adds `iter_docs` to the multivalued index (renamed to
`iter_non_null_docs` below).

* add benchmark

subfields=1
exists_json_union    Memory: 89.3 KB (+2.01%)    Avg: 0.4865ms (-26.03%)    Median: 0.4865ms (-26.03%)    [0.4865ms .. 0.4865ms]
subfields=2
exists_json_union    Memory: 68.1 KB     Avg: 1.7048ms (-0.46%)    Median: 1.7048ms (-0.46%)    [1.7048ms .. 1.7048ms]
subfields=3
exists_json_union    Memory: 61.8 KB     Avg: 2.0742ms (-2.22%)    Median: 2.0742ms (-2.22%)    [2.0742ms .. 2.0742ms]
subfields=4
exists_json_union    Memory: 119.8 KB (+103.44%)    Avg: 3.9500ms (+42.62%)    Median: 3.9500ms (+42.62%)    [3.9500ms .. 3.9500ms]
subfields=5
exists_json_union    Memory: 120.4 KB (+107.65%)    Avg: 3.9610ms (+20.65%)    Median: 3.9610ms (+20.65%)    [3.9610ms .. 3.9610ms]
subfields=6
exists_json_union    Memory: 120.6 KB (+107.49%)    Avg: 3.8903ms (+3.11%)    Median: 3.8903ms (+3.11%)    [3.8903ms .. 3.8903ms]
subfields=7
exists_json_union    Memory: 120.9 KB (+106.93%)    Avg: 3.6220ms (-16.22%)    Median: 3.6220ms (-16.22%)    [3.6220ms .. 3.6220ms]
subfields=8
exists_json_union    Memory: 121.3 KB (+106.23%)    Avg: 4.0981ms (-15.97%)    Median: 4.0981ms (-15.97%)    [4.0981ms .. 4.0981ms]
subfields=16
exists_json_union    Memory: 123.1 KB (+103.09%)    Avg: 4.3483ms (-92.26%)    Median: 4.3483ms (-92.26%)    [4.3483ms .. 4.3483ms]
subfields=256
exists_json_union    Memory: 204.6 KB (+19.85%)    Avg: 3.8874ms (-99.01%)    Median: 3.8874ms (-99.01%)    [3.8874ms .. 3.8874ms]
subfields=4096
exists_json_union    Memory: 2.0 MB     Avg: 3.5571ms (-99.90%)    Median: 3.5571ms (-99.90%)    [3.5571ms .. 3.5571ms]
subfields=65536
exists_json_union    Memory: 28.3 MB     Avg: 14.4417ms (-99.97%)    Median: 14.4417ms (-99.97%)    [14.4417ms .. 14.4417ms]
subfields=262144
exists_json_union    Memory: 113.3 MB     Avg: 66.2860ms (-99.95%)    Median: 66.2860ms (-99.95%)    [66.2860ms .. 66.2860ms]
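
Summary of the numbers above: for a handful of subfields the bitset roughly doubles memory and can be somewhat slower (up to about +43% at 4 subfields), while from 16 subfields upward query time drops by 90-99.9%.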

* rename methods
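
Sketch of the bitset approach referenced above (not the exact code; `columns` and `max_doc` stand in for the non-empty columns and the segment doc count available in `ExistsWeight::scorer`, while `BitSet`, `BitSetDocSet` and `iter_non_null_docs` are the types and iterator used by this change):

    // Union the "has a value" doc ids of every column into one bitset.
    let mut docs_with_value = BitSet::with_max_value(max_doc);
    for column in &columns {
        for doc in column.iter_non_null_docs() {
            docs_with_value.insert(doc);
        }
    }
    // One pass per column instead of probing every column for every doc;
    // the bitset then backs a BitSetDocSet-based scorer.
    let docset = BitSetDocSet::from(docs_with_value);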
Author: PSeitz-dd
Date: 2025-09-16 18:21:03 +02:00
Committed by: Adrien Guillo
Parent: 7963b0b4aa
Commit: eb5f51f3c4
8 changed files with 195 additions and 15 deletions


@@ -167,3 +167,7 @@ harness = false
 [[bench]]
 name = "agg_bench"
 harness = false
+
+[[bench]]
+name = "exists_json"
+harness = false

benches/exists_json.rs (new file, 69 lines)

@@ -0,0 +1,69 @@
use binggan::plugins::PeakMemAllocPlugin;
use binggan::{black_box, InputGroup, PeakMemAlloc, INSTRUMENTED_SYSTEM};
use serde_json::json;
use tantivy::collector::Count;
use tantivy::query::ExistsQuery;
use tantivy::schema::{Schema, FAST, TEXT};
use tantivy::{doc, Index};

#[global_allocator]
pub static GLOBAL: &PeakMemAlloc<std::alloc::System> = &INSTRUMENTED_SYSTEM;

fn main() {
    let doc_count: usize = 500_000;
    let subfield_counts: &[usize] = &[1, 2, 3, 4, 5, 6, 7, 8, 16, 256, 4096, 65536, 262144];

    let indices: Vec<(String, Index)> = subfield_counts
        .iter()
        .map(|&sub_fields| {
            (
                format!("subfields={sub_fields}"),
                build_index_with_json_subfields(doc_count, sub_fields),
            )
        })
        .collect();

    let mut group = InputGroup::new_with_inputs(indices);
    group.add_plugin(PeakMemAllocPlugin::new(GLOBAL));
    group.config().num_iter_group = Some(1);
    group.config().num_iter_bench = Some(1);
    group.register("exists_json", exists_json_union);
    group.run();
}

fn exists_json_union(index: &Index) {
    let reader = index.reader().expect("reader");
    let searcher = reader.searcher();
    let query = ExistsQuery::new("json".to_string(), true);
    let count = searcher.search(&query, &Count).expect("exists search");
    // Prevents optimizer from eliding the search
    black_box(count);
}

fn build_index_with_json_subfields(num_docs: usize, num_subfields: usize) -> Index {
    // Schema: single JSON field stored as FAST to support ExistsQuery.
    let mut schema_builder = Schema::builder();
    let json_field = schema_builder.add_json_field("json", TEXT | FAST);
    let schema = schema_builder.build();
    let index = Index::create_from_tempdir(schema).expect("create index");
    {
        let mut index_writer = index
            .writer_with_num_threads(1, 200_000_000)
            .expect("writer");
        for i in 0..num_docs {
            let sub = i % num_subfields;
            // Only one subpath set per document; rotate subpaths so that
            // no single subpath is full, but the union covers all docs.
            let v = json!({ format!("field_{sub}"): i as u64 });
            index_writer
                .add_document(doc!(json_field => v))
                .expect("add_document");
        }
        index_writer.commit().expect("commit");
    }
    index
}


@@ -56,7 +56,7 @@ fn get_doc_ids_with_values<'a>(
         ColumnIndex::Full => Box::new(doc_range),
         ColumnIndex::Optional(optional_index) => Box::new(
             optional_index
-                .iter_docs()
+                .iter_non_null_docs()
                 .map(move |row| row + doc_range.start),
         ),
         ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
@@ -73,7 +73,7 @@ fn get_doc_ids_with_values<'a>(
             MultiValueIndex::MultiValueIndexV2(multivalued_index) => Box::new(
                 multivalued_index
                     .optional_index
-                    .iter_docs()
+                    .iter_non_null_docs()
                     .map(move |row| row + doc_range.start),
             ),
         },
@@ -177,7 +177,7 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
             ColumnIndex::Full => Box::new(columnar_row_range),
             ColumnIndex::Optional(optional_index) => Box::new(
                 optional_index
-                    .iter_docs()
+                    .iter_non_null_docs()
                     .map(move |row_id: RowId| columnar_row_range.start + row_id),
             ),
             ColumnIndex::Multivalued(_) => {


@@ -215,6 +215,32 @@ impl MultiValueIndex {
         }
     }

+    /// Returns an iterator over document ids that have at least one value.
+    pub fn iter_non_null_docs(&self) -> Box<dyn Iterator<Item = DocId> + '_> {
+        match self {
+            MultiValueIndex::MultiValueIndexV1(idx) => {
+                let mut doc: DocId = 0u32;
+                let num_docs = idx.num_docs();
+                Box::new(std::iter::from_fn(move || {
+                    // This is not the most efficient way to do this, but it's legacy code.
+                    while doc < num_docs {
+                        let cur = doc;
+                        doc += 1;
+                        let start = idx.start_index_column.get_val(cur);
+                        let end = idx.start_index_column.get_val(cur + 1);
+                        if end > start {
+                            return Some(cur);
+                        }
+                    }
+                    None
+                }))
+            }
+            MultiValueIndex::MultiValueIndexV2(idx) => {
+                Box::new(idx.optional_index.iter_non_null_docs())
+            }
+        }
+    }
+
     /// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
     /// docids. Positions are converted inplace to docids.
     ///


@@ -88,7 +88,7 @@ pub struct OptionalIndex {
 impl Iterable<u32> for &OptionalIndex {
     fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
-        Box::new(self.iter_docs())
+        Box::new(self.iter_non_null_docs())
     }
 }
@@ -280,8 +280,9 @@ impl OptionalIndex {
         self.num_non_null_docs
     }

-    pub fn iter_docs(&self) -> impl Iterator<Item = RowId> + '_ {
-        // TODO optimize
+    pub fn iter_non_null_docs(&self) -> impl Iterator<Item = RowId> + '_ {
+        // TODO optimize. We could iterate over the blocks directly.
+        // We use the dense value ids and retrieve the doc ids via select.
         let mut select_batch = self.select_cursor();
         (0..self.num_non_null_docs).map(move |rank| select_batch.select(rank))
     }


@@ -164,7 +164,11 @@ fn test_optional_index_large() {
 fn test_optional_index_iter_aux(row_ids: &[RowId], num_rows: RowId) {
     let optional_index = OptionalIndex::for_test(num_rows, row_ids);
     assert_eq!(optional_index.num_docs(), num_rows);
-    assert!(optional_index.iter_docs().eq(row_ids.iter().copied()));
+    assert!(
+        optional_index
+            .iter_non_null_docs()
+            .eq(row_ids.iter().copied())
+    );
 }

 #[test]


@@ -367,7 +367,7 @@ fn is_empty_after_merge(
         ColumnIndex::Empty { .. } => true,
         ColumnIndex::Full => alive_bitset.len() == 0,
         ColumnIndex::Optional(optional_index) => {
-            for doc in optional_index.iter_docs() {
+            for doc in optional_index.iter_non_null_docs() {
                 if alive_bitset.contains(doc) {
                     return false;
                 }


@@ -1,12 +1,15 @@
 use core::fmt::Debug;

 use columnar::{ColumnIndex, DynamicColumn};
+use common::BitSet;

 use super::{ConstScorer, EmptyScorer};
 use crate::docset::{DocSet, TERMINATED};
 use crate::index::SegmentReader;
+use crate::query::all_query::AllScorer;
+use crate::query::boost_query::BoostScorer;
 use crate::query::explanation::does_not_match;
-use crate::query::{EnableScoring, Explanation, Query, Scorer, Weight};
+use crate::query::{BitSetDocSet, EnableScoring, Explanation, Query, Scorer, Weight};
 use crate::schema::Type;
 use crate::{DocId, Score, TantivyError};
@@ -113,13 +116,49 @@ impl Weight for ExistsWeight {
                 non_empty_columns.push(column)
             }
         }
-        // TODO: we can optimizer more here since in most cases we will have only one index
-        if !non_empty_columns.is_empty() {
-            let docset = ExistsDocSet::new(non_empty_columns, reader.max_doc());
-            Ok(Box::new(ConstScorer::new(docset, boost)))
-        } else {
-            Ok(Box::new(EmptyScorer))
-        }
+        if non_empty_columns.is_empty() {
+            return Ok(Box::new(EmptyScorer));
+        }
+
+        // If any column is full, all docs match.
+        let max_doc = reader.max_doc();
+        if non_empty_columns
+            .iter()
+            .any(|col| matches!(col.column_index(), ColumnIndex::Full))
+        {
+            let all_scorer = AllScorer::new(max_doc);
+            return Ok(Box::new(BoostScorer::new(all_scorer, boost)));
+        }
+
+        // If we have a single dynamic column, use ExistsDocSet
+        // NOTE: A lower number may be better for very sparse columns
+        if non_empty_columns.len() < 4 {
+            let docset = ExistsDocSet::new(non_empty_columns, reader.max_doc());
+            return Ok(Box::new(ConstScorer::new(docset, boost)));
+        }
+
+        // If we have many dynamic columns, precompute a bitset of matching docs
+        let mut doc_bitset = BitSet::with_max_value(max_doc);
+        for column in &non_empty_columns {
+            match column.column_index() {
+                ColumnIndex::Empty { .. } => {}
+                ColumnIndex::Full => {
+                    // Handled by AllScorer return above.
+                }
+                ColumnIndex::Optional(optional_index) => {
+                    for doc in optional_index.iter_non_null_docs() {
+                        doc_bitset.insert(doc);
+                    }
+                }
+                ColumnIndex::Multivalued(multi_idx) => {
+                    for doc in multi_idx.iter_non_null_docs() {
+                        doc_bitset.insert(doc);
+                    }
+                }
+            }
+        }
+        let docset = BitSetDocSet::from(doc_bitset);
+        Ok(Box::new(ConstScorer::new(docset, boost)))
     }

     fn explain(&self, reader: &SegmentReader, doc: DocId) -> crate::Result<Explanation> {
@@ -294,6 +333,43 @@ mod tests {
         Ok(())
     }

+    #[test]
+    fn test_exists_query_json_union_no_single_full_subpath() -> crate::Result<()> {
+        // Build docs where no single subpath exists for all docs, but the union does.
+        let mut schema_builder = Schema::builder();
+        let json = schema_builder.add_json_field("json", TEXT | FAST);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        {
+            let mut index_writer = index.writer_for_tests()?;
+            for i in 0u64..100u64 {
+                if i % 2 == 0 {
+                    // only subpath `a`
+                    index_writer.add_document(doc!(json => json!({"a": i})))?;
+                } else {
+                    // only subpath `b`
+                    index_writer.add_document(doc!(json => json!({"b": i})))?;
+                }
+            }
+            index_writer.commit()?;
+        }
+
+        let reader = index.reader()?;
+        let searcher = reader.searcher();
+
+        // No single subpath is full
+        assert_eq!(count_existing_fields(&searcher, "json.a", false)?, 50);
+        assert_eq!(count_existing_fields(&searcher, "json.b", false)?, 50);
+        // Root exists with subpaths disabled is zero
+        assert_eq!(count_existing_fields(&searcher, "json", false)?, 0);
+        // Root exists with subpaths enabled should match all docs via union
+        assert_eq!(count_existing_fields(&searcher, "json", true)?, 100);
+        Ok(())
+    }
+
     #[test]
     fn test_exists_query_misc_supported_types() -> crate::Result<()> {
         let mut schema_builder = Schema::builder();