Compare commits

...

7 Commits

Author SHA1 Message Date
Pascal Seitz
e2dae2f433 fix out of order bug 2024-06-25 08:35:58 +08:00
PSeitz
59084143ef use optional index in multivalued index (#2439)
* use optional index in multivalued index

For mostly empty multivalued indices, there was a large overhead during
creation from iterating over all docids. This is alleviated by placing an
optional index inside the multivalued index to mark the documents that have
values.

There is some performance overhead when accessing values in a multivalued
index: the access cost is now optional index + multivalued index. The
sparse codec performs relatively badly with its binary_search when
accessing data. This is reflected in the benchmarks below.

This bumps the columnar format to v2; code is added to keep handling the v1
format.
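
To make the change concrete, here is a minimal sketch of the v2 layout,
using plain `Vec`-based stand-ins rather than the actual tantivy-columnar
types and codecs: the optional index records which docids have values, and
start offsets are stored only for those docids, so a lookup resolves the
doc's rank first and then reads the compacted offsets.

```rust
// Hypothetical sketch of the v2 multivalued index; not the real types.
struct MultiValueIndexV2Sketch {
    /// "Optional index": sorted docids that have at least one value.
    docs_with_values: Vec<u32>,
    /// Compacted offsets: start_offsets[rank]..start_offsets[rank + 1]
    /// is the value range of the rank-th docid that has values.
    start_offsets: Vec<u32>,
}

impl MultiValueIndexV2Sketch {
    /// Resolving a doc's value range is now a two-step lookup:
    /// optional index (rank) first, then the start offsets.
    fn value_range(&self, doc_id: u32) -> std::ops::Range<u32> {
        match self.docs_with_values.binary_search(&doc_id) {
            Ok(rank) => self.start_offsets[rank]..self.start_offsets[rank + 1],
            Err(_) => 0..0, // doc has no values
        }
    }
}

fn main() {
    // Seven docs, of which only docs 1 and 5 carry values (two each):
    // the mostly-empty shape this change optimizes for.
    let index = MultiValueIndexV2Sketch {
        docs_with_values: vec![1, 5],
        start_offsets: vec![0, 2, 4],
    };
    assert_eq!(index.value_range(1), 0..2);
    assert_eq!(index.value_range(3), 0..0); // no values recorded
    assert_eq!(index.value_range(5), 2..4);
}
```

The binary_search here stands in for the optional index's rank lookup,
which is where the extra access cost described above shows up for the
sparse codec.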

```
     Running benches/bench_access.rs (/home/pascal/Development/tantivy/optional_multivalues/target/release/deps/bench_access-ea323c028db88db4)
multi sparse 1/13
access_values_for_doc        Avg: 42.8946ms (+241.80%)    Median: 42.8869ms (+244.10%)    [42.7484ms .. 43.1074ms]
access_first_vals            Avg: 42.8022ms (+421.93%)    Median: 42.7553ms (+439.84%)    [42.6794ms .. 43.7404ms]
multi 2x
access_values_for_doc        Avg: 31.1244ms (+24.17%)    Median: 30.8339ms (+23.46%)    [30.7192ms .. 33.6059ms]
access_first_vals            Avg: 24.3070ms (+70.92%)    Median: 24.0966ms (+70.18%)    [23.9328ms .. 26.4851ms]
sparse 1/13
access_values_for_doc        Avg: 42.2490ms (+0.61%)    Median: 42.2346ms (+2.28%)    [41.8988ms .. 43.7821ms]
access_first_vals            Avg: 43.6272ms (+0.23%)    Median: 43.6197ms (+1.78%)    [43.4920ms .. 43.9009ms]
dense 1/12
access_values_for_doc        Avg: 8.6184ms (+23.18%)    Median: 8.6126ms (+23.78%)    [8.5843ms .. 8.7527ms]
access_first_vals            Avg: 6.8112ms (+4.47%)     Median: 6.8002ms (+4.55%)     [6.7887ms .. 6.8991ms]
full
access_values_for_doc        Avg: 9.4073ms (-5.09%)    Median: 9.4023ms (-2.23%)    [9.3694ms .. 9.4568ms]
access_first_vals            Avg: 4.9531ms (+6.24%)    Median: 4.9502ms (+7.85%)    [4.9423ms .. 4.9718ms]
```

```
     Running benches/bench_merge.rs (/home/pascal/Development/tantivy/optional_multivalues/target/release/deps/bench_merge-475697dfceb3639f)
merge_multi 2x_and_multi 2x                          Avg: 20.2280ms (+34.33%)    Median: 20.1829ms (+35.33%)    [19.9933ms .. 20.8806ms]
merge_multi sparse 1/13_and_multi sparse 1/13        Avg: 0.8961ms (-78.04%)     Median: 0.8943ms (-77.61%)     [0.8899ms .. 0.9272ms]
merge_dense 1/12_and_dense 1/12                      Avg: 0.6619ms (-1.26%)      Median: 0.6616ms (+2.20%)      [0.6473ms .. 0.6837ms]
merge_sparse 1/13_and_sparse 1/13                    Avg: 0.5508ms (-0.85%)      Median: 0.5508ms (+2.80%)      [0.5420ms .. 0.5634ms]
merge_sparse 1/13_and_dense 1/12                     Avg: 0.6046ms (-4.64%)      Median: 0.6038ms (+2.80%)      [0.5939ms .. 0.6296ms]
merge_multi sparse 1/13_and_dense 1/12               Avg: 0.9111ms (-83.48%)     Median: 0.9063ms (-83.50%)     [0.9047ms .. 0.9663ms]
merge_multi sparse 1/13_and_sparse 1/13              Avg: 0.8451ms (-89.49%)     Median: 0.8428ms (-89.43%)     [0.8411ms .. 0.8563ms]
merge_multi 2x_and_dense 1/12                        Avg: 10.6624ms (-4.82%)     Median: 10.6568ms (-4.49%)     [10.5738ms .. 10.8353ms]
merge_multi 2x_and_sparse 1/13                       Avg: 10.6336ms (-22.95%)    Median: 10.5925ms (-22.33%)    [10.5149ms .. 11.5657ms]
```

* Update columnar/src/columnar/format_version.rs

Co-authored-by: Paul Masurel <paul@quickwit.io>

* Update columnar/src/column_index/mod.rs

Co-authored-by: Paul Masurel <paul@quickwit.io>

---------

Co-authored-by: Paul Masurel <paul@quickwit.io>
2024-06-19 14:54:12 +08:00
PSeitz
511b027350 update columnar bench (#2438)
* update columnar bench

* fix compile
2024-06-14 10:42:35 +08:00
Philippe Noël
322f47eb47 Add ParadeDB to Companies List (#1) (#2437)
* Add ParadeDB logo
2024-06-14 09:12:58 +09:00
PSeitz
72f61ff89c remove index sorting (#2434)
closes https://github.com/quickwit-oss/tantivy/issues/2352
2024-06-13 15:51:53 +08:00
PSeitz
a141c3ec59 add columnar format compatibility tests (#2433)
* add columnar format compatibility tests

* always try to write current format
2024-06-13 15:04:52 +08:00
PSeitz
e90e7a25ae add access benchmark for columnar (#2432) 2024-06-12 14:29:15 +08:00
59 changed files with 1514 additions and 2735 deletions

View File

@@ -101,7 +101,8 @@ cargo test
## Companies Using Tantivy
<p align="left">
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" />&nbsp;
<img align="center" src="doc/assets/images/etsy.png" alt="Etsy" height="25" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/paradedb.png" alt="ParadeDB" height="25" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/Nuclia.png#gh-light-mode-only" alt="Nuclia" height="25" width="auto" /> &nbsp;
<img align="center" src="doc/assets/images/humanfirst.png#gh-light-mode-only" alt="Humanfirst.ai" height="30" width="auto" />
<img align="center" src="doc/assets/images/element.io.svg#gh-light-mode-only" alt="Element.io" height="25" width="auto" />

View File

@@ -29,6 +29,10 @@ binggan = "0.8.1"
name = "bench_merge"
harness = false
[[bench]]
name = "bench_access"
harness = false
[features]
unstable = []

View File

@@ -0,0 +1,67 @@
use binggan::{black_box, InputGroup};
use common::*;
use tantivy_columnar::Column;
pub mod common;
const NUM_DOCS: u32 = 2_000_000;
pub fn generate_columnar_and_open(card: Card, num_docs: u32) -> Column {
let reader = generate_columnar_with_name(card, num_docs, "price");
reader.read_columns("price").unwrap()[0]
.open_u64_lenient()
.unwrap()
.unwrap()
}
fn main() {
let mut inputs = Vec::new();
let mut add_card = |card1: Card| {
inputs.push((
format!("{card1}"),
generate_columnar_and_open(card1, NUM_DOCS),
));
};
add_card(Card::MultiSparse);
add_card(Card::Multi);
add_card(Card::Sparse);
add_card(Card::Dense);
add_card(Card::Full);
bench_group(InputGroup::new_with_inputs(inputs));
}
fn bench_group(mut runner: InputGroup<Column>) {
runner.register("access_values_for_doc", |column| {
let mut sum = 0;
for i in 0..NUM_DOCS {
for value in column.values_for_doc(i) {
sum += value;
}
}
black_box(sum);
});
runner.register("access_first_vals", |column| {
let mut sum = 0;
const BLOCK_SIZE: usize = 32;
let mut docs = vec![0; BLOCK_SIZE];
let mut buffer = vec![None; BLOCK_SIZE];
for i in (0..NUM_DOCS).step_by(BLOCK_SIZE) {
// fill docs
for idx in 0..BLOCK_SIZE {
docs[idx] = idx as u32 + i;
}
column.first_vals(&docs, &mut buffer);
for val in buffer.iter() {
let Some(val) = val else { continue };
sum += *val;
}
}
black_box(sum);
});
runner.run();
}

View File

@@ -31,7 +31,7 @@ fn get_test_columns() -> Columns {
}
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(data.len() as u32, None, &mut buffer)
.serialize(data.len() as u32, &mut buffer)
.unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();

View File

@@ -1,62 +1,11 @@
#![feature(test)]
extern crate test;
use core::fmt;
use std::fmt::{Display, Formatter};
pub mod common;
use binggan::{black_box, BenchRunner};
use common::{generate_columnar_with_name, Card};
use tantivy_columnar::*;
enum Card {
Multi,
Sparse,
Dense,
}
impl Display for Card {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Card::Multi => write!(f, "multi"),
Card::Sparse => write!(f, "sparse"),
Card::Dense => write!(f, "dense"),
}
}
}
const NUM_DOCS: u32 = 100_000;
fn generate_columnar(card: Card, num_docs: u32) -> ColumnarReader {
use tantivy_columnar::ColumnarWriter;
let mut columnar_writer = ColumnarWriter::default();
match card {
Card::Multi => {
columnar_writer.record_numerical(0, "price", 10u64);
columnar_writer.record_numerical(0, "price", 10u64);
}
_ => {}
}
for i in 0..num_docs {
match card {
Card::Multi | Card::Sparse => {
if i % 8 == 0 {
columnar_writer.record_numerical(i, "price", i as u64);
}
}
Card::Dense => {
if i % 6 == 0 {
columnar_writer.record_numerical(i, "price", i as u64);
}
}
}
}
let mut wrt: Vec<u8> = Vec::new();
columnar_writer.serialize(num_docs, None, &mut wrt).unwrap();
ColumnarReader::open(wrt).unwrap()
}
fn main() {
let mut inputs = Vec::new();
@@ -64,16 +13,19 @@ fn main() {
inputs.push((
format!("merge_{card1}_and_{card2}"),
vec![
generate_columnar(card1, NUM_DOCS),
generate_columnar(card2, NUM_DOCS),
generate_columnar_with_name(card1, NUM_DOCS, "price"),
generate_columnar_with_name(card2, NUM_DOCS, "price"),
],
));
};
add_combo(Card::Multi, Card::Multi);
add_combo(Card::MultiSparse, Card::MultiSparse);
add_combo(Card::Dense, Card::Dense);
add_combo(Card::Sparse, Card::Sparse);
add_combo(Card::Sparse, Card::Dense);
add_combo(Card::MultiSparse, Card::Dense);
add_combo(Card::MultiSparse, Card::Sparse);
add_combo(Card::Multi, Card::Dense);
add_combo(Card::Multi, Card::Sparse);
@@ -84,16 +36,12 @@ fn main() {
input_name,
columnar_readers,
move |columnar_readers: &Vec<ColumnarReader>| {
let mut out = vec![];
let mut out = Vec::new();
let columnar_readers = columnar_readers.iter().collect::<Vec<_>>();
let merge_row_order = StackMergeOrder::stack(&columnar_readers[..]);
let _ = black_box(merge_columnar(
&columnar_readers,
&[],
merge_row_order.into(),
&mut out,
));
merge_columnar(&columnar_readers, &[], merge_row_order.into(), &mut out).unwrap();
black_box(out);
},
);
}

View File

@@ -0,0 +1,59 @@
extern crate tantivy_columnar;
use core::fmt;
use std::fmt::{Display, Formatter};
use tantivy_columnar::{ColumnarReader, ColumnarWriter};
pub enum Card {
MultiSparse,
Multi,
Sparse,
Dense,
Full,
}
impl Display for Card {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Card::MultiSparse => write!(f, "multi sparse 1/13"),
Card::Multi => write!(f, "multi 2x"),
Card::Sparse => write!(f, "sparse 1/13"),
Card::Dense => write!(f, "dense 1/12"),
Card::Full => write!(f, "full"),
}
}
}
pub fn generate_columnar_with_name(card: Card, num_docs: u32, column_name: &str) -> ColumnarReader {
let mut columnar_writer = ColumnarWriter::default();
if let Card::MultiSparse = card {
columnar_writer.record_numerical(0, column_name, 10u64);
columnar_writer.record_numerical(0, column_name, 10u64);
}
for i in 0..num_docs {
match card {
Card::MultiSparse | Card::Sparse => {
if i % 13 == 0 {
columnar_writer.record_numerical(i, column_name, i as u64);
}
}
Card::Dense => {
if i % 12 == 0 {
columnar_writer.record_numerical(i, column_name, i as u64);
}
}
Card::Full => {
columnar_writer.record_numerical(i, column_name, i as u64);
}
Card::Multi => {
columnar_writer.record_numerical(i, column_name, i as u64);
columnar_writer.record_numerical(i, column_name, i as u64);
}
}
}
let mut wrt: Vec<u8> = Vec::new();
columnar_writer.serialize(num_docs, &mut wrt).unwrap();
ColumnarReader::open(wrt).unwrap()
}

Binary file not shown.

Binary file not shown.

View File

@@ -136,7 +136,7 @@ impl<T: PartialOrd + Copy + Debug + Send + Sync + 'static> Column<T> {
.map(|value_row_id: RowId| self.values.get_val(value_row_id))
}
/// Get the docids of values which are in the provided value range.
/// Get the docids of values which are in the provided value and docid range.
#[inline]
pub fn get_docids_for_value_range(
&self,

View File

@@ -12,7 +12,7 @@ use crate::column_values::{
CodecType, MonotonicallyMappableToU128, MonotonicallyMappableToU64,
};
use crate::iterable::Iterable;
use crate::StrColumn;
use crate::{StrColumn, Version};
pub fn serialize_column_mappable_to_u128<T: MonotonicallyMappableToU128>(
column_index: SerializableColumnIndex<'_>,
@@ -40,25 +40,9 @@ pub fn serialize_column_mappable_to_u64<T: MonotonicallyMappableToU64>(
Ok(())
}
pub fn open_column_u64<T: MonotonicallyMappableToU64>(bytes: OwnedBytes) -> io::Result<Column<T>> {
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
let column_index_num_bytes = u32::from_le_bytes(
column_index_num_bytes_payload
.as_slice()
.try_into()
.unwrap(),
);
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_values = load_u64_based_column_values(column_values_data)?;
Ok(Column {
index: column_index,
values: column_values,
})
}
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
pub fn open_column_u64<T: MonotonicallyMappableToU64>(
bytes: OwnedBytes,
format_version: Version,
) -> io::Result<Column<T>> {
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
let column_index_num_bytes = u32::from_le_bytes(
@@ -68,7 +52,27 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
.unwrap(),
);
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_index = crate::column_index::open_column_index(column_index_data, format_version)?;
let column_values = load_u64_based_column_values(column_values_data)?;
Ok(Column {
index: column_index,
values: column_values,
})
}
pub fn open_column_u128<T: MonotonicallyMappableToU128>(
bytes: OwnedBytes,
format_version: Version,
) -> io::Result<Column<T>> {
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
let column_index_num_bytes = u32::from_le_bytes(
column_index_num_bytes_payload
.as_slice()
.try_into()
.unwrap(),
);
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
let column_index = crate::column_index::open_column_index(column_index_data, format_version)?;
let column_values = crate::column_values::open_u128_mapped(column_values_data)?;
Ok(Column {
index: column_index,
@@ -79,7 +83,10 @@ pub fn open_column_u128<T: MonotonicallyMappableToU128>(
/// Open the column as u64.
///
/// See [`open_u128_as_compact_u64`] for more details.
pub fn open_column_u128_as_compact_u64(bytes: OwnedBytes) -> io::Result<Column<u64>> {
pub fn open_column_u128_as_compact_u64(
bytes: OwnedBytes,
format_version: Version,
) -> io::Result<Column<u64>> {
let (body, column_index_num_bytes_payload) = bytes.rsplit(4);
let column_index_num_bytes = u32::from_le_bytes(
column_index_num_bytes_payload
@@ -88,7 +95,7 @@ pub fn open_column_u128_as_compact_u64(bytes: OwnedBytes) -> io::Result<Column<u
.unwrap(),
);
let (column_index_data, column_values_data) = body.split(column_index_num_bytes as usize);
let column_index = crate::column_index::open_column_index(column_index_data)?;
let column_index = crate::column_index::open_column_index(column_index_data, format_version)?;
let column_values = crate::column_values::open_u128_as_compact_u64(column_values_data)?;
Ok(Column {
index: column_index,
@@ -96,19 +103,19 @@ pub fn open_column_u128_as_compact_u64(bytes: OwnedBytes) -> io::Result<Column<u
})
}
pub fn open_column_bytes(data: OwnedBytes) -> io::Result<BytesColumn> {
pub fn open_column_bytes(data: OwnedBytes, format_version: Version) -> io::Result<BytesColumn> {
let (body, dictionary_len_bytes) = data.rsplit(4);
let dictionary_len = u32::from_le_bytes(dictionary_len_bytes.as_slice().try_into().unwrap());
let (dictionary_bytes, column_bytes) = body.split(dictionary_len as usize);
let dictionary = Arc::new(Dictionary::from_bytes(dictionary_bytes)?);
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes)?;
let term_ord_column = crate::column::open_column_u64::<u64>(column_bytes, format_version)?;
Ok(BytesColumn {
dictionary,
term_ord_column,
})
}
pub fn open_column_str(data: OwnedBytes) -> io::Result<StrColumn> {
let bytes_column = open_column_bytes(data)?;
pub fn open_column_str(data: OwnedBytes, format_version: Version) -> io::Result<StrColumn> {
let bytes_column = open_column_bytes(data, format_version)?;
Ok(StrColumn::wrap(bytes_column))
}

View File

@@ -95,8 +95,12 @@ pub fn merge_column_index<'a>(
#[cfg(test)]
mod tests {
use common::OwnedBytes;
use crate::column_index::merge::detect_cardinality;
use crate::column_index::multivalued_index::MultiValueIndex;
use crate::column_index::multivalued_index::{
open_multivalued_index, serialize_multivalued_index, MultiValueIndex,
};
use crate::column_index::{merge_column_index, OptionalIndex, SerializableColumnIndex};
use crate::{
Cardinality, ColumnIndex, MergeRowOrder, RowAddr, RowId, ShuffleMergeOrder, StackMergeOrder,
@@ -171,7 +175,11 @@ mod tests {
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
panic!("Excpected a multivalued index")
};
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
let mut output = Vec::new();
serialize_multivalued_index(&start_index_iterable, &mut output).unwrap();
let multivalue =
open_multivalued_index(OwnedBytes::new(output), crate::Version::V2).unwrap();
let start_indexes: Vec<RowId> = multivalue.get_start_index_column().iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5]);
}
@@ -200,11 +208,16 @@ mod tests {
],
)
.into();
let merged_column_index = merge_column_index(&column_indexes[..], &merge_row_order);
let SerializableColumnIndex::Multivalued(start_index_iterable) = merged_column_index else {
panic!("Excpected a multivalued index")
};
let start_indexes: Vec<RowId> = start_index_iterable.boxed_iter().collect();
let mut output = Vec::new();
serialize_multivalued_index(&start_index_iterable, &mut output).unwrap();
let multivalue =
open_multivalued_index(OwnedBytes::new(output), crate::Version::V2).unwrap();
let start_indexes: Vec<RowId> = multivalue.get_start_index_column().iter().collect();
assert_eq!(&start_indexes, &[0, 3, 5, 6]);
}
}

View File

@@ -1,6 +1,8 @@
use std::iter;
use crate::column_index::{SerializableColumnIndex, Set};
use crate::column_index::{
SerializableColumnIndex, SerializableMultivalueIndex, SerializableOptionalIndex, Set,
};
use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, ShuffleMergeOrder};
@@ -14,15 +16,24 @@ pub fn merge_column_index_shuffled<'a>(
Cardinality::Optional => {
let non_null_row_ids =
merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
SerializableColumnIndex::Optional {
SerializableColumnIndex::Optional(SerializableOptionalIndex {
non_null_row_ids,
num_rows: shuffle_merge_order.num_rows(),
}
})
}
Cardinality::Multivalued => {
let multivalue_start_index =
merge_column_index_shuffled_multivalued(column_indexes, shuffle_merge_order);
SerializableColumnIndex::Multivalued(multivalue_start_index)
let non_null_row_ids =
merge_column_index_shuffled_optional(column_indexes, shuffle_merge_order);
SerializableColumnIndex::Multivalued(SerializableMultivalueIndex {
doc_ids_with_values: SerializableOptionalIndex {
non_null_row_ids,
num_rows: shuffle_merge_order.num_rows(),
},
start_offsets: merge_column_index_shuffled_multivalued(
column_indexes,
shuffle_merge_order,
),
})
}
}
}
@@ -102,11 +113,18 @@ fn iter_num_values<'a>(
/// Transforms an iterator containing the number of vals per row (with `num_rows` elements)
/// into a `start_offset` iterator starting at 0 and (with `num_rows + 1` element)
///
/// This filters out rows with 0 values, as these are covered by the optional
/// index in the multivalued index.
fn integrate_num_vals(num_vals: impl Iterator<Item = u32>) -> impl Iterator<Item = RowId> {
iter::once(0u32).chain(num_vals.scan(0, |state, num_vals| {
*state += num_vals;
Some(*state)
}))
iter::once(0u32).chain(
num_vals
.filter(|num_vals| *num_vals != 0)
.scan(0, |state, num_vals| {
*state += num_vals;
Some(*state)
}),
)
}
impl<'a> Iterable<u32> for ShuffledMultivaluedIndex<'a> {
@@ -134,7 +152,7 @@ mod tests {
#[test]
fn test_integrate_num_vals_several() {
assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 3, 13, 33].into_iter()));
assert!(integrate_num_vals([3, 0, 10, 20].into_iter()).eq([0, 3, 13, 33].into_iter()));
}
#[test]
@@ -157,10 +175,10 @@ mod tests {
Cardinality::Optional,
&shuffle_merge_order,
);
let SerializableColumnIndex::Optional {
let SerializableColumnIndex::Optional(SerializableOptionalIndex {
non_null_row_ids,
num_rows,
} = serializable_index
}) = serializable_index
else {
panic!()
};

View File

@@ -1,6 +1,8 @@
use std::iter;
use std::ops::Range;
use crate::column_index::{SerializableColumnIndex, Set};
use crate::column_index::multivalued_index::{MultiValueIndex, SerializableMultivalueIndex};
use crate::column_index::serialize::SerializableOptionalIndex;
use crate::column_index::SerializableColumnIndex;
use crate::iterable::Iterable;
use crate::{Cardinality, ColumnIndex, RowId, StackMergeOrder};
@@ -15,23 +17,149 @@ pub fn merge_column_index_stacked<'a>(
) -> SerializableColumnIndex<'a> {
match cardinality_after_merge {
Cardinality::Full => SerializableColumnIndex::Full,
Cardinality::Optional => SerializableColumnIndex::Optional {
Cardinality::Optional => SerializableColumnIndex::Optional(SerializableOptionalIndex {
non_null_row_ids: Box::new(StackedOptionalIndex {
columns,
stack_merge_order,
}),
num_rows: stack_merge_order.num_rows(),
},
}),
Cardinality::Multivalued => {
let stacked_multivalued_index = StackedMultivaluedIndex {
columns,
stack_merge_order,
};
SerializableColumnIndex::Multivalued(Box::new(stacked_multivalued_index))
let serializable_multivalue_index =
make_serializable_multivalued_index(columns, stack_merge_order);
SerializableColumnIndex::Multivalued(serializable_multivalue_index)
}
}
}
struct StackedDocIdsWithValues<'a> {
column_indexes: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
}
impl Iterable<u32> for StackedDocIdsWithValues<'_> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
Box::new((0..self.column_indexes.len()).flat_map(|i| {
let column_index = &self.column_indexes[i];
let doc_range = self.stack_merge_order.columnar_range(i);
get_doc_ids_with_values(column_index, doc_range)
}))
}
}
fn get_doc_ids_with_values<'a>(
column_index: &'a ColumnIndex,
doc_range: Range<u32>,
) -> Box<dyn Iterator<Item = u32> + 'a> {
match column_index {
ColumnIndex::Empty { .. } => Box::new(0..0),
ColumnIndex::Full => Box::new(doc_range),
ColumnIndex::Optional(optional_index) => Box::new(
optional_index
.iter_rows()
.map(move |row| row + doc_range.start),
),
ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
MultiValueIndex::MultiValueIndexV1(multivalued_index) => {
Box::new((0..multivalued_index.num_docs()).filter_map(move |docid| {
let range = multivalued_index.range(docid);
if range.is_empty() {
None
} else {
Some(docid + doc_range.start)
}
}))
}
MultiValueIndex::MultiValueIndexV2(multivalued_index) => Box::new(
multivalued_index
.optional_index
.iter_rows()
.map(move |row| row + doc_range.start),
),
},
}
}
fn stack_doc_ids_with_values<'a>(
column_indexes: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
) -> SerializableOptionalIndex<'a> {
let num_rows = stack_merge_order.num_rows();
SerializableOptionalIndex {
non_null_row_ids: Box::new(StackedDocIdsWithValues {
column_indexes,
stack_merge_order,
}),
num_rows,
}
}
struct StackedStartOffsets<'a> {
column_indexes: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
}
fn get_num_values_iterator<'a>(
column_index: &'a ColumnIndex,
num_docs: u32,
) -> Box<dyn Iterator<Item = u32> + 'a> {
match column_index {
ColumnIndex::Empty { .. } => Box::new(std::iter::empty()),
ColumnIndex::Full => Box::new(std::iter::repeat(1u32).take(num_docs as usize)),
ColumnIndex::Optional(optional_index) => {
Box::new(std::iter::repeat(1u32).take(optional_index.num_non_nulls() as usize))
}
ColumnIndex::Multivalued(multivalued_index) => Box::new(
multivalued_index
.get_start_index_column()
.iter()
.scan(0u32, |previous_start_offset, current_start_offset| {
let num_vals = current_start_offset - *previous_start_offset;
*previous_start_offset = current_start_offset;
Some(num_vals)
})
.skip(1),
),
}
}
impl<'a> Iterable<u32> for StackedStartOffsets<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
let num_values_it = (0..self.column_indexes.len()).flat_map(|columnar_id| {
let num_docs = self.stack_merge_order.columnar_range(columnar_id).len() as u32;
let column_index = &self.column_indexes[columnar_id];
get_num_values_iterator(column_index, num_docs)
});
Box::new(std::iter::once(0u32).chain(num_values_it.into_iter().scan(
0u32,
|cumulated, el| {
*cumulated += el;
Some(*cumulated)
},
)))
}
}
fn stack_start_offsets<'a>(
column_indexes: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
) -> Box<dyn Iterable<u32> + 'a> {
Box::new(StackedStartOffsets {
column_indexes,
stack_merge_order,
})
}
fn make_serializable_multivalued_index<'a>(
columns: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
) -> SerializableMultivalueIndex<'a> {
SerializableMultivalueIndex {
doc_ids_with_values: stack_doc_ids_with_values(columns, stack_merge_order),
start_offsets: stack_start_offsets(columns, stack_merge_order),
}
}
struct StackedOptionalIndex<'a> {
columns: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
@@ -62,87 +190,3 @@ impl<'a> Iterable<RowId> for StackedOptionalIndex<'a> {
)
}
}
#[derive(Clone, Copy)]
struct StackedMultivaluedIndex<'a> {
columns: &'a [ColumnIndex],
stack_merge_order: &'a StackMergeOrder,
}
fn convert_column_opt_to_multivalued_index<'a>(
column_index_opt: &'a ColumnIndex,
num_rows: RowId,
) -> Box<dyn Iterator<Item = RowId> + 'a> {
match column_index_opt {
ColumnIndex::Empty { .. } => Box::new(iter::repeat(0u32).take(num_rows as usize + 1)),
ColumnIndex::Full => Box::new(0..num_rows + 1),
ColumnIndex::Optional(optional_index) => {
Box::new(
(0..num_rows)
// TODO optimize
.map(|row_id| optional_index.rank(row_id))
.chain(std::iter::once(optional_index.num_non_nulls())),
)
}
ColumnIndex::Multivalued(multivalued_index) => multivalued_index.start_index_column.iter(),
}
}
impl<'a> Iterable<RowId> for StackedMultivaluedIndex<'a> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = RowId> + '_> {
let multivalued_indexes =
self.columns
.iter()
.enumerate()
.map(|(columnar_id, column_opt)| {
let num_rows =
self.stack_merge_order.columnar_range(columnar_id).len() as RowId;
convert_column_opt_to_multivalued_index(column_opt, num_rows)
});
stack_multivalued_indexes(multivalued_indexes)
}
}
// Refactor me
fn stack_multivalued_indexes<'a>(
mut multivalued_indexes: impl Iterator<Item = Box<dyn Iterator<Item = RowId> + 'a>> + 'a,
) -> Box<dyn Iterator<Item = RowId> + 'a> {
let mut offset = 0;
let mut last_row_id = 0;
let mut current_it = multivalued_indexes.next();
Box::new(std::iter::from_fn(move || loop {
if let Some(row_id) = current_it.as_mut()?.next() {
last_row_id = offset + row_id;
return Some(last_row_id);
}
offset = last_row_id;
loop {
current_it = multivalued_indexes.next();
if current_it.as_mut()?.next().is_some() {
break;
}
}
}))
}
#[cfg(test)]
mod tests {
use crate::RowId;
fn it<'a>(row_ids: &'a [RowId]) -> Box<dyn Iterator<Item = RowId> + 'a> {
Box::new(row_ids.iter().copied())
}
#[test]
fn test_stack() {
let columns = [
it(&[0u32, 0u32]),
it(&[0u32, 1u32, 1u32, 4u32]),
it(&[0u32, 3u32, 5u32]),
it(&[0u32, 4u32]),
]
.into_iter();
let start_offsets: Vec<RowId> = super::stack_multivalued_indexes(columns).collect();
assert_eq!(start_offsets, &[0, 0, 1, 1, 4, 7, 9, 13]);
}
}

View File

@@ -11,8 +11,11 @@ mod serialize;
use std::ops::Range;
pub use merge::merge_column_index;
pub(crate) use multivalued_index::SerializableMultivalueIndex;
pub use optional_index::{OptionalIndex, Set};
pub use serialize::{open_column_index, serialize_column_index, SerializableColumnIndex};
pub use serialize::{
open_column_index, serialize_column_index, SerializableColumnIndex, SerializableOptionalIndex,
};
use crate::column_index::multivalued_index::MultiValueIndex;
use crate::{Cardinality, DocId, RowId};
@@ -131,15 +134,41 @@ impl ColumnIndex {
let row_end = optional_index.rank(doc_id_range.end);
row_start..row_end
}
ColumnIndex::Multivalued(multivalued_index) => {
let end_docid = doc_id_range.end.min(multivalued_index.num_docs() - 1) + 1;
let start_docid = doc_id_range.start.min(end_docid);
ColumnIndex::Multivalued(multivalued_index) => match multivalued_index {
MultiValueIndex::MultiValueIndexV1(index) => {
let row_start = index.start_index_column.get_val(doc_id_range.start);
let row_end = index.start_index_column.get_val(doc_id_range.end);
row_start..row_end
}
MultiValueIndex::MultiValueIndexV2(index) => {
// In this case we use the optional_index to select the next valid
// values. There are several cases to consider ("does not exist"
// below means the docid is absent from the optional index because
// it has no values):
// * doc_id_range may cover a range of docids none of which exist
//   => rank gives us, for both bounds, the next document outside the
//   range with a value. Both bounds get the same rank and therefore
//   return an empty range.
// * doc_id_range.start and doc_id_range.end may not exist, but docids
//   in between may have values
//   => rank gives us the next document outside the range with a value.
// * doc_id_range.start may not exist but doc_id_range.end may exist
// * doc_id_range.start may exist but doc_id_range.end may not exist
// * doc_id_range.start and doc_id_range.end may both exist
//   => rank on doc_id_range.end gives us the next value, which matches
//   how the `start_index_column` works: we get the value start of the
//   next docid, which we use to create the exclusive range.
//
let rank_start = index.optional_index.rank(doc_id_range.start);
let row_start = index.start_index_column.get_val(rank_start);
let rank_end = index.optional_index.rank(doc_id_range.end);
let row_end = index.start_index_column.get_val(rank_end);
let row_start = multivalued_index.start_index_column.get_val(start_docid);
let row_end = multivalued_index.start_index_column.get_val(end_docid);
row_start..row_end
}
row_start..row_end
}
},
}
}

View File

@@ -3,64 +3,98 @@ use std::io::Write;
use std::ops::Range;
use std::sync::Arc;
use common::OwnedBytes;
use common::{CountingWriter, OwnedBytes};
use super::optional_index::{open_optional_index, serialize_optional_index};
use super::{OptionalIndex, SerializableOptionalIndex, Set};
use crate::column_values::{
load_u64_based_column_values, serialize_u64_based_column_values, CodecType, ColumnValues,
};
use crate::iterable::Iterable;
use crate::{DocId, RowId};
use crate::{DocId, RowId, Version};
pub struct SerializableMultivalueIndex<'a> {
pub doc_ids_with_values: SerializableOptionalIndex<'a>,
pub start_offsets: Box<dyn Iterable<u32> + 'a>,
}
pub fn serialize_multivalued_index(
multivalued_index: &dyn Iterable<RowId>,
multivalued_index: &SerializableMultivalueIndex,
output: &mut impl Write,
) -> io::Result<()> {
let SerializableMultivalueIndex {
doc_ids_with_values,
start_offsets,
} = multivalued_index;
let mut count_writer = CountingWriter::wrap(output);
let SerializableOptionalIndex {
non_null_row_ids,
num_rows,
} = doc_ids_with_values;
serialize_optional_index(&**non_null_row_ids, *num_rows, &mut count_writer)?;
let optional_len = count_writer.written_bytes() as u32;
let output = count_writer.finish();
serialize_u64_based_column_values(
multivalued_index,
&**start_offsets,
&[CodecType::Bitpacked, CodecType::Linear],
output,
)?;
output.write_all(&optional_len.to_le_bytes())?;
Ok(())
}
pub fn open_multivalued_index(bytes: OwnedBytes) -> io::Result<MultiValueIndex> {
let start_index_column: Arc<dyn ColumnValues<RowId>> = load_u64_based_column_values(bytes)?;
Ok(MultiValueIndex { start_index_column })
pub fn open_multivalued_index(
bytes: OwnedBytes,
format_version: Version,
) -> io::Result<MultiValueIndex> {
match format_version {
Version::V1 => {
let start_index_column: Arc<dyn ColumnValues<RowId>> =
load_u64_based_column_values(bytes)?;
Ok(MultiValueIndex::MultiValueIndexV1(MultiValueIndexV1 {
start_index_column,
}))
}
Version::V2 => {
let (body_bytes, optional_index_len) = bytes.rsplit(4);
let optional_index_len =
u32::from_le_bytes(optional_index_len.as_slice().try_into().unwrap());
let (optional_index_bytes, start_index_bytes) =
body_bytes.split(optional_index_len as usize);
let optional_index = open_optional_index(optional_index_bytes)?;
let start_index_column: Arc<dyn ColumnValues<RowId>> =
load_u64_based_column_values(start_index_bytes)?;
Ok(MultiValueIndex::MultiValueIndexV2(MultiValueIndexV2 {
optional_index,
start_index_column,
}))
}
}
}
#[derive(Clone)]
/// Index to resolve value range for given doc_id.
/// Starts at 0.
pub struct MultiValueIndex {
pub enum MultiValueIndex {
MultiValueIndexV1(MultiValueIndexV1),
MultiValueIndexV2(MultiValueIndexV2),
}
#[derive(Clone)]
/// Index to resolve value range for given doc_id.
/// Starts at 0.
pub struct MultiValueIndexV1 {
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
}
impl std::fmt::Debug for MultiValueIndex {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("MultiValuedIndex")
.field("num_rows", &self.start_index_column.num_vals())
.finish_non_exhaustive()
}
}
impl From<Arc<dyn ColumnValues<RowId>>> for MultiValueIndex {
fn from(start_index_column: Arc<dyn ColumnValues<RowId>>) -> Self {
MultiValueIndex { start_index_column }
}
}
impl MultiValueIndex {
pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
let mut buffer = Vec::new();
serialize_multivalued_index(&start_offsets, &mut buffer).unwrap();
let bytes = OwnedBytes::new(buffer);
open_multivalued_index(bytes).unwrap()
}
impl MultiValueIndexV1 {
/// Returns `[start, end)`, such that the values associated with
/// the given document are `start..end`.
#[inline]
pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
if doc_id >= self.num_docs() {
return 0..0;
}
let start = self.start_index_column.get_val(doc_id);
let end = self.start_index_column.get_val(doc_id + 1);
start..end
@@ -83,7 +117,6 @@ impl MultiValueIndex {
///
/// TODO: Instead of a linear scan we can employ an exponential search into binary search to
/// match a docid to its value position.
#[allow(clippy::bool_to_int_with_if)]
pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
if ranks.is_empty() {
return;
@@ -111,11 +144,170 @@ impl MultiValueIndex {
}
}
#[derive(Clone)]
/// Index to resolve value range for given doc_id.
/// Starts at 0.
pub struct MultiValueIndexV2 {
pub optional_index: OptionalIndex,
pub start_index_column: Arc<dyn crate::ColumnValues<RowId>>,
}
impl std::fmt::Debug for MultiValueIndex {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let index = match self {
MultiValueIndex::MultiValueIndexV1(idx) => &idx.start_index_column,
MultiValueIndex::MultiValueIndexV2(idx) => &idx.start_index_column,
};
f.debug_struct("MultiValuedIndex")
.field("num_rows", &index.num_vals())
.finish_non_exhaustive()
}
}
impl MultiValueIndex {
pub fn for_test(start_offsets: &[RowId]) -> MultiValueIndex {
assert!(!start_offsets.is_empty());
assert_eq!(start_offsets[0], 0);
let mut doc_with_values = Vec::new();
let mut compact_start_offsets: Vec<u32> = vec![0];
for doc in 0..start_offsets.len() - 1 {
if start_offsets[doc] < start_offsets[doc + 1] {
doc_with_values.push(doc as RowId);
compact_start_offsets.push(start_offsets[doc + 1]);
}
}
let serializable_multivalued_index = SerializableMultivalueIndex {
doc_ids_with_values: SerializableOptionalIndex {
non_null_row_ids: Box::new(&doc_with_values[..]),
num_rows: start_offsets.len() as u32 - 1,
},
start_offsets: Box::new(&compact_start_offsets[..]),
};
let mut buffer = Vec::new();
serialize_multivalued_index(&serializable_multivalued_index, &mut buffer).unwrap();
let bytes = OwnedBytes::new(buffer);
open_multivalued_index(bytes, Version::V2).unwrap()
}
pub fn get_start_index_column(&self) -> &Arc<dyn crate::ColumnValues<RowId>> {
match self {
MultiValueIndex::MultiValueIndexV1(idx) => &idx.start_index_column,
MultiValueIndex::MultiValueIndexV2(idx) => &idx.start_index_column,
}
}
/// Returns `[start, end)` values range, such that the values associated with
/// the given document are `start..end`.
#[inline]
pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
match self {
MultiValueIndex::MultiValueIndexV1(idx) => idx.range(doc_id),
MultiValueIndex::MultiValueIndexV2(idx) => idx.range(doc_id),
}
}
/// Returns the number of documents in the index.
#[inline]
pub fn num_docs(&self) -> u32 {
match self {
MultiValueIndex::MultiValueIndexV1(idx) => idx.start_index_column.num_vals() - 1,
MultiValueIndex::MultiValueIndexV2(idx) => idx.optional_index.num_docs(),
}
}
/// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
/// docids. Positions are converted inplace to docids.
///
/// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the
/// index.
///
/// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically
/// increasing positions.
///
/// TODO: Instead of a linear scan we can employ an exponential search into binary search to
/// match a docid to its value position.
pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
match self {
MultiValueIndex::MultiValueIndexV1(idx) => {
idx.select_batch_in_place(docid_start, ranks)
}
MultiValueIndex::MultiValueIndexV2(idx) => {
idx.select_batch_in_place(docid_start, ranks)
}
}
}
}
impl MultiValueIndexV2 {
/// Returns `[start, end)`, such that the values associated with
/// the given document are `start..end`.
#[inline]
pub(crate) fn range(&self, doc_id: DocId) -> Range<RowId> {
let Some(rank) = self.optional_index.rank_if_exists(doc_id) else {
return 0..0;
};
let start = self.start_index_column.get_val(rank);
let end = self.start_index_column.get_val(rank + 1);
start..end
}
/// Returns the number of documents in the index.
#[inline]
pub fn num_docs(&self) -> u32 {
self.optional_index.num_docs()
}
/// Converts a list of ranks (row ids of values) in a 1:n index to the corresponding list of
/// docids. Positions are converted inplace to docids.
///
/// Since there is no index for value pos -> docid, but docid -> value pos range, we scan the
/// index.
///
/// Correctness: positions needs to be sorted. idx_reader needs to contain monotonically
/// increasing positions.
///
/// TODO: Instead of a linear scan we can employ an exponential search into binary search to
/// match a docid to its value position.
pub(crate) fn select_batch_in_place(&self, docid_start: DocId, ranks: &mut Vec<u32>) {
if ranks.is_empty() {
return;
}
let mut cur_pos_in_idx = self.optional_index.rank(docid_start);
let mut last_doc = None;
assert!(cur_pos_in_idx <= ranks[0]);
let mut write_doc_pos = 0;
for i in 0..ranks.len() {
let pos = ranks[i];
loop {
let end = self.start_index_column.get_val(cur_pos_in_idx + 1);
if end > pos {
ranks[write_doc_pos] = cur_pos_in_idx;
write_doc_pos += if last_doc == Some(cur_pos_in_idx) {
0
} else {
1
};
last_doc = Some(cur_pos_in_idx);
break;
}
cur_pos_in_idx += 1;
}
}
ranks.truncate(write_doc_pos);
for rank in ranks.iter_mut() {
*rank = self.optional_index.select(*rank);
}
}
}
#[cfg(test)]
mod tests {
use std::ops::Range;
use super::MultiValueIndex;
use crate::{ColumnarReader, DynamicColumn};
fn index_to_pos_helper(
index: &MultiValueIndex,
@@ -134,6 +326,7 @@ mod tests {
let positions = &[10u32, 11, 15, 20, 21, 22];
assert_eq!(index_to_pos_helper(&index, 0..5, positions), vec![1, 3, 4]);
assert_eq!(index_to_pos_helper(&index, 1..5, positions), vec![1, 3, 4]);
assert_eq!(index_to_pos_helper(&index, 0..5, &[9]), vec![0]);
assert_eq!(index_to_pos_helper(&index, 1..5, &[10]), vec![1]);
assert_eq!(index_to_pos_helper(&index, 1..5, &[11]), vec![1]);
@@ -141,4 +334,67 @@ mod tests {
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14]), vec![2]);
assert_eq!(index_to_pos_helper(&index, 2..5, &[12, 14, 15]), vec![2, 3]);
}
#[test]
fn test_range_to_rowids() {
use crate::ColumnarWriter;
let mut columnar_writer = ColumnarWriter::default();
// This column gets coerced to u64
columnar_writer.record_numerical(1, "full", u64::MAX);
columnar_writer.record_numerical(1, "full", u64::MAX);
columnar_writer.record_numerical(5, "full", u64::MAX);
columnar_writer.record_numerical(5, "full", u64::MAX);
let mut wrt: Vec<u8> = Vec::new();
columnar_writer.serialize(7, &mut wrt).unwrap();
let reader = ColumnarReader::open(wrt).unwrap();
// Open the column as u64
let column = reader.read_columns("full").unwrap()[0]
.open()
.unwrap()
.coerce_numerical(crate::NumericalType::U64)
.unwrap();
let DynamicColumn::U64(column) = column else {
panic!();
};
let row_id_range = column.index.docid_range_to_rowids(1..2);
assert_eq!(row_id_range, 0..2);
let row_id_range = column.index.docid_range_to_rowids(0..2);
assert_eq!(row_id_range, 0..2);
let row_id_range = column.index.docid_range_to_rowids(0..4);
assert_eq!(row_id_range, 0..2);
let row_id_range = column.index.docid_range_to_rowids(3..4);
assert_eq!(row_id_range, 2..2);
let row_id_range = column.index.docid_range_to_rowids(1..6);
assert_eq!(row_id_range, 0..4);
let row_id_range = column.index.docid_range_to_rowids(3..6);
assert_eq!(row_id_range, 2..4);
let row_id_range = column.index.docid_range_to_rowids(0..6);
assert_eq!(row_id_range, 0..4);
let row_id_range = column.index.docid_range_to_rowids(0..6);
assert_eq!(row_id_range, 0..4);
let check = |range, expected| {
let full_range = 0..=u64::MAX;
let mut docids = Vec::new();
column.get_docids_for_value_range(full_range, range, &mut docids);
assert_eq!(docids, expected);
};
// check(0..1, vec![]);
// check(0..2, vec![1]);
check(1..2, vec![1]);
}
}

View File

@@ -86,8 +86,14 @@ pub struct OptionalIndex {
block_metas: Arc<[BlockMeta]>,
}
impl<'a> Iterable<u32> for &'a OptionalIndex {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u32> + '_> {
Box::new(self.iter_rows())
}
}
impl std::fmt::Debug for OptionalIndex {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("OptionalIndex")
.field("num_rows", &self.num_rows)
.field("num_non_null_rows", &self.num_non_null_rows)

View File

@@ -28,10 +28,11 @@ pub trait Set<T> {
/// Returns true if the elements is contained in the Set
fn contains(&self, el: T) -> bool;
/// Returns the number of rows in the set that are < `el`
/// Returns the element's rank (its position in the set).
/// If the set does not contain the element, it will return the next existing element's rank.
fn rank(&self, el: T) -> T;
/// If the set contains `el` returns the element rank.
/// If the set contains `el`, returns the element's rank (its position in the set).
/// If the set does not contain the element, it returns `None`.
fn rank_if_exists(&self, el: T) -> Option<T>;

View File

@@ -22,8 +22,8 @@ fn test_set_helper<C: SetCodec<Item = u16>>(vals: &[u16]) -> usize {
vals.iter().cloned().take_while(|v| *v < val).count() as u16
);
}
for rank in 0..vals.len() {
assert_eq!(tested_set.select(rank as u16), vals[rank]);
for (rank, val) in vals.iter().enumerate() {
assert_eq!(tested_set.select(rank as u16), *val);
}
buffer.len()
}
@@ -107,3 +107,41 @@ fn test_simple_translate_codec_idx_to_original_idx_dense() {
assert_eq!(i, select_cursor.select(i));
}
}
#[test]
fn test_simple_translate_idx_to_value_idx_dense() {
let mut buffer = Vec::new();
DenseBlockCodec::serialize([1, 10].iter().copied(), &mut buffer).unwrap();
let tested_set = DenseBlockCodec::open(buffer.as_slice());
assert!(tested_set.contains(1));
assert!(!tested_set.contains(2));
assert_eq!(tested_set.rank(0), 0);
assert_eq!(tested_set.rank(1), 0);
for rank in 2..10 {
// values that don't exist get the rank of the next existing value
assert_eq!(tested_set.rank_if_exists(rank), None);
assert_eq!(tested_set.rank(rank), 1);
}
assert_eq!(tested_set.rank(10), 1);
}
#[test]
fn test_simple_translate_idx_to_value_idx_sparse() {
let mut buffer = Vec::new();
SparseBlockCodec::serialize([1, 10].iter().copied(), &mut buffer).unwrap();
let tested_set = SparseBlockCodec::open(buffer.as_slice());
assert!(tested_set.contains(1));
assert!(!tested_set.contains(2));
assert_eq!(tested_set.rank(0), 0);
assert_eq!(tested_set.select(tested_set.rank(0)), 1);
assert_eq!(tested_set.rank(1), 0);
assert_eq!(tested_set.select(tested_set.rank(1)), 1);
for rank in 2..10 {
// values that don't exist get the rank of the next existing value
assert_eq!(tested_set.rank_if_exists(rank), None);
assert_eq!(tested_set.rank(rank), 1);
assert_eq!(tested_set.select(tested_set.rank(rank)), 10);
}
assert_eq!(tested_set.rank(10), 1);
assert_eq!(tested_set.select(tested_set.rank(10)), 10);
}

View File

@@ -15,9 +15,7 @@ fn test_optional_index_with_num_docs(num_docs: u32) {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(100, "score", 80i64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(num_docs, None, &mut buffer)
.unwrap();
dataframe_writer.serialize(num_docs, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("score").unwrap();

View File

@@ -3,28 +3,39 @@ use std::io::Write;
use common::{CountingWriter, OwnedBytes};
use super::multivalued_index::SerializableMultivalueIndex;
use super::OptionalIndex;
use crate::column_index::multivalued_index::serialize_multivalued_index;
use crate::column_index::optional_index::serialize_optional_index;
use crate::column_index::ColumnIndex;
use crate::iterable::Iterable;
use crate::{Cardinality, RowId};
use crate::{Cardinality, RowId, Version};
pub struct SerializableOptionalIndex<'a> {
pub non_null_row_ids: Box<dyn Iterable<RowId> + 'a>,
pub num_rows: RowId,
}
impl<'a> From<&'a OptionalIndex> for SerializableOptionalIndex<'a> {
fn from(optional_index: &'a OptionalIndex) -> Self {
SerializableOptionalIndex {
non_null_row_ids: Box::new(optional_index),
num_rows: optional_index.num_docs(),
}
}
}
pub enum SerializableColumnIndex<'a> {
Full,
Optional {
non_null_row_ids: Box<dyn Iterable<RowId> + 'a>,
num_rows: RowId,
},
// TODO remove the Arc<dyn> apart from serialization this is not
// dynamic at all.
Multivalued(Box<dyn Iterable<RowId> + 'a>),
Optional(SerializableOptionalIndex<'a>),
Multivalued(SerializableMultivalueIndex<'a>),
}
impl<'a> SerializableColumnIndex<'a> {
pub fn get_cardinality(&self) -> Cardinality {
match self {
SerializableColumnIndex::Full => Cardinality::Full,
SerializableColumnIndex::Optional { .. } => Cardinality::Optional,
SerializableColumnIndex::Optional(_) => Cardinality::Optional,
SerializableColumnIndex::Multivalued(_) => Cardinality::Multivalued,
}
}
@@ -40,12 +51,12 @@ pub fn serialize_column_index(
output.write_all(&[cardinality])?;
match column_index {
SerializableColumnIndex::Full => {}
SerializableColumnIndex::Optional {
SerializableColumnIndex::Optional(SerializableOptionalIndex {
non_null_row_ids,
num_rows,
} => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
}) => serialize_optional_index(non_null_row_ids.as_ref(), num_rows, &mut output)?,
SerializableColumnIndex::Multivalued(multivalued_index) => {
serialize_multivalued_index(&*multivalued_index, &mut output)?
serialize_multivalued_index(&multivalued_index, &mut output)?
}
}
let column_index_num_bytes = output.written_bytes() as u32;
@@ -53,7 +64,10 @@ pub fn serialize_column_index(
}
/// Open a serialized column index.
pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
pub fn open_column_index(
mut bytes: OwnedBytes,
format_version: Version,
) -> io::Result<ColumnIndex> {
if bytes.is_empty() {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
@@ -70,7 +84,8 @@ pub fn open_column_index(mut bytes: OwnedBytes) -> io::Result<ColumnIndex> {
Ok(ColumnIndex::Optional(optional_index))
}
Cardinality::Multivalued => {
let multivalue_index = super::multivalued_index::open_multivalued_index(bytes)?;
let multivalue_index =
super::multivalued_index::open_multivalued_index(bytes, format_version)?;
Ok(ColumnIndex::Multivalued(multivalue_index))
}
}

View File

@@ -1,3 +1,6 @@
use core::fmt;
use std::fmt::{Display, Formatter};
use crate::InvalidData;
pub const VERSION_FOOTER_NUM_BYTES: usize = MAGIC_BYTES.len() + std::mem::size_of::<u32>();
@@ -8,7 +11,7 @@ const MAGIC_BYTES: [u8; 4] = [2, 113, 119, 66];
pub fn footer() -> [u8; VERSION_FOOTER_NUM_BYTES] {
let mut footer_bytes = [0u8; VERSION_FOOTER_NUM_BYTES];
footer_bytes[0..4].copy_from_slice(&Version::V1.to_bytes());
footer_bytes[0..4].copy_from_slice(&CURRENT_VERSION.to_bytes());
footer_bytes[4..8].copy_from_slice(&MAGIC_BYTES[..]);
footer_bytes
}
@@ -20,10 +23,22 @@ pub fn parse_footer(footer_bytes: [u8; VERSION_FOOTER_NUM_BYTES]) -> Result<Vers
Version::try_from_bytes(footer_bytes[0..4].try_into().unwrap())
}
pub const CURRENT_VERSION: Version = Version::V2;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u32)]
pub enum Version {
V1 = 1u32,
V2 = 2u32,
}
impl Display for Version {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Version::V1 => write!(f, "v1"),
Version::V2 => write!(f, "v2"),
}
}
}
impl Version {
@@ -35,6 +50,7 @@ impl Version {
let code = u32::from_le_bytes(bytes);
match code {
1u32 => Ok(Version::V1),
2u32 => Ok(Version::V2),
_ => Err(InvalidData),
}
}
@@ -47,9 +63,9 @@ mod tests {
use super::*;
#[test]
fn test_footer_dserialization() {
fn test_footer_deserialization() {
let parsed_version: Version = parse_footer(footer()).unwrap();
assert_eq!(Version::V1, parsed_version);
assert_eq!(Version::V2, parsed_version);
}
#[test]
@@ -63,11 +79,10 @@ mod tests {
for &i in &version_to_tests {
let version_res = Version::try_from_bytes(i.to_le_bytes());
if let Ok(version) = version_res {
assert_eq!(version, Version::V1);
assert_eq!(version.to_bytes(), i.to_le_bytes());
valid_versions.insert(i);
}
}
assert_eq!(valid_versions.len(), 1);
assert_eq!(valid_versions.len(), 2);
}
}

View File

@@ -7,7 +7,6 @@ use std::io;
use std::net::Ipv6Addr;
use std::sync::Arc;
use itertools::Itertools;
pub use merge_mapping::{MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};
use super::writer::ColumnarSerializer;
@@ -371,20 +370,8 @@ fn is_empty_after_merge(
true
}
ColumnIndex::Multivalued(multivalued_index) => {
for (doc_id, (start_index, end_index)) in multivalued_index
.start_index_column
.iter()
.tuple_windows()
.enumerate()
{
let doc_id = doc_id as u32;
if start_index == end_index {
// There are no values in this document
continue;
}
// The document contains values and is present in the alive bitset.
// The column is therefore not empty.
if alive_bitset.contains(doc_id) {
for alive_docid in alive_bitset.iter() {
if !multivalued_index.range(alive_docid).is_empty() {
return false;
}
}

View File

@@ -1,3 +1,5 @@
use itertools::Itertools;
use super::*;
use crate::{Cardinality, ColumnarWriter, HasAssociatedColumnType, RowId};
@@ -12,7 +14,7 @@ fn make_columnar<T: Into<NumericalValue> + HasAssociatedColumnType + Copy>(
}
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(vals.len() as RowId, None, &mut buffer)
.serialize(vals.len() as RowId, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap()
}
@@ -157,9 +159,7 @@ fn make_numerical_columnar_multiple_columns(
.max()
.unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
.unwrap();
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
}
@@ -182,9 +182,7 @@ fn make_byte_columnar_multiple_columns(
}
}
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
.unwrap();
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
}
@@ -203,9 +201,7 @@ fn make_text_columnar_multiple_columns(columns: &[(&str, &[&[&str]])]) -> Column
.max()
.unwrap_or(0u32);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer
.serialize(num_rows, None, &mut buffer)
.unwrap();
dataframe_writer.serialize(num_rows, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
}

View File

@@ -5,6 +5,7 @@ mod reader;
mod writer;
pub use column_type::{ColumnType, HasAssociatedColumnType};
pub use format_version::{Version, CURRENT_VERSION};
#[cfg(test)]
pub(crate) use merge::ColumnTypeCategory;
pub use merge::{merge_columnar, MergeRowOrder, ShuffleMergeOrder, StackMergeOrder};

View File

@@ -6,7 +6,7 @@ use sstable::{Dictionary, RangeSSTable};
use crate::columnar::{format_version, ColumnType};
use crate::dynamic_column::DynamicColumnHandle;
use crate::RowId;
use crate::{RowId, Version};
fn io_invalid_data(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, msg)
@@ -19,6 +19,7 @@ pub struct ColumnarReader {
column_dictionary: Dictionary<RangeSSTable>,
column_data: FileSlice,
num_rows: RowId,
format_version: Version,
}
impl fmt::Debug for ColumnarReader {
@@ -53,6 +54,7 @@ impl fmt::Debug for ColumnarReader {
fn read_all_columns_in_stream(
mut stream: sstable::Streamer<'_, RangeSSTable>,
column_data: &FileSlice,
format_version: Version,
) -> io::Result<Vec<DynamicColumnHandle>> {
let mut results = Vec::new();
while stream.advance() {
@@ -67,6 +69,7 @@ fn read_all_columns_in_stream(
let dynamic_column_handle = DynamicColumnHandle {
file_slice,
column_type,
format_version,
};
results.push(dynamic_column_handle);
}
@@ -88,7 +91,7 @@ impl ColumnarReader {
let num_rows = u32::deserialize(&mut &footer_bytes[8..12])?;
let version_footer_bytes: [u8; format_version::VERSION_FOOTER_NUM_BYTES] =
footer_bytes[12..].try_into().unwrap();
let _version = format_version::parse_footer(version_footer_bytes)?;
let format_version = format_version::parse_footer(version_footer_bytes)?;
let (column_data, sstable) =
file_slice_without_sstable_len.split_from_end(sstable_len as usize);
let column_dictionary = Dictionary::open(sstable)?;
@@ -96,6 +99,7 @@ impl ColumnarReader {
column_dictionary,
column_data,
num_rows,
format_version,
})
}
@@ -126,6 +130,7 @@ impl ColumnarReader {
let column_handle = DynamicColumnHandle {
file_slice,
column_type,
format_version: self.format_version,
};
Some((column_name, column_handle))
} else {
@@ -167,7 +172,7 @@ impl ColumnarReader {
.stream_for_column_range(column_name)
.into_stream_async()
.await?;
read_all_columns_in_stream(stream, &self.column_data)
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
}
/// Get all columns for the given column name.
@@ -176,7 +181,7 @@ impl ColumnarReader {
/// different types.
pub fn read_columns(&self, column_name: &str) -> io::Result<Vec<DynamicColumnHandle>> {
let stream = self.stream_for_column_range(column_name).into_stream()?;
read_all_columns_in_stream(stream, &self.column_data)
read_all_columns_in_stream(stream, &self.column_data, self.format_version)
}
/// Return the number of columns in the columnar.
@@ -195,7 +200,7 @@ mod tests {
columnar_writer.record_column_type("col1", ColumnType::Str, false);
columnar_writer.record_column_type("col2", ColumnType::U64, false);
let mut buffer = Vec::new();
columnar_writer.serialize(1, None, &mut buffer).unwrap();
columnar_writer.serialize(1, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
let columns = columnar.list_columns().unwrap();
assert_eq!(columns.len(), 2);
@@ -211,7 +216,7 @@ mod tests {
columnar_writer.record_column_type("count", ColumnType::U64, false);
columnar_writer.record_numerical(1, "count", 1u64);
let mut buffer = Vec::new();
columnar_writer.serialize(2, None, &mut buffer).unwrap();
columnar_writer.serialize(2, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
let columns = columnar.list_columns().unwrap();
assert_eq!(columns.len(), 1);

View File

@@ -41,31 +41,10 @@ impl ColumnWriter {
pub(super) fn operation_iterator<'a, V: SymbolValue>(
&self,
arena: &MemoryArena,
old_to_new_ids_opt: Option<&[RowId]>,
buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<V>> + 'a {
buffer.clear();
self.values.read_to_end(arena, buffer);
if let Some(old_to_new_ids) = old_to_new_ids_opt {
// TODO avoid the extra deserialization / serialization.
let mut sorted_ops: Vec<(RowId, ColumnOperation<V>)> = Vec::new();
let mut new_doc = 0u32;
let mut cursor = &buffer[..];
for op in std::iter::from_fn(|| ColumnOperation::<V>::deserialize(&mut cursor)) {
if let ColumnOperation::NewDoc(doc) = &op {
new_doc = old_to_new_ids[*doc as usize];
sorted_ops.push((new_doc, ColumnOperation::NewDoc(new_doc)));
} else {
sorted_ops.push((new_doc, op));
}
}
// stable sort is crucial here.
sorted_ops.sort_by_key(|(new_doc_id, _)| *new_doc_id);
buffer.clear();
for (_, op) in sorted_ops {
buffer.extend_from_slice(op.serialize().as_ref());
}
}
let mut cursor: &[u8] = &buffer[..];
std::iter::from_fn(move || ColumnOperation::deserialize(&mut cursor))
}
@@ -231,11 +210,9 @@ impl NumericalColumnWriter {
pub(super) fn operation_iterator<'a>(
self,
arena: &MemoryArena,
old_to_new_ids: Option<&[RowId]>,
buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<NumericalValue>> + 'a {
self.column_writer
.operation_iterator(arena, old_to_new_ids, buffer)
self.column_writer.operation_iterator(arena, buffer)
}
}
@@ -277,11 +254,9 @@ impl StrOrBytesColumnWriter {
pub(super) fn operation_iterator<'a>(
&self,
arena: &MemoryArena,
old_to_new_ids: Option<&[RowId]>,
byte_buffer: &'a mut Vec<u8>,
) -> impl Iterator<Item = ColumnOperation<UnorderedId>> + 'a {
self.column_writer
.operation_iterator(arena, old_to_new_ids, byte_buffer)
self.column_writer.operation_iterator(arena, byte_buffer)
}
}

View File

@@ -12,7 +12,7 @@ use common::CountingWriter;
pub(crate) use serializer::ColumnarSerializer;
use stacker::{Addr, ArenaHashMap, MemoryArena};
use crate::column_index::SerializableColumnIndex;
use crate::column_index::{SerializableColumnIndex, SerializableOptionalIndex};
use crate::column_values::{MonotonicallyMappableToU128, MonotonicallyMappableToU64};
use crate::columnar::column_type::ColumnType;
use crate::columnar::writer::column_writers::{
@@ -43,7 +43,7 @@ struct SpareBuffers {
/// columnar_writer.record_str(1u32 /* doc id */, "product_name", "Apple");
/// columnar_writer.record_numerical(0u32 /* doc id */, "price", 10.5f64); //< uh oh, we ended up mixing integers and floats.
/// let mut wrt: Vec<u8> = Vec::new();
/// columnar_writer.serialize(2u32, None, &mut wrt).unwrap();
/// columnar_writer.serialize(2u32, &mut wrt).unwrap();
/// ```
#[derive(Default)]
pub struct ColumnarWriter {
@@ -75,63 +75,6 @@ impl ColumnarWriter {
.sum::<usize>()
}
/// Returns the list of doc ids from 0..num_docs sorted by the `sort_field`
/// column.
///
/// If the column is multivalued, the first value is used for scoring.
/// If no value is associated with a given row, the document is assigned
/// the lowest possible score.
///
/// The sort applied is stable.
pub fn sort_order(&self, sort_field: &str, num_docs: RowId, reversed: bool) -> Vec<u32> {
let Some(numerical_col_writer) = self
.numerical_field_hash_map
.get::<NumericalColumnWriter>(sort_field.as_bytes())
.or_else(|| {
self.datetime_field_hash_map
.get::<NumericalColumnWriter>(sort_field.as_bytes())
})
else {
return Vec::new();
};
let mut symbols_buffer = Vec::new();
let mut values = Vec::new();
let mut start_doc_check_fill = 0;
let mut current_doc_opt: Option<RowId> = None;
// Assumption: NewDoc is never emitted twice for the same doc, and doc ids are
// strictly increasing between calls.
for op in numerical_col_writer.operation_iterator(&self.arena, None, &mut symbols_buffer) {
match op {
ColumnOperation::NewDoc(doc) => {
current_doc_opt = Some(doc);
}
ColumnOperation::Value(numerical_value) => {
if let Some(current_doc) = current_doc_opt {
// Fill up with 0.0 since last doc
values.extend((start_doc_check_fill..current_doc).map(|doc| (0.0, doc)));
start_doc_check_fill = current_doc + 1;
// handle multi values
current_doc_opt = None;
let score: f32 = f64::coerce(numerical_value) as f32;
values.push((score, current_doc));
}
}
}
}
for doc in values.len() as u32..num_docs {
values.push((0.0f32, doc));
}
values.sort_by(|(left_score, _), (right_score, _)| {
if reversed {
right_score.total_cmp(left_score)
} else {
left_score.total_cmp(right_score)
}
});
values.into_iter().map(|(_score, doc)| doc).collect()
}
/// Records a column type. This is useful to bypass the coercion process,
/// to make sure the column is present in the resulting columnar even if it
/// is empty, or to set `sort_values_within_row`.
@@ -302,13 +245,9 @@ impl ColumnarWriter {
},
);
}
pub fn serialize(
&mut self,
num_docs: RowId,
old_to_new_row_ids: Option<&[RowId]>,
wrt: &mut dyn io::Write,
) -> io::Result<()> {
pub fn serialize(&mut self, num_docs: RowId, wrt: &mut dyn io::Write) -> io::Result<()> {
let mut serializer = ColumnarSerializer::new(wrt);
let mut columns: Vec<(&[u8], ColumnType, Addr)> = self
.numerical_field_hash_map
.iter()
@@ -322,7 +261,7 @@ impl ColumnarWriter {
columns.extend(
self.bytes_field_hash_map
.iter()
.map(|(term, addr)| (term, ColumnType::Bytes, addr)),
.map(|(column_name, addr)| (column_name, ColumnType::Bytes, addr)),
);
columns.extend(
self.str_field_hash_map
@@ -344,6 +283,7 @@ impl ColumnarWriter {
.iter()
.map(|(column_name, addr)| (column_name, ColumnType::DateTime, addr)),
);
// TODO: replace JSON_END_OF_PATH with b'0' in columns
columns.sort_unstable_by_key(|(column_name, col_type, _)| (*column_name, *col_type));
let (arena, buffers, dictionaries) = (&self.arena, &mut self.buffers, &self.dictionaries);
@@ -358,11 +298,7 @@ impl ColumnarWriter {
serialize_bool_column(
cardinality,
num_docs,
column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
@@ -376,11 +312,7 @@ impl ColumnarWriter {
serialize_ip_addr_column(
cardinality,
num_docs,
column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
@@ -405,11 +337,8 @@ impl ColumnarWriter {
num_docs,
str_or_bytes_column_writer.sort_values_within_row,
dictionary_builder,
str_or_bytes_column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
str_or_bytes_column_writer
.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&self.arena,
&mut column_serializer,
@@ -427,11 +356,7 @@ impl ColumnarWriter {
cardinality,
num_docs,
numerical_type,
numerical_column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
numerical_column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
@@ -446,11 +371,7 @@ impl ColumnarWriter {
cardinality,
num_docs,
NumericalType::I64,
column_writer.operation_iterator(
arena,
old_to_new_row_ids,
&mut symbol_byte_buffer,
),
column_writer.operation_iterator(arena, &mut symbol_byte_buffer),
buffers,
&mut column_serializer,
)?;
@@ -635,16 +556,16 @@ fn send_to_serialize_column_mappable_to_u128<
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
consume_operation_iterator(op_iterator, optional_index_builder, values);
let optional_index = optional_index_builder.finish(num_rows);
SerializableColumnIndex::Optional {
SerializableColumnIndex::Optional(SerializableOptionalIndex {
num_rows,
non_null_row_ids: Box::new(optional_index),
}
})
}
Cardinality::Multivalued => {
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_rows);
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
let serializable_multivalued_index = multivalued_index_builder.finish(num_rows);
SerializableColumnIndex::Multivalued(serializable_multivalued_index)
}
};
crate::column::serialize_column_mappable_to_u128(
@@ -655,15 +576,6 @@ fn send_to_serialize_column_mappable_to_u128<
Ok(())
}
fn sort_values_within_row_in_place(multivalued_index: &[RowId], values: &mut [u64]) {
let mut start_index: usize = 0;
for end_index in multivalued_index.iter().copied() {
let end_index = end_index as usize;
values[start_index..end_index].sort_unstable();
start_index = end_index;
}
}
fn send_to_serialize_column_mappable_to_u64(
op_iterator: impl Iterator<Item = ColumnOperation<u64>>,
cardinality: Cardinality,
@@ -687,19 +599,22 @@ fn send_to_serialize_column_mappable_to_u64(
let optional_index_builder = value_index_builders.borrow_optional_index_builder();
consume_operation_iterator(op_iterator, optional_index_builder, values);
let optional_index = optional_index_builder.finish(num_rows);
SerializableColumnIndex::Optional {
SerializableColumnIndex::Optional(SerializableOptionalIndex {
non_null_row_ids: Box::new(optional_index),
num_rows,
}
})
}
Cardinality::Multivalued => {
let multivalued_index_builder = value_index_builders.borrow_multivalued_index_builder();
consume_operation_iterator(op_iterator, multivalued_index_builder, values);
let multivalued_index = multivalued_index_builder.finish(num_rows);
let serializable_multivalued_index = multivalued_index_builder.finish(num_rows);
if sort_values_within_row {
sort_values_within_row_in_place(multivalued_index, values);
sort_values_within_row_in_place(
serializable_multivalued_index.start_offsets.boxed_iter(),
values,
);
}
SerializableColumnIndex::Multivalued(Box::new(multivalued_index))
SerializableColumnIndex::Multivalued(serializable_multivalued_index)
}
};
crate::column::serialize_column_mappable_to_u64(
@@ -710,6 +625,18 @@ fn send_to_serialize_column_mappable_to_u64(
Ok(())
}
fn sort_values_within_row_in_place(
multivalued_index: impl Iterator<Item = RowId>,
values: &mut [u64],
) {
let mut start_index: usize = 0;
for end_index in multivalued_index {
let end_index = end_index as usize;
values[start_index..end_index].sort_unstable();
start_index = end_index;
}
}
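For a concrete sense of the offset convention this helper expects, here is a runnable copy with toy data; the `[0, 2, 3]` offsets mirror the shape the multivalued index builder produces (the values are illustrative, and `RowId` is spelled as `u32`):

```rust
fn sort_values_within_row_in_place(
    multivalued_index: impl Iterator<Item = u32>,
    values: &mut [u64],
) {
    let mut start_index: usize = 0;
    for end_index in multivalued_index {
        let end_index = end_index as usize;
        // Each row owns `values[start_index..end_index]`; only that slice is sorted.
        values[start_index..end_index].sort_unstable();
        start_index = end_index;
    }
}

fn main() {
    // Offsets [0, 2, 3]: row 0 owns values[0..2], row 1 owns values[2..3].
    let mut values = vec![9u64, 3, 7];
    sort_values_within_row_in_place([0u32, 2, 3].into_iter(), &mut values);
    assert_eq!(values, vec![3, 9, 7]); // row 0 was reordered, row 1 untouched
}
```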
fn coerce_numerical_symbol<T>(
operation_iterator: impl Iterator<Item = ColumnOperation<NumericalValue>>,
) -> impl Iterator<Item = ColumnOperation<u64>>
@@ -757,7 +684,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(3), Cardinality::Full);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&arena, None, &mut buffer)
.operation_iterator(&arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 6);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
@@ -786,7 +713,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(3), Cardinality::Optional);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&arena, None, &mut buffer)
.operation_iterator(&arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 4);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(1u32)));
@@ -809,7 +736,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(2), Cardinality::Optional);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&arena, None, &mut buffer)
.operation_iterator(&arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 2);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));
@@ -828,7 +755,7 @@ mod tests {
assert_eq!(column_writer.get_cardinality(1), Cardinality::Multivalued);
let mut buffer = Vec::new();
let symbols: Vec<ColumnOperation<NumericalValue>> = column_writer
.operation_iterator(&arena, None, &mut buffer)
.operation_iterator(&arena, &mut buffer)
.collect();
assert_eq!(symbols.len(), 3);
assert!(matches!(symbols[0], ColumnOperation::NewDoc(0u32)));

View File

@@ -1,6 +1,7 @@
use std::io;
use std::io::Write;
use common::json_path_writer::JSON_END_OF_PATH;
use common::{BinarySerializable, CountingWriter};
use sstable::value::RangeValueWriter;
use sstable::RangeSSTable;
@@ -18,13 +19,8 @@ pub struct ColumnarSerializer<W: io::Write> {
/// code.
fn prepare_key(key: &[u8], column_type: ColumnType, buffer: &mut Vec<u8>) {
buffer.clear();
// Convert 0 bytes to the character '0', as 0 bytes are reserved to mark the end of the path.
if key.contains(&0u8) {
buffer.extend(key.iter().map(|&b| if b == 0 { b'0' } else { b }));
} else {
buffer.extend_from_slice(key);
}
buffer.push(0u8);
buffer.extend_from_slice(key);
buffer.push(JSON_END_OF_PATH);
buffer.push(column_type.to_code());
}
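The new key layout is simply `column_name | JSON_END_OF_PATH | type_code`. A standalone sketch of the resulting bytes (assuming `JSON_END_OF_PATH` is the zero byte, as the removed comment above implies; the type code is illustrative):

```rust
const JSON_END_OF_PATH: u8 = 0; // assumed value; defined in common::json_path_writer

fn prepare_key_sketch(key: &[u8], column_type_code: u8, buffer: &mut Vec<u8>) {
    buffer.clear();
    buffer.extend_from_slice(key);
    buffer.push(JSON_END_OF_PATH);
    buffer.push(column_type_code);
}

fn main() {
    let mut buf = Vec::new();
    prepare_key_sketch(b"price", 42 /* illustrative type code */, &mut buf);
    assert_eq!(buf, b"price\x00\x2a");
}
```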

View File

@@ -1,3 +1,4 @@
use crate::column_index::{SerializableMultivalueIndex, SerializableOptionalIndex};
use crate::iterable::Iterable;
use crate::RowId;
@@ -59,31 +60,47 @@ impl IndexBuilder for OptionalIndexBuilder {
#[derive(Default)]
pub struct MultivaluedIndexBuilder {
start_offsets: Vec<RowId>,
doc_with_values: Vec<RowId>,
start_offsets: Vec<u32>,
total_num_vals_seen: u32,
current_row: RowId,
current_row_has_value: bool,
}
impl MultivaluedIndexBuilder {
pub fn finish(&mut self, num_docs: RowId) -> &[u32] {
self.start_offsets
.resize(num_docs as usize + 1, self.total_num_vals_seen);
&self.start_offsets[..]
pub fn finish(&mut self, num_docs: RowId) -> SerializableMultivalueIndex<'_> {
self.start_offsets.push(self.total_num_vals_seen);
let non_null_row_ids: Box<dyn Iterable<RowId>> = Box::new(&self.doc_with_values[..]);
SerializableMultivalueIndex {
doc_ids_with_values: SerializableOptionalIndex {
non_null_row_ids,
num_rows: num_docs,
},
start_offsets: Box::new(&self.start_offsets[..]),
}
}
fn reset(&mut self) {
self.doc_with_values.clear();
self.start_offsets.clear();
self.start_offsets.push(0u32);
self.total_num_vals_seen = 0;
self.current_row = 0;
self.current_row_has_value = false;
}
}
impl IndexBuilder for MultivaluedIndexBuilder {
fn record_row(&mut self, row_id: RowId) {
self.start_offsets
.resize(row_id as usize + 1, self.total_num_vals_seen);
self.current_row = row_id;
self.current_row_has_value = false;
}
fn record_value(&mut self) {
if !self.current_row_has_value {
self.current_row_has_value = true;
self.doc_with_values.push(self.current_row);
self.start_offsets.push(self.total_num_vals_seen);
}
self.total_num_vals_seen += 1;
}
}
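Because offsets are now recorded only for documents that actually carry values, `start_offsets` stays short for sparse columns; the optional index built from `doc_with_values` says which document each value range belongs to. A self-contained trace of the recording logic above (names and data are illustrative):

```rust
#[derive(Default)]
struct MiniBuilder {
    doc_with_values: Vec<u32>,
    start_offsets: Vec<u32>,
    total_num_vals_seen: u32,
    current_row: u32,
    current_row_has_value: bool,
}

impl MiniBuilder {
    fn record_row(&mut self, row_id: u32) {
        self.current_row = row_id;
        self.current_row_has_value = false;
    }
    fn record_value(&mut self) {
        if !self.current_row_has_value {
            // First value for this row: remember the row id and where
            // its value range starts.
            self.current_row_has_value = true;
            self.doc_with_values.push(self.current_row);
            self.start_offsets.push(self.total_num_vals_seen);
        }
        self.total_num_vals_seen += 1;
    }
    fn finish(&mut self) {
        // Close the final value range, as `finish` does above.
        self.start_offsets.push(self.total_num_vals_seen);
    }
}

fn main() {
    let mut b = MiniBuilder::default();
    b.record_row(1);
    b.record_value();
    b.record_value(); // doc 1 has two values
    b.record_row(2);
    b.record_value(); // doc 2 has one value
    b.finish();
    // Only docs with values appear; offsets delimit their value ranges.
    assert_eq!(b.doc_with_values, vec![1, 2]);
    assert_eq!(b.start_offsets, vec![0, 2, 3]);
}
```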
@@ -141,6 +158,32 @@ mod tests {
);
}
#[test]
fn test_multivalued_value_index_builder_simple() {
let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
{
multivalued_value_index_builder.record_row(0u32);
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_value();
let serialized_multivalue_index = multivalued_value_index_builder.finish(1u32);
let start_offsets: Vec<u32> = serialized_multivalue_index
.start_offsets
.boxed_iter()
.collect();
assert_eq!(&start_offsets, &[0, 2]);
}
multivalued_value_index_builder.reset();
multivalued_value_index_builder.record_row(0u32);
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_value();
let serialized_multivalue_index = multivalued_value_index_builder.finish(1u32);
let start_offsets: Vec<u32> = serialized_multivalue_index
.start_offsets
.boxed_iter()
.collect();
assert_eq!(&start_offsets, &[0, 2]);
}
#[test]
fn test_multivalued_value_index_builder() {
let mut multivalued_value_index_builder = MultivaluedIndexBuilder::default();
@@ -149,17 +192,15 @@ mod tests {
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_row(2u32);
multivalued_value_index_builder.record_value();
assert_eq!(
multivalued_value_index_builder.finish(4u32).to_vec(),
vec![0, 0, 2, 3, 3]
);
multivalued_value_index_builder.reset();
multivalued_value_index_builder.record_row(2u32);
multivalued_value_index_builder.record_value();
multivalued_value_index_builder.record_value();
assert_eq!(
multivalued_value_index_builder.finish(4u32).to_vec(),
vec![0, 0, 0, 2, 2]
);
let SerializableMultivalueIndex {
doc_ids_with_values,
start_offsets,
} = multivalued_value_index_builder.finish(4u32);
assert_eq!(doc_ids_with_values.num_rows, 4u32);
let doc_ids_with_values: Vec<u32> =
doc_ids_with_values.non_null_row_ids.boxed_iter().collect();
assert_eq!(&doc_ids_with_values, &[1u32, 2u32]);
let start_offsets: Vec<u32> = start_offsets.boxed_iter().collect();
assert_eq!(&start_offsets[..], &[0, 2, 3]);
}
}

View File

@@ -0,0 +1,183 @@
use std::path::PathBuf;
use itertools::Itertools;
use crate::{
merge_columnar, Cardinality, Column, ColumnarReader, DynamicColumn, StackMergeOrder,
CURRENT_VERSION,
};
const NUM_DOCS: u32 = u16::MAX as u32;
fn generate_columnar(num_docs: u32, value_offset: u64) -> Vec<u8> {
use crate::ColumnarWriter;
let mut columnar_writer = ColumnarWriter::default();
for i in 0..num_docs {
if i % 100 == 0 {
columnar_writer.record_numerical(i, "sparse", value_offset + i as u64);
}
if i % 5 == 0 {
columnar_writer.record_numerical(i, "dense", value_offset + i as u64);
}
columnar_writer.record_numerical(i, "full", value_offset + i as u64);
columnar_writer.record_numerical(i, "multi", value_offset + i as u64);
columnar_writer.record_numerical(i, "multi", value_offset + i as u64);
}
let mut wrt: Vec<u8> = Vec::new();
columnar_writer.serialize(num_docs, &mut wrt).unwrap();
wrt
}
#[test]
/// Writes a columnar for the CURRENT_VERSION to disk.
fn create_format() {
let version = CURRENT_VERSION.to_string();
let file_path = path_for_version(&version);
if PathBuf::from(file_path.clone()).exists() {
return;
}
let columnar = generate_columnar(NUM_DOCS, 0);
std::fs::write(file_path, columnar).unwrap();
}
fn path_for_version(version: &str) -> String {
format!("./compat_tests_data/{}.columnar", version)
}
#[test]
fn test_format_v1() {
let path = path_for_version("v1");
test_format(&path);
}
#[test]
fn test_format_v2() {
let path = path_for_version("v2");
test_format(&path);
}
fn test_format(path: &str) {
let file_content = std::fs::read(path).unwrap();
let reader = ColumnarReader::open(file_content).unwrap();
check_columns(&reader);
// Test merge
let reader2 = ColumnarReader::open(generate_columnar(NUM_DOCS, NUM_DOCS as u64)).unwrap();
let columnar_readers = vec![&reader, &reader2];
let merge_row_order = StackMergeOrder::stack(&columnar_readers[..]);
let mut out = Vec::new();
merge_columnar(&columnar_readers, &[], merge_row_order.into(), &mut out).unwrap();
let reader = ColumnarReader::open(out).unwrap();
check_columns(&reader);
}
fn check_columns(reader: &ColumnarReader) {
let column = open_column(reader, "full");
check_column(&column, |doc_id| vec![(doc_id, doc_id as u64).into()]);
assert_eq!(column.get_cardinality(), Cardinality::Full);
let column = open_column(reader, "multi");
check_column(&column, |doc_id| {
vec![
(doc_id * 2, doc_id as u64).into(),
(doc_id * 2 + 1, doc_id as u64).into(),
]
});
assert_eq!(column.get_cardinality(), Cardinality::Multivalued);
let column = open_column(reader, "sparse");
check_column(&column, |doc_id| {
if doc_id % 100 == 0 {
vec![(doc_id / 100, doc_id as u64).into()]
} else {
vec![]
}
});
assert_eq!(column.get_cardinality(), Cardinality::Optional);
let column = open_column(reader, "dense");
check_column(&column, |doc_id| {
if doc_id % 5 == 0 {
vec![(doc_id / 5, doc_id as u64).into()]
} else {
vec![]
}
});
assert_eq!(column.get_cardinality(), Cardinality::Optional);
}
struct RowIdAndValue {
row_id: u32,
value: u64,
}
impl From<(u32, u64)> for RowIdAndValue {
fn from((row_id, value): (u32, u64)) -> Self {
Self { row_id, value }
}
}
fn check_column<F: Fn(u32) -> Vec<RowIdAndValue>>(column: &Column<u64>, expected: F) {
let num_docs = column.num_docs();
let test_doc = |doc: u32| {
if expected(doc).is_empty() {
assert_eq!(column.first(doc), None);
} else {
assert_eq!(column.first(doc), Some(expected(doc)[0].value));
}
let values = column.values_for_doc(doc).collect_vec();
assert_eq!(values, expected(doc).iter().map(|x| x.value).collect_vec());
let mut row_ids = Vec::new();
column.row_ids_for_docs(&[doc], &mut vec![], &mut row_ids);
assert_eq!(
row_ids,
expected(doc).iter().map(|x| x.row_id).collect_vec()
);
let values = column.values_for_doc(doc).collect_vec();
assert_eq!(values, expected(doc).iter().map(|x| x.value).collect_vec());
// Docid rowid conversion
let mut row_ids = Vec::new();
let safe_next_doc = |doc: u32| (doc + 1).min(num_docs - 1);
column
.index
.docids_to_rowids(&[doc, safe_next_doc(doc)], &mut vec![], &mut row_ids);
let expected_rowids = expected(doc)
.iter()
.map(|x| x.row_id)
.chain(expected(safe_next_doc(doc)).iter().map(|x| x.row_id))
.collect_vec();
assert_eq!(row_ids, expected_rowids);
let rowid_range = column
.index
.docid_range_to_rowids(doc..safe_next_doc(doc) + 1);
if expected_rowids.is_empty() {
assert!(rowid_range.is_empty());
} else {
assert_eq!(
rowid_range,
expected_rowids[0]..expected_rowids.last().unwrap() + 1
);
}
};
test_doc(0);
test_doc(num_docs - 1);
test_doc(num_docs - 2);
test_doc(65000);
}
fn open_column(reader: &ColumnarReader, name: &str) -> Column<u64> {
let column = reader.read_columns(name).unwrap()[0]
.open()
.unwrap()
.coerce_numerical(crate::NumericalType::U64)
.unwrap();
let DynamicColumn::U64(column) = column else {
panic!();
};
column
}

View File

@@ -8,7 +8,7 @@ use common::{ByteCount, DateTime, HasLen, OwnedBytes};
use crate::column::{BytesColumn, Column, StrColumn};
use crate::column_values::{monotonic_map_column, StrictlyMonotonicFn};
use crate::columnar::ColumnType;
use crate::{Cardinality, ColumnIndex, ColumnValues, NumericalType};
use crate::{Cardinality, ColumnIndex, ColumnValues, NumericalType, Version};
#[derive(Clone)]
pub enum DynamicColumn {
@@ -232,6 +232,7 @@ static_dynamic_conversions!(Column<Ipv6Addr>, IpAddr);
pub struct DynamicColumnHandle {
pub(crate) file_slice: FileSlice,
pub(crate) column_type: ColumnType,
pub(crate) format_version: Version,
}
impl DynamicColumnHandle {
@@ -260,11 +261,15 @@ impl DynamicColumnHandle {
let column_bytes = self.file_slice.read_bytes()?;
match self.column_type {
ColumnType::Str | ColumnType::Bytes => {
let column: BytesColumn = crate::column::open_column_bytes(column_bytes)?;
let column: BytesColumn =
crate::column::open_column_bytes(column_bytes, self.format_version)?;
Ok(Some(column.term_ord_column))
}
ColumnType::IpAddr => {
let column = crate::column::open_column_u128_as_compact_u64(column_bytes)?;
let column = crate::column::open_column_u128_as_compact_u64(
column_bytes,
self.format_version,
)?;
Ok(Some(column))
}
ColumnType::Bool
@@ -272,7 +277,8 @@ impl DynamicColumnHandle {
| ColumnType::U64
| ColumnType::F64
| ColumnType::DateTime => {
let column = crate::column::open_column_u64::<u64>(column_bytes)?;
let column =
crate::column::open_column_u64::<u64>(column_bytes, self.format_version)?;
Ok(Some(column))
}
}
@@ -280,15 +286,31 @@ impl DynamicColumnHandle {
fn open_internal(&self, column_bytes: OwnedBytes) -> io::Result<DynamicColumn> {
let dynamic_column: DynamicColumn = match self.column_type {
ColumnType::Bytes => crate::column::open_column_bytes(column_bytes)?.into(),
ColumnType::Str => crate::column::open_column_str(column_bytes)?.into(),
ColumnType::I64 => crate::column::open_column_u64::<i64>(column_bytes)?.into(),
ColumnType::U64 => crate::column::open_column_u64::<u64>(column_bytes)?.into(),
ColumnType::F64 => crate::column::open_column_u64::<f64>(column_bytes)?.into(),
ColumnType::Bool => crate::column::open_column_u64::<bool>(column_bytes)?.into(),
ColumnType::IpAddr => crate::column::open_column_u128::<Ipv6Addr>(column_bytes)?.into(),
ColumnType::Bytes => {
crate::column::open_column_bytes(column_bytes, self.format_version)?.into()
}
ColumnType::Str => {
crate::column::open_column_str(column_bytes, self.format_version)?.into()
}
ColumnType::I64 => {
crate::column::open_column_u64::<i64>(column_bytes, self.format_version)?.into()
}
ColumnType::U64 => {
crate::column::open_column_u64::<u64>(column_bytes, self.format_version)?.into()
}
ColumnType::F64 => {
crate::column::open_column_u64::<f64>(column_bytes, self.format_version)?.into()
}
ColumnType::Bool => {
crate::column::open_column_u64::<bool>(column_bytes, self.format_version)?.into()
}
ColumnType::IpAddr => {
crate::column::open_column_u128::<Ipv6Addr>(column_bytes, self.format_version)?
.into()
}
ColumnType::DateTime => {
crate::column::open_column_u64::<DateTime>(column_bytes)?.into()
crate::column::open_column_u64::<DateTime>(column_bytes, self.format_version)?
.into()
}
};
Ok(dynamic_column)

View File

@@ -1,4 +1,7 @@
use std::ops::Range;
use std::sync::Arc;
use crate::{ColumnValues, RowId};
pub trait Iterable<T = u64> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = T> + '_>;
@@ -17,3 +20,9 @@ where Range<T>: Iterator<Item = T>
Box::new(self.clone())
}
}
impl Iterable for Arc<dyn crate::ColumnValues<RowId>> {
fn boxed_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(self.iter().map(|row_id| row_id as u64))
}
}

View File

@@ -48,7 +48,7 @@ pub use column_values::{
};
pub use columnar::{
merge_columnar, ColumnType, ColumnarReader, ColumnarWriter, HasAssociatedColumnType,
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder,
MergeRowOrder, ShuffleMergeOrder, StackMergeOrder, Version, CURRENT_VERSION,
};
use sstable::VoidSSTable;
pub use value::{NumericalType, NumericalValue};
@@ -131,3 +131,6 @@ impl Cardinality {
#[cfg(test)]
mod tests;
#[cfg(test)]
mod compat_tests;

View File

@@ -21,7 +21,7 @@ fn test_dataframe_writer_str() {
dataframe_writer.record_str(1u32, "my_string", "hello");
dataframe_writer.record_str(3u32, "my_string", "helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
@@ -35,7 +35,7 @@ fn test_dataframe_writer_bytes() {
dataframe_writer.record_bytes(1u32, "my_string", b"hello");
dataframe_writer.record_bytes(3u32, "my_string", b"helloeee");
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("my_string").unwrap();
@@ -49,7 +49,7 @@ fn test_dataframe_writer_bool() {
dataframe_writer.record_bool(1u32, "bool.value", false);
dataframe_writer.record_bool(3u32, "bool.value", true);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("bool.value").unwrap();
@@ -74,12 +74,12 @@ fn test_dataframe_writer_u64_multivalued() {
dataframe_writer.record_numerical(6u32, "divisor", 2u64);
dataframe_writer.record_numerical(6u32, "divisor", 3u64);
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(7, None, &mut buffer).unwrap();
dataframe_writer.serialize(7, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("divisor").unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0].num_bytes(), 29);
assert_eq!(cols[0].num_bytes(), 50);
let dyn_i64_col = cols[0].open().unwrap();
let DynamicColumn::I64(divisor_col) = dyn_i64_col else {
panic!();
@@ -97,7 +97,7 @@ fn test_dataframe_writer_ip_addr() {
dataframe_writer.record_ip_addr(1, "ip_addr", Ipv6Addr::from_u128(1001));
dataframe_writer.record_ip_addr(3, "ip_addr", Ipv6Addr::from_u128(1050));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(5, None, &mut buffer).unwrap();
dataframe_writer.serialize(5, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("ip_addr").unwrap();
@@ -128,7 +128,7 @@ fn test_dataframe_writer_numerical() {
dataframe_writer.record_numerical(2u32, "srical.value", NumericalValue::U64(13u64));
dataframe_writer.record_numerical(4u32, "srical.value", NumericalValue::U64(15u64));
let mut buffer: Vec<u8> = Vec::new();
dataframe_writer.serialize(6, None, &mut buffer).unwrap();
dataframe_writer.serialize(6, &mut buffer).unwrap();
let columnar = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar.num_columns(), 1);
let cols: Vec<DynamicColumnHandle> = columnar.read_columns("srical.value").unwrap();
@@ -153,46 +153,6 @@ fn test_dataframe_writer_numerical() {
assert_eq!(column_i64.first(6), None); //< we can change the spec for that one.
}
#[test]
fn test_dataframe_sort_by_full() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(0u32, "value", NumericalValue::U64(1));
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
let data = dataframe_writer.sort_order("value", 2, false);
assert_eq!(data, vec![0, 1]);
}
#[test]
fn test_dataframe_sort_by_opt() {
let mut dataframe_writer = ColumnarWriter::default();
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(3));
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(2));
let data = dataframe_writer.sort_order("value", 5, false);
// 0, 2, 4 is 0.0
assert_eq!(data, vec![0, 2, 4, 3, 1]);
let data = dataframe_writer.sort_order("value", 5, true);
assert_eq!(
data,
vec![4, 2, 0, 3, 1].into_iter().rev().collect::<Vec<_>>()
);
}
#[test]
fn test_dataframe_sort_by_multi() {
let mut dataframe_writer = ColumnarWriter::default();
// valid for sort
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(2));
// those are ignored for sort
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
dataframe_writer.record_numerical(1u32, "value", NumericalValue::U64(4));
// valid for sort
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(3));
// ignored, would change sort order
dataframe_writer.record_numerical(3u32, "value", NumericalValue::U64(1));
let data = dataframe_writer.sort_order("value", 4, false);
assert_eq!(data, vec![0, 2, 1, 3]);
}
#[test]
fn test_dictionary_encoded_str() {
let mut buffer = Vec::new();
@@ -201,7 +161,7 @@ fn test_dictionary_encoded_str() {
columnar_writer.record_str(3, "my.column", "c");
columnar_writer.record_str(3, "my.column2", "different_column!");
columnar_writer.record_str(4, "my.column", "b");
columnar_writer.serialize(5, None, &mut buffer).unwrap();
columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
@@ -235,7 +195,7 @@ fn test_dictionary_encoded_bytes() {
columnar_writer.record_bytes(3, "my.column", b"c");
columnar_writer.record_bytes(3, "my.column2", b"different_column!");
columnar_writer.record_bytes(4, "my.column", b"b");
columnar_writer.serialize(5, None, &mut buffer).unwrap();
columnar_writer.serialize(5, &mut buffer).unwrap();
let columnar_reader = ColumnarReader::open(buffer).unwrap();
assert_eq!(columnar_reader.num_columns(), 2);
let col_handles = columnar_reader.read_columns("my.column").unwrap();
@@ -344,7 +304,7 @@ fn column_value_strategy() -> impl Strategy<Value = ColumnValue> {
ip_addr_byte
))),
1 => any::<bool>().prop_map(ColumnValue::Bool),
1 => (0_679_723_993i64..1_679_723_995i64)
1 => (679_723_993i64..1_679_723_995i64)
.prop_map(|val| { ColumnValue::DateTime(DateTime::from_timestamp_secs(val)) })
]
}
@@ -369,26 +329,12 @@ fn columnar_docs_strategy() -> impl Strategy<Value = Vec<Vec<(&'static str, Colu
.prop_flat_map(|num_docs| proptest::collection::vec(doc_strategy(), num_docs))
}
fn columnar_docs_and_mapping_strategy(
) -> impl Strategy<Value = (Vec<Vec<(&'static str, ColumnValue)>>, Vec<RowId>)> {
columnar_docs_strategy().prop_flat_map(|docs| {
permutation_strategy(docs.len()).prop_map(move |permutation| (docs.clone(), permutation))
})
}
fn permutation_strategy(n: usize) -> impl Strategy<Value = Vec<RowId>> {
Just((0u32..n as RowId).collect()).prop_shuffle()
}
fn permutation_and_subset_strategy(n: usize) -> impl Strategy<Value = Vec<usize>> {
let vals: Vec<usize> = (0..n).collect();
subsequence(vals, 0..=n).prop_shuffle()
}
fn build_columnar_with_mapping(
docs: &[Vec<(&'static str, ColumnValue)>],
old_to_new_row_ids_opt: Option<&[RowId]>,
) -> ColumnarReader {
fn build_columnar_with_mapping(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
let num_docs = docs.len() as u32;
let mut buffer = Vec::new();
let mut columnar_writer = ColumnarWriter::default();
@@ -416,15 +362,13 @@ fn build_columnar_with_mapping(
}
}
}
columnar_writer
.serialize(num_docs, old_to_new_row_ids_opt, &mut buffer)
.unwrap();
columnar_writer.serialize(num_docs, &mut buffer).unwrap();
ColumnarReader::open(buffer).unwrap()
}
fn build_columnar(docs: &[Vec<(&'static str, ColumnValue)>]) -> ColumnarReader {
build_columnar_with_mapping(docs, None)
build_columnar_with_mapping(docs)
}
fn assert_columnar_eq_strict(left: &ColumnarReader, right: &ColumnarReader) {
@@ -448,6 +392,7 @@ fn assert_columnar_eq(
}
}
#[track_caller]
fn assert_column_eq<T: Copy + PartialOrd + Debug + Send + Sync + 'static>(
left: &Column<T>,
right: &Column<T>,
@@ -683,54 +628,6 @@ proptest! {
}
}
// Same as `test_single_columnar_builder_proptest` but with a shuffling mapping.
proptest! {
#![proptest_config(ProptestConfig::with_cases(500))]
#[test]
fn test_single_columnar_builder_with_shuffle_proptest((docs, mapping) in columnar_docs_and_mapping_strategy()) {
let columnar = build_columnar_with_mapping(&docs[..], Some(&mapping));
assert_eq!(columnar.num_rows() as usize, docs.len());
let mut expected_columns: HashMap<(&str, ColumnTypeCategory), HashMap<u32, Vec<&ColumnValue>> > = Default::default();
for (doc_id, doc_vals) in docs.iter().enumerate() {
for (col_name, col_val) in doc_vals {
expected_columns
.entry((col_name, col_val.column_type_category()))
.or_default()
.entry(mapping[doc_id])
.or_default()
.push(col_val);
}
}
let column_list = columnar.list_columns().unwrap();
assert_eq!(expected_columns.len(), column_list.len());
for (column_name, column) in column_list {
let dynamic_column = column.open().unwrap();
let col_category: ColumnTypeCategory = dynamic_column.column_type().into();
let expected_col_values: &HashMap<u32, Vec<&ColumnValue>> = expected_columns.get(&(column_name.as_str(), col_category)).unwrap();
for _doc_id in 0..columnar.num_rows() {
match &dynamic_column {
DynamicColumn::Bool(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::I64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::U64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::F64(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::IpAddr(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::DateTime(col) =>
assert_column_values(col, expected_col_values),
DynamicColumn::Bytes(col) =>
assert_bytes_column_values(col, expected_col_values, false),
DynamicColumn::Str(col) =>
assert_bytes_column_values(col, expected_col_values, true),
}
}
}
}
}
// This test creates 2 or 3 random small columnars and attempts to merge them.
// It compares the resulting merged dataframe with what would have been obtained by building the
// dataframe from the concatenated rows to begin with.
@@ -844,24 +741,68 @@ fn columnar_docs_and_remap(
proptest! {
#![proptest_config(ProptestConfig::with_cases(1000))]
#[test]
fn test_columnar_merge_and_remap_proptest((columnar_docs, shuffle_merge_order) in columnar_docs_and_remap()) {
let shuffled_rows: Vec<Vec<(&'static str, ColumnValue)>> = shuffle_merge_order.iter()
.map(|row_addr| columnar_docs[row_addr.segment_ord as usize][row_addr.row_id as usize].clone())
.collect();
let expected_merged_columnar = build_columnar(&shuffled_rows[..]);
let columnar_readers: Vec<ColumnarReader> = columnar_docs.iter()
.map(|docs| build_columnar(&docs[..]))
.collect::<Vec<_>>();
let columnar_readers_arr: Vec<&ColumnarReader> = columnar_readers.iter().collect();
let mut output: Vec<u8> = Vec::new();
let segment_num_rows: Vec<RowId> = columnar_docs.iter().map(|docs| docs.len() as RowId).collect();
let shuffle_merge_order = ShuffleMergeOrder::for_test(&segment_num_rows, shuffle_merge_order);
crate::merge_columnar(&columnar_readers_arr[..], &[], shuffle_merge_order.into(), &mut output).unwrap();
let merged_columnar = ColumnarReader::open(output).unwrap();
assert_columnar_eq(&merged_columnar, &expected_merged_columnar, true);
fn test_columnar_merge_and_remap_proptest((columnar_docs, shuffle_merge_order) in
columnar_docs_and_remap()) {
test_columnar_merge_and_remap(columnar_docs, shuffle_merge_order);
}
}
fn test_columnar_merge_and_remap(
columnar_docs: Vec<Vec<Vec<(&'static str, ColumnValue)>>>,
shuffle_merge_order: Vec<RowAddr>,
) {
let shuffled_rows: Vec<Vec<(&'static str, ColumnValue)>> = shuffle_merge_order
.iter()
.map(|row_addr| {
columnar_docs[row_addr.segment_ord as usize][row_addr.row_id as usize].clone()
})
.collect();
let expected_merged_columnar = build_columnar(&shuffled_rows[..]);
let columnar_readers: Vec<ColumnarReader> = columnar_docs
.iter()
.map(|docs| build_columnar(&docs[..]))
.collect::<Vec<_>>();
let columnar_readers_ref: Vec<&ColumnarReader> = columnar_readers.iter().collect();
let mut output: Vec<u8> = Vec::new();
let segment_num_rows: Vec<RowId> = columnar_docs
.iter()
.map(|docs| docs.len() as RowId)
.collect();
let shuffle_merge_order = ShuffleMergeOrder::for_test(&segment_num_rows, shuffle_merge_order);
crate::merge_columnar(
&columnar_readers_ref[..],
&[],
shuffle_merge_order.into(),
&mut output,
)
.unwrap();
let merged_columnar = ColumnarReader::open(output).unwrap();
assert_columnar_eq(&merged_columnar, &expected_merged_columnar, true);
}
#[test]
fn test_columnar_merge_and_remap_bug_1() {
let columnar_docs = vec![vec![
vec![
("c1", ColumnValue::Numerical(NumericalValue::U64(0))),
("c1", ColumnValue::Numerical(NumericalValue::U64(0))),
],
vec![],
]];
let shuffle_merge_order: Vec<RowAddr> = vec![
RowAddr {
segment_ord: 0,
row_id: 1,
},
RowAddr {
segment_ord: 0,
row_id: 0,
},
];
test_columnar_merge_and_remap(columnar_docs, shuffle_merge_order);
}
#[test]
fn test_columnar_merge_empty() {
let columnar_reader_1 = build_columnar(&[]);

Binary file not shown.


View File

@@ -7,6 +7,11 @@
- [Other](#other)
- [Usage](#usage)
# Index Sorting has been removed!
More info here:
https://github.com/quickwit-oss/tantivy/issues/2352
# Index Sorting
Tantivy allows you to sort the index according to a property.

View File

@@ -127,7 +127,7 @@ mod tests {
fast_field_writers
.add_document(&doc!(*FIELD=>2u64))
.unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -178,7 +178,7 @@ mod tests {
fast_field_writers
.add_document(&doc!(*FIELD=>215u64))
.unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -211,7 +211,7 @@ mod tests {
.add_document(&doc!(*FIELD=>100_000u64))
.unwrap();
}
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -243,7 +243,7 @@ mod tests {
.add_document(&doc!(*FIELD=>5_000_000_000_000_000_000u64 + doc_id))
.unwrap();
}
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -276,7 +276,7 @@ mod tests {
doc.add_i64(i64_field, i);
fast_field_writers.add_document(&doc).unwrap();
}
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -315,7 +315,7 @@ mod tests {
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let doc = TantivyDocument::default();
fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
@@ -348,7 +348,7 @@ mod tests {
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let doc = TantivyDocument::default();
fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
@@ -385,7 +385,7 @@ mod tests {
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x)).unwrap();
}
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -770,7 +770,7 @@ mod tests {
fast_field_writers
.add_document(&doc!(field=>false))
.unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -802,7 +802,7 @@ mod tests {
.add_document(&doc!(field=>false))
.unwrap();
}
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -827,7 +827,7 @@ mod tests {
let mut fast_field_writers = FastFieldsWriter::from_schema(&schema).unwrap();
let doc = TantivyDocument::default();
fast_field_writers.add_document(&doc).unwrap();
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
let file = directory.open_read(path).unwrap();
@@ -855,7 +855,7 @@ mod tests {
for doc in docs {
fast_field_writers.add_document(doc).unwrap();
}
fast_field_writers.serialize(&mut write, None).unwrap();
fast_field_writers.serialize(&mut write).unwrap();
write.terminate().unwrap();
}
Ok(directory)

View File

@@ -4,7 +4,6 @@ use columnar::{ColumnarWriter, NumericalValue};
use common::{DateTimePrecision, JsonPathWriter};
use tokenizer_api::Token;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::document::{Document, ReferenceValue, ReferenceValueLeaf, Value};
use crate::schema::{value_type_to_column_type, Field, FieldType, Schema, Type};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
@@ -106,16 +105,6 @@ impl FastFieldsWriter {
self.columnar_writer.mem_usage()
}
pub(crate) fn sort_order(
&self,
sort_field: &str,
num_docs: DocId,
reversed: bool,
) -> Vec<DocId> {
self.columnar_writer
.sort_order(sort_field, num_docs, reversed)
}
/// Indexes all of the fastfields of a new document.
pub fn add_document<D: Document>(&mut self, doc: &D) -> crate::Result<()> {
let doc_id = self.num_docs;
@@ -233,16 +222,9 @@ impl FastFieldsWriter {
/// Serializes all of the `FastFieldWriter`s by pushing them in
/// order to the fast field serializer.
pub fn serialize(
mut self,
wrt: &mut dyn io::Write,
doc_id_map_opt: Option<&DocIdMapping>,
) -> io::Result<()> {
pub fn serialize(mut self, wrt: &mut dyn io::Write) -> io::Result<()> {
let num_docs = self.num_docs;
let old_to_new_row_ids =
doc_id_map_opt.map(|doc_id_mapping| doc_id_mapping.old_to_new_ids());
self.columnar_writer
.serialize(num_docs, old_to_new_row_ids, wrt)?;
self.columnar_writer.serialize(num_docs, wrt)?;
Ok(())
}
}
@@ -392,7 +374,7 @@ mod tests {
}
let mut buffer = Vec::new();
columnar_writer
.serialize(json_docs.len() as DocId, None, &mut buffer)
.serialize(json_docs.len() as DocId, &mut buffer)
.unwrap();
ColumnarReader::open(buffer).unwrap()
}

View File

@@ -77,7 +77,7 @@ mod tests {
let mut fieldnorm_writers = FieldNormsWriter::for_schema(&SCHEMA);
fieldnorm_writers.record(2u32, *TXT_FIELD, 5);
fieldnorm_writers.record(3u32, *TXT_FIELD, 3);
fieldnorm_writers.serialize(serializer, None)?;
fieldnorm_writers.serialize(serializer)?;
}
let file = directory.open_read(path)?;
{

View File

@@ -2,7 +2,6 @@ use std::cmp::Ordering;
use std::{io, iter};
use super::{fieldnorm_to_id, FieldNormsSerializer};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::{Field, Schema};
use crate::DocId;
@@ -92,11 +91,7 @@ impl FieldNormsWriter {
}
/// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(
&self,
mut fieldnorms_serializer: FieldNormsSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
pub fn serialize(&self, mut fieldnorms_serializer: FieldNormsSerializer) -> io::Result<()> {
for (field, fieldnorms_buffer) in self.fieldnorms_buffers.iter().enumerate().filter_map(
|(field_id, fieldnorms_buffer_opt)| {
fieldnorms_buffer_opt.as_ref().map(|fieldnorms_buffer| {
@@ -104,12 +99,7 @@ impl FieldNormsWriter {
})
},
) {
if let Some(doc_id_map) = doc_id_map {
let remapped_fieldnorm_buffer = doc_id_map.remap(fieldnorms_buffer);
fieldnorms_serializer.serialize_field(field, &remapped_fieldnorm_buffer)?;
} else {
fieldnorms_serializer.serialize_field(field, fieldnorms_buffer)?;
}
fieldnorms_serializer.serialize_field(field, fieldnorms_buffer)?;
}
fieldnorms_serializer.close()?;
Ok(())

View File

@@ -7,7 +7,7 @@ use rand::{thread_rng, Rng};
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::schema::*;
#[allow(deprecated)]
use crate::{doc, schema, Index, IndexSettings, IndexSortByField, IndexWriter, Order, Searcher};
use crate::{doc, schema, Index, IndexWriter, Searcher};
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
assert!(searcher.segment_readers().len() < 20);
@@ -65,71 +65,6 @@ fn get_num_iterations() -> usize {
.map(|str| str.parse().unwrap())
.unwrap_or(2000)
}
#[test]
#[ignore]
fn test_functional_indexing_sorted() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let id_field = schema_builder.add_u64_field("id", INDEXED | FAST);
let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
let text_field_options = TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_index_option(schema::IndexRecordOption::WithFreqsAndPositions),
)
.set_stored();
let text_field = schema_builder.add_text_field("text_field", text_field_options);
let schema = schema_builder.build();
let mut index_builder = Index::builder().schema(schema);
index_builder = index_builder.settings(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
});
let index = index_builder.create_from_tempdir().unwrap();
let reader = index.reader()?;
let mut rng = thread_rng();
let mut index_writer: IndexWriter =
index.writer_with_num_threads(3, 3 * MEMORY_BUDGET_NUM_BYTES_MIN)?;
let mut committed_docs: HashSet<u64> = HashSet::new();
let mut uncommitted_docs: HashSet<u64> = HashSet::new();
for _ in 0..get_num_iterations() {
let random_val = rng.gen_range(0..20);
if random_val == 0 {
index_writer.commit()?;
committed_docs.extend(&uncommitted_docs);
uncommitted_docs.clear();
reader.reload()?;
let searcher = reader.searcher();
// check that everything is correct.
check_index_content(
&searcher,
&committed_docs.iter().cloned().collect::<Vec<u64>>(),
)?;
} else if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u64(id_field, random_val);
index_writer.delete_term(doc_id_term);
} else {
uncommitted_docs.insert(random_val);
let mut doc = TantivyDocument::new();
doc.add_u64(id_field, random_val);
for i in 1u64..10u64 {
doc.add_u64(multiples_field, random_val * i);
}
doc.add_text(text_field, get_text());
index_writer.add_document(doc)?;
}
}
Ok(())
}
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod \
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, \

View File

@@ -20,7 +20,7 @@ use crate::indexer::segment_updater::save_metas;
use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::document::Document;
use crate::schema::{Field, FieldType, Schema, Type};
use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::SegmentReader;
@@ -232,31 +232,7 @@ impl IndexBuilder {
}
fn validate(&self) -> crate::Result<()> {
if let Some(schema) = self.schema.as_ref() {
if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
let schema_field = schema.get_field(&sort_by_field.field).map_err(|_| {
TantivyError::InvalidArgument(format!(
"Field to sort index {} not found in schema",
sort_by_field.field
))
})?;
let entry = schema.get_field_entry(schema_field);
if !entry.is_fast() {
return Err(TantivyError::InvalidArgument(format!(
"Field {} is no fast field. Field needs to be a single value fast field \
to be used to sort an index",
sort_by_field.field
)));
}
let supported_field_types = [Type::I64, Type::U64, Type::F64, Type::Date];
let field_type = entry.field_type().value_type();
if !supported_field_types.contains(&field_type) {
return Err(TantivyError::InvalidArgument(format!(
"Unsupported field type in sort_by_field: {field_type:?}. Supported field \
types: {supported_field_types:?} ",
)));
}
}
if let Some(_schema) = self.schema.as_ref() {
Ok(())
} else {
Err(TantivyError::InvalidArgument(

View File

@@ -249,10 +249,6 @@ fn is_true(val: &bool) -> bool {
/// index, like presort documents.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexSettings {
/// Sorts the documents by information
/// provided in `IndexSortByField`
#[serde(skip_serializing_if = "Option::is_none")]
pub sort_by_field: Option<IndexSortByField>,
/// The `Compressor` used to compress the doc store.
#[serde(default)]
pub docstore_compression: Compressor,
@@ -275,7 +271,6 @@ fn default_docstore_blocksize() -> usize {
impl Default for IndexSettings {
fn default() -> Self {
Self {
sort_by_field: None,
docstore_compression: Compressor::default(),
docstore_blocksize: default_docstore_blocksize(),
docstore_compress_dedicated_thread: true,
@@ -283,22 +278,6 @@ impl Default for IndexSettings {
}
}
/// Settings to presort the documents in an index
///
/// Presorting documents can greatly improve performance
/// in some scenarios, by applying top n
/// optimizations.
#[deprecated(
since = "0.22.0",
note = "We plan to remove index sorting in `0.23`. If you need index sorting, please comment on the related issue https://github.com/quickwit-oss/tantivy/issues/2352 and explain your use case."
)]
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexSortByField {
/// The field to sort the documents by
pub field: String,
/// The order to sort the documents by
pub order: Order,
}
/// The order to sort by
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub enum Order {
@@ -417,7 +396,7 @@ mod tests {
use crate::store::Compressor;
#[cfg(feature = "zstd-compression")]
use crate::store::ZstdCompressor;
use crate::{IndexSettings, IndexSortByField, Order};
use crate::IndexSettings;
#[test]
fn test_serialize_metas() {
@@ -427,13 +406,7 @@ mod tests {
schema_builder.build()
};
let index_metas = IndexMeta {
index_settings: IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "text".to_string(),
order: Order::Asc,
}),
..Default::default()
},
index_settings: IndexSettings::default(),
segments: Vec::new(),
schema,
opstamp: 0u64,
@@ -442,7 +415,7 @@ mod tests {
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(
json,
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"lz4","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
r#"{"index_settings":{"docstore_compression":"lz4","docstore_blocksize":16384},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
);
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
@@ -461,10 +434,6 @@ mod tests {
};
let index_metas = IndexMeta {
index_settings: IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "text".to_string(),
order: Order::Asc,
}),
docstore_compression: crate::store::Compressor::Zstd(ZstdCompressor {
compression_level: Some(4),
}),
@@ -479,7 +448,7 @@ mod tests {
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(
json,
r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(compression_level=4)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
r#"{"index_settings":{"docstore_compression":"zstd(compression_level=4)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#
);
let deser_meta: UntrackedIndexMeta = serde_json::from_str(&json).unwrap();
@@ -491,35 +460,35 @@ mod tests {
#[test]
#[cfg(all(feature = "lz4-compression", feature = "zstd-compression"))]
fn test_serialize_metas_invalid_comp() {
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zsstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let json = r#"{"index_settings":{"docstore_compression":"zsstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
assert_eq!(
err.to_string(),
"unknown variant `zsstd`, expected one of `none`, `lz4`, `zstd`, \
`zstd(compression_level=5)` at line 1 column 96"
`zstd(compression_level=5)` at line 1 column 49"
.to_string()
);
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd(bla=10)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let json = r#"{"index_settings":{"docstore_compression":"zstd(bla=10)","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
assert_eq!(
err.to_string(),
"unknown zstd option \"bla\" at line 1 column 103".to_string()
"unknown zstd option \"bla\" at line 1 column 56".to_string()
);
}
#[test]
#[cfg(not(feature = "zstd-compression"))]
fn test_serialize_metas_unsupported_comp() {
let json = r#"{"index_settings":{"sort_by_field":{"field":"text","order":"Asc"},"docstore_compression":"zstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let json = r#"{"index_settings":{"docstore_compression":"zstd","docstore_blocksize":1000000},"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","fieldnorms":true,"tokenizer":"default"},"stored":false,"fast":false}}],"opstamp":0}"#;
let err = serde_json::from_str::<UntrackedIndexMeta>(json).unwrap_err();
assert_eq!(
err.to_string(),
"unsupported variant `zstd`, please enable Tantivy's `zstd-compression` feature at \
line 1 column 95"
line 1 column 48"
.to_string()
);
}
@@ -531,7 +500,6 @@ mod tests {
assert_eq!(
index_settings,
IndexSettings {
sort_by_field: None,
docstore_compression: Compressor::default(),
docstore_compress_dedicated_thread: true,
docstore_blocksize: 16_384

View File

@@ -12,7 +12,7 @@ mod segment_reader;
pub use self::index::{Index, IndexBuilder};
pub(crate) use self::index_meta::SegmentMetaInventory;
pub use self::index_meta::{IndexMeta, IndexSettings, IndexSortByField, Order, SegmentMeta};
pub use self::index_meta::{IndexMeta, IndexSettings, Order, SegmentMeta};
pub use self::inverted_index_reader::InvertedIndexReader;
pub use self::segment::Segment;
pub use self::segment_component::SegmentComponent;

View File

@@ -3,15 +3,12 @@
use common::ReadOnlyBitSet;
use super::SegmentWriter;
use crate::schema::{Field, Schema};
use crate::{DocAddress, DocId, IndexSortByField, TantivyError};
use crate::DocAddress;
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum MappingType {
Stacked,
StackedWithDeletes,
Shuffled,
}
/// Struct to provide mapping from new doc_id to old doc_id and segment.
@@ -46,537 +43,4 @@ impl SegmentDocIdMapping {
pub(crate) fn iter_old_doc_addrs(&self) -> impl Iterator<Item = DocAddress> + '_ {
self.new_doc_id_to_old_doc_addr.iter().copied()
}
/// This flag means the segments are simply stacked in the order of their ordinal.
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
///
/// The different segments may contain deletes, which are expressed by skipping a
/// `DocId`. [(0, 1), (0, 3)] <--- here doc_id=0 and doc_id=1 have been deleted
///
/// Being trivial is equivalent to having the `new_doc_id_to_old_doc_addr` array sorted.
///
/// This allows for some optimization.
pub(crate) fn is_trivial(&self) -> bool {
match self.mapping_type {
MappingType::Stacked | MappingType::StackedWithDeletes => true,
MappingType::Shuffled => false,
}
}
}
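
To make the trivial/shuffled distinction above concrete, here is a minimal, self-contained sketch (not tantivy code; addresses are reduced to `(segment_ord, doc_id)` tuples, matching the ordering used by `DocAddress`): a mapping is trivial exactly when the new→old address sequence is sorted.

```
/// Reduced stand-in for `DocAddress`: (segment_ord, doc_id).
type Addr = (u32, u32);

/// Trivial <=> the new->old array is sorted: segments stacked in
/// ordinal order, deletes expressed only by skipped doc_ids.
fn is_trivial(new_to_old: &[Addr]) -> bool {
    new_to_old.windows(2).all(|w| w[0] <= w[1])
}

fn main() {
    // Stacked, with doc_id=1 of segment 0 deleted (skipped).
    assert!(is_trivial(&[(0, 0), (0, 2), (1, 0), (1, 1)]));
    // Shuffled: a doc of segment 1 precedes docs of segment 0.
    assert!(!is_trivial(&[(1, 0), (0, 0), (0, 1)]));
}
```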
/// Struct to provide mapping from old doc_id to new doc_id and vice versa within a segment.
pub struct DocIdMapping {
new_doc_id_to_old: Vec<DocId>,
old_doc_id_to_new: Vec<DocId>,
}
impl DocIdMapping {
pub fn from_new_id_to_old_id(new_doc_id_to_old: Vec<DocId>) -> Self {
let max_doc = new_doc_id_to_old.len();
let old_max_doc = new_doc_id_to_old
.iter()
.cloned()
.max()
.map(|n| n + 1)
.unwrap_or(0);
let mut old_doc_id_to_new = vec![0; old_max_doc as usize];
for i in 0..max_doc {
old_doc_id_to_new[new_doc_id_to_old[i] as usize] = i as DocId;
}
DocIdMapping {
new_doc_id_to_old,
old_doc_id_to_new,
}
}
/// returns the new doc_id for the old doc_id
pub fn get_new_doc_id(&self, doc_id: DocId) -> DocId {
self.old_doc_id_to_new[doc_id as usize]
}
/// returns the old doc_id for the new doc_id
pub fn get_old_doc_id(&self, doc_id: DocId) -> DocId {
self.new_doc_id_to_old[doc_id as usize]
}
/// iterate over old doc_ids in order of the new doc_ids
pub fn iter_old_doc_ids(&self) -> impl Iterator<Item = DocId> + Clone + '_ {
self.new_doc_id_to_old.iter().cloned()
}
pub fn old_to_new_ids(&self) -> &[DocId] {
&self.old_doc_id_to_new[..]
}
/// Remaps a given array to the new doc ids.
pub fn remap<T: Copy>(&self, els: &[T]) -> Vec<T> {
self.new_doc_id_to_old
.iter()
.map(|old_doc| els[*old_doc as usize])
.collect()
}
pub fn num_new_doc_ids(&self) -> usize {
self.new_doc_id_to_old.len()
}
pub fn num_old_doc_ids(&self) -> usize {
self.old_doc_id_to_new.len()
}
}
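
As a standalone sketch of the inverse-permutation construction in `from_new_id_to_old_id` (plain `Vec<u32>`, no tantivy types; unmapped old ids default to 0, exactly as above):

```
fn invert(new_to_old: &[u32]) -> Vec<u32> {
    // Size the inverse by the largest old id seen, as above.
    let old_max = new_to_old.iter().copied().max().map_or(0, |m| m + 1);
    let mut old_to_new = vec![0u32; old_max as usize];
    for (new_id, &old_id) in new_to_old.iter().enumerate() {
        old_to_new[old_id as usize] = new_id as u32;
    }
    old_to_new
}

fn main() {
    let new_to_old = vec![3u32, 2, 5];
    let old_to_new = invert(&new_to_old);
    // Every mapped id round-trips.
    for (new_id, &old_id) in new_to_old.iter().enumerate() {
        assert_eq!(old_to_new[old_id as usize], new_id as u32);
    }
    // Old ids 0, 1 and 4 were never mapped and default to 0.
    assert_eq!(old_to_new[4], 0);
}
```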
pub(crate) fn expect_field_id_for_sort_field(
schema: &Schema,
sort_by_field: &IndexSortByField,
) -> crate::Result<Field> {
schema.get_field(&sort_by_field.field).map_err(|_| {
TantivyError::InvalidArgument(format!(
"field to sort index by not found: {:?}",
sort_by_field.field
))
})
}
// Generates a document mapping in the form of [index new doc_id] -> old doc_id
// TODO detect if field is already sorted and discard mapping
pub(crate) fn get_doc_id_mapping_from_field(
sort_by_field: IndexSortByField,
segment_writer: &SegmentWriter,
) -> crate::Result<DocIdMapping> {
let schema = segment_writer.segment_serializer.segment().schema();
expect_field_id_for_sort_field(&schema, &sort_by_field)?; // for now expect
let new_doc_id_to_old = segment_writer.fast_field_writers.sort_order(
sort_by_field.field.as_str(),
segment_writer.max_doc(),
sort_by_field.order.is_desc(),
);
// create new doc_id to old doc_id index (used in fast_field_writers)
Ok(DocIdMapping::from_new_id_to_old_id(new_doc_id_to_old))
}
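
`fast_field_writers.sort_order` is internal to tantivy; conceptually it returns an argsort of `0..max_doc` by fast-field value. A hedged stand-in (hypothetical `sort_order` helper, single-valued field assumed):

```
/// Hypothetical stand-in: argsort doc ids by their fast-field value.
fn sort_order(values: &[u64], descending: bool) -> Vec<u32> {
    let mut new_to_old: Vec<u32> = (0..values.len() as u32).collect();
    new_to_old.sort_by_key(|&doc| values[doc as usize]);
    if descending {
        new_to_old.reverse();
    }
    new_to_old
}

fn main() {
    // values[doc_id]: doc 0 -> 40, doc 1 -> 20, doc 2 -> 100.
    let new_to_old = sort_order(&[40, 20, 100], false);
    assert_eq!(new_to_old, vec![1, 0, 2]); // new doc_id -> old doc_id
}
```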
#[cfg(test)]
mod tests_indexsorting {
use common::DateTime;
use crate::collector::TopDocs;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::indexer::NoMergePolicy;
use crate::query::QueryParser;
use crate::schema::*;
use crate::{DocAddress, Index, IndexBuilder, IndexSettings, IndexSortByField, Order};
fn create_test_index(
index_settings: Option<IndexSettings>,
text_field_options: TextOptions,
) -> crate::Result<Index> {
let mut schema_builder = Schema::builder();
let my_text_field = schema_builder.add_text_field("text_field", text_field_options);
let my_string_field = schema_builder.add_text_field("string_field", STRING | STORED);
let my_number =
schema_builder.add_u64_field("my_number", NumericOptions::default().set_fast());
let multi_numbers =
schema_builder.add_u64_field("multi_numbers", NumericOptions::default().set_fast());
let schema = schema_builder.build();
let mut index_builder = Index::builder().schema(schema);
if let Some(settings) = index_settings {
index_builder = index_builder.settings(settings);
}
let index = index_builder.create_in_ram()?;
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(my_number=>40_u64))?;
index_writer.add_document(
doc!(my_number=>20_u64, multi_numbers => 5_u64, multi_numbers => 6_u64),
)?;
index_writer.add_document(doc!(my_number=>100_u64))?;
index_writer.add_document(
doc!(my_number=>10_u64, my_string_field=> "blublub", my_text_field => "some text"),
)?;
index_writer.add_document(doc!(my_number=>30_u64, multi_numbers => 3_u64 ))?;
index_writer.commit()?;
Ok(index)
}
fn get_text_options() -> TextOptions {
TextOptions::default().set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::Basic),
)
}
#[test]
fn test_sort_index_test_text_field() -> crate::Result<()> {
// there are different serializers for different settings in postings/recorder.rs
// test remapping for all of them
let options = vec![
get_text_options(),
get_text_options().set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
),
get_text_options().set_indexing_options(
TextFieldIndexing::default()
.set_index_option(IndexRecordOption::WithFreqsAndPositions),
),
];
for option in options {
// let options = get_text_options();
// no index_sort
let index = create_test_index(None, option.clone())?;
let my_text_field = index.schema().get_field("text_field").unwrap();
let searcher = index.reader()?.searcher();
let query = QueryParser::for_index(&index, vec![my_text_field]).parse_query("text")?;
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3))?;
assert_eq!(
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>(),
vec![3]
);
// sort by field asc
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Asc,
}),
..Default::default()
}),
option.clone(),
)?;
let my_text_field = index.schema().get_field("text_field").unwrap();
let reader = index.reader()?;
let searcher = reader.searcher();
let query = QueryParser::for_index(&index, vec![my_text_field]).parse_query("text")?;
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3))?;
assert_eq!(
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>(),
vec![0]
);
// test new field norm mapping
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let fieldnorm_reader = searcher
.segment_reader(0)
.get_fieldnorms_reader(my_text_field)?;
assert_eq!(fieldnorm_reader.fieldnorm(0), 2); // some text
assert_eq!(fieldnorm_reader.fieldnorm(1), 0);
}
// sort by field desc
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
option.clone(),
)?;
let my_string_field = index.schema().get_field("text_field").unwrap();
let searcher = index.reader()?.searcher();
let query =
QueryParser::for_index(&index, vec![my_string_field]).parse_query("text")?;
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3))?;
assert_eq!(
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>(),
vec![4]
);
// test new field norm mapping
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let fieldnorm_reader = searcher
.segment_reader(0)
.get_fieldnorms_reader(my_text_field)?;
assert_eq!(fieldnorm_reader.fieldnorm(0), 0);
assert_eq!(fieldnorm_reader.fieldnorm(1), 0);
assert_eq!(fieldnorm_reader.fieldnorm(2), 0);
assert_eq!(fieldnorm_reader.fieldnorm(3), 0);
assert_eq!(fieldnorm_reader.fieldnorm(4), 2); // some text
}
}
Ok(())
}
#[test]
fn test_sort_index_get_documents() -> crate::Result<()> {
// default baseline
let index = create_test_index(None, get_text_options())?;
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
{
assert!(searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))?
.get_first(my_string_field)
.is_none());
assert_eq!(
searcher
.doc::<TantivyDocument>(DocAddress::new(0, 3))?
.get_first(my_string_field)
.unwrap()
.as_str(),
Some("blublub")
);
}
// sort by field asc
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Asc,
}),
..Default::default()
}),
get_text_options(),
)?;
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
{
assert_eq!(
searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))?
.get_first(my_string_field)
.unwrap()
.as_str(),
Some("blublub")
);
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 4))?;
assert!(doc.get_first(my_string_field).is_none());
}
// sort by field desc
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
get_text_options(),
)?;
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
{
let doc = searcher.doc::<TantivyDocument>(DocAddress::new(0, 4))?;
assert_eq!(
doc.get_first(my_string_field).unwrap().as_str(),
Some("blublub")
);
}
Ok(())
}
#[test]
fn test_sort_index_test_string_field() -> crate::Result<()> {
let index = create_test_index(None, get_text_options())?;
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
let query = QueryParser::for_index(&index, vec![my_string_field]).parse_query("blublub")?;
let top_docs: Vec<(f32, DocAddress)> = searcher.search(&query, &TopDocs::with_limit(3))?;
assert_eq!(
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>(),
vec![3]
);
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Asc,
}),
..Default::default()
}),
get_text_options(),
)?;
let my_string_field = index.schema().get_field("string_field").unwrap();
let reader = index.reader()?;
let searcher = reader.searcher();
let query = QueryParser::for_index(&index, vec![my_string_field]).parse_query("blublub")?;
let top_docs: Vec<(f32, DocAddress)> = searcher.search(&query, &TopDocs::with_limit(3))?;
assert_eq!(
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>(),
vec![0]
);
// test new field norm mapping
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let fieldnorm_reader = searcher
.segment_reader(0)
.get_fieldnorms_reader(my_text_field)?;
assert_eq!(fieldnorm_reader.fieldnorm(0), 2); // some text
assert_eq!(fieldnorm_reader.fieldnorm(1), 0);
}
// sort by field desc
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
get_text_options(),
)?;
let my_string_field = index.schema().get_field("string_field").unwrap();
let searcher = index.reader()?.searcher();
let query = QueryParser::for_index(&index, vec![my_string_field]).parse_query("blublub")?;
let top_docs: Vec<(f32, DocAddress)> = searcher.search(&query, &TopDocs::with_limit(3))?;
assert_eq!(
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>(),
vec![4]
);
// test new field norm mapping
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let fieldnorm_reader = searcher
.segment_reader(0)
.get_fieldnorms_reader(my_text_field)?;
assert_eq!(fieldnorm_reader.fieldnorm(0), 0);
assert_eq!(fieldnorm_reader.fieldnorm(1), 0);
assert_eq!(fieldnorm_reader.fieldnorm(2), 0);
assert_eq!(fieldnorm_reader.fieldnorm(3), 0);
assert_eq!(fieldnorm_reader.fieldnorm(4), 2); // some text
}
Ok(())
}
#[test]
fn test_sort_index_fast_field() -> crate::Result<()> {
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "my_number".to_string(),
order: Order::Asc,
}),
..Default::default()
}),
get_text_options(),
)?;
assert_eq!(
index.settings().sort_by_field.as_ref().unwrap().field,
"my_number".to_string()
);
let searcher = index.reader()?.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields
.u64("my_number")
.unwrap()
.first_or_default_col(999);
assert_eq!(fast_field.get_val(0), 10u64);
assert_eq!(fast_field.get_val(1), 20u64);
assert_eq!(fast_field.get_val(2), 30u64);
let multifield = fast_fields.u64("multi_numbers").unwrap();
let vals: Vec<u64> = multifield.values_for_doc(0u32).collect();
assert_eq!(vals, &[] as &[u64]);
let vals: Vec<_> = multifield.values_for_doc(1u32).collect();
assert_eq!(vals, &[5, 6]);
let vals: Vec<_> = multifield.values_for_doc(2u32).collect();
assert_eq!(vals, &[3]);
Ok(())
}
#[test]
fn test_with_sort_by_date_field() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED | STORED | FAST);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "date".to_string(),
order: Order::Desc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
date_field => DateTime::from_timestamp_secs(1000),
))?;
index_writer.add_document(doc!(
date_field => DateTime::from_timestamp_secs(999),
))?;
index_writer.add_document(doc!(
date_field => DateTime::from_timestamp_secs(1001),
))?;
index_writer.commit()?;
let searcher = index.reader()?.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields
.date("date")
.unwrap()
.first_or_default_col(DateTime::from_timestamp_secs(0));
assert_eq!(fast_field.get_val(0), DateTime::from_timestamp_secs(1001));
assert_eq!(fast_field.get_val(1), DateTime::from_timestamp_secs(1000));
assert_eq!(fast_field.get_val(2), DateTime::from_timestamp_secs(999));
Ok(())
}
#[test]
fn test_doc_mapping() {
let doc_mapping = DocIdMapping::from_new_id_to_old_id(vec![3, 2, 5]);
assert_eq!(doc_mapping.get_old_doc_id(0), 3);
assert_eq!(doc_mapping.get_old_doc_id(1), 2);
assert_eq!(doc_mapping.get_old_doc_id(2), 5);
assert_eq!(doc_mapping.get_new_doc_id(0), 0);
assert_eq!(doc_mapping.get_new_doc_id(1), 0);
assert_eq!(doc_mapping.get_new_doc_id(2), 1);
assert_eq!(doc_mapping.get_new_doc_id(3), 0);
assert_eq!(doc_mapping.get_new_doc_id(4), 0);
assert_eq!(doc_mapping.get_new_doc_id(5), 2);
}
#[test]
fn test_doc_mapping_remap() {
let doc_mapping = DocIdMapping::from_new_id_to_old_id(vec![2, 8, 3]);
assert_eq!(
&doc_mapping.remap(&[0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]),
&[2000, 8000, 3000]
);
}
#[test]
fn test_text_sort() -> crate::Result<()> {
let mut schema_builder = SchemaBuilder::new();
schema_builder.add_text_field("id", STRING | FAST | STORED);
schema_builder.add_text_field("name", TEXT | STORED);
let resp = IndexBuilder::new()
.schema(schema_builder.build())
.settings(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Asc,
}),
..Default::default()
})
.create_in_ram();
assert!(resp
.unwrap_err()
.to_string()
.contains("Unsupported field type"));
Ok(())
}
}

View File

@@ -803,7 +803,7 @@ mod tests {
use std::collections::{HashMap, HashSet};
use std::net::Ipv6Addr;
use columnar::{Cardinality, Column, MonotonicallyMappableToU128};
use columnar::{Column, MonotonicallyMappableToU128};
use itertools::Itertools;
use proptest::prop_oneof;
@@ -813,15 +813,15 @@ mod tests {
use crate::error::*;
use crate::indexer::index_writer::MEMORY_BUDGET_NUM_BYTES_MIN;
use crate::indexer::NoMergePolicy;
use crate::query::{BooleanQuery, Occur, Query, QueryParser, TermQuery};
use crate::query::{QueryParser, TermQuery};
use crate::schema::{
self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions, Schema,
self, Facet, FacetOptions, IndexRecordOption, IpAddrOptions, NumericOptions,
TextFieldIndexing, TextOptions, Value, FAST, INDEXED, STORED, STRING, TEXT,
};
use crate::store::DOCSTORE_CACHE_CAPACITY;
use crate::{
DateTime, DocAddress, Index, IndexSettings, IndexSortByField, IndexWriter, Order,
ReloadPolicy, TantivyDocument, Term,
DateTime, DocAddress, Index, IndexSettings, IndexWriter, ReloadPolicy, TantivyDocument,
Term,
};
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \
@@ -1462,116 +1462,6 @@ mod tests {
assert!(text_fast_field.term_ords(1).eq([1].into_iter()));
}
#[test]
fn test_delete_with_sort_by_field() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let id_field = schema_builder.add_u64_field("id", INDEXED | schema::STORED | FAST);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let index_reader = index.reader()?;
let mut index_writer = index.writer_for_tests()?;
// create and delete docs in same commit
for id in 0u64..5u64 {
index_writer.add_document(doc!(id_field => id))?;
}
for id in 2u64..4u64 {
index_writer.delete_term(Term::from_field_u64(id_field, id));
}
for id in 5u64..10u64 {
index_writer.add_document(doc!(id_field => id))?;
}
index_writer.commit()?;
index_reader.reload()?;
let searcher = index_reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.num_docs(), 8);
assert_eq!(segment_reader.max_doc(), 10);
let fast_field_reader = segment_reader.fast_fields().u64("id")?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.flat_map(|doc| fast_field_reader.values_for_doc(doc))
.collect();
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 1, 0]);
Ok(())
}
#[test]
fn test_delete_query_with_sort_by_field() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let id_field = schema_builder.add_u64_field("id", INDEXED | schema::STORED | FAST);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let index_reader = index.reader()?;
let mut index_writer = index.writer_for_tests()?;
// create and delete docs in same commit
for id in 0u64..5u64 {
index_writer.add_document(doc!(id_field => id))?;
}
for id in 1u64..4u64 {
let term = Term::from_field_u64(id_field, id);
let not_term = Term::from_field_u64(id_field, 2);
let term = Box::new(TermQuery::new(term, Default::default()));
let not_term = Box::new(TermQuery::new(not_term, Default::default()));
let query: BooleanQuery = vec![
(Occur::Must, term as Box<dyn Query>),
(Occur::MustNot, not_term as Box<dyn Query>),
]
.into();
index_writer.delete_query(Box::new(query))?;
}
for id in 5u64..10u64 {
index_writer.add_document(doc!(id_field => id))?;
}
index_writer.commit()?;
index_reader.reload()?;
let searcher = index_reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.num_docs(), 8);
assert_eq!(segment_reader.max_doc(), 10);
let fast_field_reader = segment_reader.fast_fields().u64("id")?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.flat_map(|doc| fast_field_reader.values_for_doc(doc))
.collect();
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 2, 0]);
Ok(())
}
#[derive(Debug, Clone)]
enum IndexingOp {
AddMultipleDoc {
@@ -1718,11 +1608,7 @@ mod tests {
id_list
}
fn test_operation_strategy(
ops: &[IndexingOp],
sort_index: bool,
force_end_merge: bool,
) -> crate::Result<Index> {
fn test_operation_strategy(ops: &[IndexingOp], force_end_merge: bool) -> crate::Result<Index> {
let mut schema_builder = schema::Schema::builder();
let json_field = schema_builder.add_json_field("json", FAST | TEXT | STORED);
let ip_field = schema_builder.add_ip_addr_field("ip", FAST | INDEXED | STORED);
@@ -1758,15 +1644,7 @@ mod tests {
);
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let schema = schema_builder.build();
let settings = if sort_index {
IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id_opt".to_string(),
order: Order::Asc,
}),
..Default::default()
}
} else {
let settings = {
IndexSettings {
..Default::default()
}
@@ -2329,78 +2207,13 @@ mod tests {
}
}
// Test that the index sort property holds
if sort_index {
// load all id_opt in each segment and check they are in order
for reader in searcher.segment_readers() {
let (ff_reader, _) = reader.fast_fields().u64_lenient("id_opt").unwrap().unwrap();
let mut ids_in_segment: Vec<u64> = Vec::new();
for doc in 0..reader.num_docs() {
ids_in_segment.extend(ff_reader.values_for_doc(doc));
}
assert!(is_sorted(&ids_in_segment));
fn is_sorted<T>(data: &[T]) -> bool
where T: Ord {
data.windows(2).all(|w| w[0] <= w[1])
}
}
}
Ok(index)
}
#[test]
fn test_fast_field_range() {
let ops: Vec<_> = (0..1000).map(|id| IndexingOp::add(id)).collect();
assert!(test_operation_strategy(&ops, false, true).is_ok());
}
#[test]
fn test_sort_index_on_opt_field_regression() {
assert!(test_operation_strategy(
&[
IndexingOp::add(81),
IndexingOp::add(70),
IndexingOp::DeleteDoc { id: 70 }
],
true,
false
)
.is_ok());
}
#[test]
fn test_simple_multiple_doc() {
assert!(test_operation_strategy(
&[
IndexingOp::AddMultipleDoc {
id: 7,
num_docs: 800,
value: IndexValue::U64(0),
},
IndexingOp::AddMultipleDoc {
id: 92,
num_docs: 800,
value: IndexValue::U64(0),
},
IndexingOp::AddMultipleDoc {
id: 30,
num_docs: 800,
value: IndexValue::U64(0),
},
IndexingOp::AddMultipleDoc {
id: 33,
num_docs: 800,
value: IndexValue::U64(0),
},
],
true,
false
)
.is_ok());
let ops: Vec<_> = (0..1000).map(IndexingOp::add).collect();
assert!(test_operation_strategy(&ops, true).is_ok());
}
#[test]
@@ -2414,7 +2227,6 @@ mod tests {
IndexingOp::Commit,
IndexingOp::Merge
],
true,
false
)
.is_ok());
@@ -2431,7 +2243,6 @@ mod tests {
IndexingOp::add(1),
IndexingOp::Commit,
],
false,
true
)
.is_ok());
@@ -2439,184 +2250,48 @@ mod tests {
#[test]
fn test_minimal_sort_force_end_merge() {
assert!(test_operation_strategy(
&[IndexingOp::add(23), IndexingOp::add(13),],
false,
false
)
.is_ok());
assert!(
test_operation_strategy(&[IndexingOp::add(23), IndexingOp::add(13),], false).is_ok()
);
}
#[test]
fn test_minimal_sort() {
let mut schema_builder = Schema::builder();
let val = schema_builder.add_u64_field("val", FAST);
let id = schema_builder.add_u64_field("id", FAST);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Asc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()
.unwrap();
let mut writer = index.writer_for_tests().unwrap();
writer
.add_document(doc!(id=> 3u64, val=>4u64, val=>4u64))
.unwrap();
writer
.add_document(doc!(id=> 2u64, val=>2u64, val=>2u64))
.unwrap();
writer
.add_document(doc!(id=> 1u64, val=>1u64, val=>1u64))
.unwrap();
writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let id_col: Column = segment_reader
.fast_fields()
.column_opt("id")
.unwrap()
.unwrap();
let val_col: Column = segment_reader
.fast_fields()
.column_opt("val")
.unwrap()
.unwrap();
assert_eq!(id_col.get_cardinality(), Cardinality::Full);
assert_eq!(val_col.get_cardinality(), Cardinality::Multivalued);
assert_eq!(id_col.first(0u32), Some(1u64));
assert_eq!(id_col.first(1u32), Some(2u64));
assert!(val_col.values_for_doc(0u32).eq([1u64, 1u64].into_iter()));
assert!(val_col.values_for_doc(1u32).eq([2u64, 2u64].into_iter()));
}
#[test]
fn test_minimal_sort_force_end_merge_with_delete() {
fn test_minimal_no_force_end_merge() {
assert!(test_operation_strategy(
&[
IndexingOp::add(23),
IndexingOp::add(13),
IndexingOp::DeleteDoc { id: 13 }
],
true,
true
)
.is_ok());
}
#[test]
fn test_minimal_no_sort_no_force_end_merge() {
assert!(test_operation_strategy(
&[
IndexingOp::add(23),
IndexingOp::add(13),
IndexingOp::DeleteDoc { id: 13 }
],
false,
false
)
.is_ok());
}
#[test]
fn test_minimal_sort_merge() {
assert!(test_operation_strategy(&[IndexingOp::add(3),], true, true).is_ok());
}
use proptest::prelude::*;
proptest! {
#![proptest_config(ProptestConfig::with_cases(20))]
#[test]
fn test_delete_with_sort_proptest_adding(ops in proptest::collection::vec(adding_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], true, false).is_ok());
fn test_delete_proptest_adding(ops in proptest::collection::vec(adding_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], false).is_ok());
}
#[test]
fn test_delete_without_sort_proptest_adding(ops in proptest::collection::vec(adding_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], false, false).is_ok());
fn test_delete_proptest_with_merge_adding(ops in proptest::collection::vec(adding_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], true).is_ok());
}
#[test]
fn test_delete_with_sort_proptest_with_merge_adding(ops in proptest::collection::vec(adding_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], true, true).is_ok());
fn test_delete_proptest(ops in proptest::collection::vec(balanced_operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], false).is_ok());
}
#[test]
fn test_delete_without_sort_proptest_with_merge_adding(ops in proptest::collection::vec(adding_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], false, true).is_ok());
}
#[test]
fn test_delete_with_sort_proptest(ops in proptest::collection::vec(balanced_operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], true, false).is_ok());
fn test_delete_proptest_with_merge(ops in proptest::collection::vec(balanced_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], true).is_ok());
}
#[test]
fn test_delete_without_sort_proptest(ops in proptest::collection::vec(balanced_operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], false, false).is_ok());
}
#[test]
fn test_delete_with_sort_proptest_with_merge(ops in proptest::collection::vec(balanced_operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], true, true).is_ok());
}
#[test]
fn test_delete_without_sort_proptest_with_merge(ops in proptest::collection::vec(balanced_operation_strategy(), 1..100)) {
assert!(test_operation_strategy(&ops[..], false, true).is_ok());
}
}
#[test]
fn test_delete_with_sort_by_field_last_opstamp_is_not_max() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let sort_by_field = schema_builder.add_u64_field("sort_by", FAST);
let id_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "sort_by".to_string(),
order: Order::Asc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let mut index_writer = index.writer_for_tests()?;
// We add a doc...
index_writer.add_document(doc!(sort_by_field => 2u64, id_field => 0u64))?;
// And remove it.
index_writer.delete_term(Term::from_field_u64(id_field, 0u64));
// We add another doc.
index_writer.add_document(doc!(sort_by_field=>1u64, id_field => 0u64))?;
// The expected result is a segment with
// maxdoc = 2
// numdoc = 1.
index_writer.commit()?;
let searcher = index.reader()?.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.max_doc(), 2);
assert_eq!(segment_reader.num_docs(), 1);
Ok(())
}
#[test]
@@ -2633,7 +2308,7 @@ mod tests {
IndexingOp::add(4),
Commit,
];
test_operation_strategy(&ops[..], false, true).unwrap();
test_operation_strategy(&ops[..], true).unwrap();
}
#[test]
@@ -2646,7 +2321,7 @@ mod tests {
Commit,
Merge,
];
test_operation_strategy(&ops[..], false, true).unwrap();
test_operation_strategy(&ops[..], true).unwrap();
}
#[test]
@@ -2658,7 +2333,7 @@ mod tests {
IndexingOp::add(13),
Commit,
];
test_operation_strategy(&ops[..], false, true).unwrap();
test_operation_strategy(&ops[..], true).unwrap();
}
#[test]
@@ -2669,7 +2344,7 @@ mod tests {
IndexingOp::add(9),
IndexingOp::add(10),
];
test_operation_strategy(&ops[..], false, false).unwrap();
test_operation_strategy(&ops[..], false).unwrap();
}
#[test]
@@ -2696,7 +2371,6 @@ mod tests {
IndexingOp::Commit,
IndexingOp::Commit
],
false,
false
)
.is_ok());
@@ -2716,7 +2390,6 @@ mod tests {
IndexingOp::Commit,
IndexingOp::Merge,
],
false,
true
)
.is_ok());
@@ -2817,4 +2490,29 @@ mod tests {
Ok(())
}
#[test]
fn test_bug_2442() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let json_field = schema_builder.add_json_field("json", TEXT | FAST);
let schema = schema_builder.build();
let index = Index::builder().schema(schema).create_in_ram()?;
let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer
.add_document(doc!(
json_field=>json!({"\u{0000}B":"1"})
))
.unwrap();
index_writer
.add_document(doc!(
json_field=>json!({" A":"1"})
))
.unwrap();
index_writer.commit()?;
Ok(())
}
}

View File

@@ -0,0 +1,147 @@
#[cfg(test)]
mod tests {
use crate::collector::TopDocs;
use crate::fastfield::AliveBitSet;
use crate::index::Index;
use crate::postings::Postings;
use crate::query::QueryParser;
use crate::schema::{
self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
TextFieldIndexing, TextOptions,
};
use crate::{DocAddress, DocSet, IndexSettings, IndexWriter, Term};
fn create_test_index(index_settings: Option<IndexSettings>) -> crate::Result<Index> {
let mut schema_builder = schema::Schema::builder();
let int_options = NumericOptions::default()
.set_fast()
.set_stored()
.set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
let bytes_options = BytesOptions::default().set_fast().set_indexed();
let bytes_field = schema_builder.add_bytes_field("bytes", bytes_options);
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let multi_numbers =
schema_builder.add_u64_field("multi_numbers", NumericOptions::default().set_fast());
let text_field_options = TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_index_option(schema::IndexRecordOption::WithFreqsAndPositions),
)
.set_stored();
let text_field = schema_builder.add_text_field("text_field", text_field_options);
let schema = schema_builder.build();
let mut index_builder = Index::builder().schema(schema);
if let Some(settings) = index_settings {
index_builder = index_builder.settings(settings);
}
let index = index_builder.create_in_ram()?;
{
let mut index_writer = index.writer_for_tests()?;
// segment 1 - range 1-3
index_writer.add_document(doc!(int_field=>1_u64))?;
index_writer.add_document(
doc!(int_field=>3_u64, multi_numbers => 3_u64, multi_numbers => 4_u64, bytes_field => vec![1, 2, 3], text_field => "some text", facet_field=> Facet::from("/book/crime")),
)?;
index_writer.add_document(
doc!(int_field=>1_u64, text_field=> "deleteme", text_field => "ok text more text"),
)?;
index_writer.add_document(
doc!(int_field=>2_u64, multi_numbers => 2_u64, multi_numbers => 3_u64, text_field => "ok text more text"),
)?;
index_writer.commit()?;
index_writer.add_document(doc!(int_field=>20_u64, multi_numbers => 20_u64))?;
let in_val = 1u64;
index_writer.add_document(doc!(int_field=>in_val, text_field=> "deleteme" , text_field => "ok text more text", facet_field=> Facet::from("/book/crime")))?;
index_writer.commit()?;
let int_vals = [10u64, 5];
index_writer.add_document( // position of this doc after delete in desc sorting = [2], in disjunct case [1]
doc!(int_field=>int_vals[0], multi_numbers => 10_u64, multi_numbers => 11_u64, text_field=> "blubber", facet_field=> Facet::from("/book/fantasy")),
)?;
index_writer.add_document(doc!(int_field=>int_vals[1], text_field=> "deleteme"))?;
index_writer.add_document(
doc!(int_field=>1_000u64, multi_numbers => 1001_u64, multi_numbers => 1002_u64, bytes_field => vec![5, 5],text_field => "the biggest num")
)?;
index_writer.delete_term(Term::from_field_text(text_field, "deleteme"));
index_writer.commit()?;
}
// Merging the segments
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer: IndexWriter = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
Ok(index)
}
#[test]
fn test_merge_index() {
let index = create_test_index(Some(IndexSettings {
..Default::default()
}))
.unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_readers().last().unwrap();
let searcher = index.reader().unwrap().searcher();
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(&index, vec![my_text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
assert_eq!(do_search("some"), vec![1]);
assert_eq!(do_search("blubber"), vec![3]);
assert_eq!(do_search("biggest"), vec![4]);
}
// postings file
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let term_a = Term::from_field_text(my_text_field, "text");
let inverted_index = segment_reader.inverted_index(my_text_field).unwrap();
let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap()
.unwrap();
assert_eq!(postings.doc_freq(), 2);
let fallback_bitset = AliveBitSet::for_test_from_deleted_docs(&[0], 100);
assert_eq!(
postings.doc_freq_given_deletes(
segment_reader.alive_bitset().unwrap_or(&fallback_bitset)
),
2
);
assert_eq!(postings.term_freq(), 1);
let mut output = vec![];
postings.positions(&mut output);
assert_eq!(output, vec![1]);
postings.advance();
assert_eq!(postings.term_freq(), 2);
postings.positions(&mut output);
assert_eq!(output, vec![1, 3]);
}
}
}

View File

@@ -1,8 +1,7 @@
use std::sync::Arc;
use columnar::{
ColumnType, ColumnValues, ColumnarReader, MergeRowOrder, RowAddr, ShuffleMergeOrder,
StackMergeOrder,
ColumnType, ColumnarReader, MergeRowOrder, RowAddr, ShuffleMergeOrder, StackMergeOrder,
};
use common::ReadOnlyBitSet;
use itertools::Itertools;
@@ -11,7 +10,7 @@ use measure_time::debug_time;
use crate::directory::WritePtr;
use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{AliveBitSet, FastFieldNotAvailableError};
use crate::fastfield::AliveBitSet;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::index::{Segment, SegmentComponent, SegmentReader};
use crate::indexer::doc_id_mapping::{MappingType, SegmentDocIdMapping};
@@ -20,9 +19,7 @@ use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{value_type_to_column_type, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
use crate::{
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentOrdinal,
};
use crate::{DocAddress, DocId, InvertedIndexReader};
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
@@ -80,7 +77,6 @@ fn estimate_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::
}
pub struct IndexMerger {
index_settings: IndexSettings,
schema: Schema,
pub(crate) readers: Vec<SegmentReader>,
max_doc: u32,
@@ -116,7 +112,7 @@ fn convert_to_merge_order(
) -> MergeRowOrder {
match doc_id_mapping.mapping_type() {
MappingType::Stacked => MergeRowOrder::Stack(StackMergeOrder::stack(columnars)),
MappingType::StackedWithDeletes | MappingType::Shuffled => {
MappingType::StackedWithDeletes => {
// RUST/LLVM is amazing. The following conversion is actually a no-op:
// no allocation, no copy.
let new_row_id_to_old_row_id: Vec<RowAddr> = doc_id_mapping
@@ -149,13 +145,9 @@ fn extract_fast_field_required_columns(schema: &Schema) -> Vec<(String, ColumnTy
}
impl IndexMerger {
pub fn open(
schema: Schema,
index_settings: IndexSettings,
segments: &[Segment],
) -> crate::Result<IndexMerger> {
pub fn open(schema: Schema, segments: &[Segment]) -> crate::Result<IndexMerger> {
let alive_bitset = segments.iter().map(|_| None).collect_vec();
Self::open_with_custom_alive_set(schema, index_settings, segments, alive_bitset)
Self::open_with_custom_alive_set(schema, segments, alive_bitset)
}
// Create merge with a custom delete set.
@@ -172,7 +164,6 @@ impl IndexMerger {
// segments and partitions them e.g. by a value in a field.
pub fn open_with_custom_alive_set(
schema: Schema,
index_settings: IndexSettings,
segments: &[Segment],
alive_bitset_opt: Vec<Option<AliveBitSet>>,
) -> crate::Result<IndexMerger> {
@@ -186,9 +177,6 @@ impl IndexMerger {
}
let max_doc = readers.iter().map(|reader| reader.num_docs()).sum();
if let Some(sort_by_field) = index_settings.sort_by_field.as_ref() {
readers = Self::sort_readers_by_min_sort_field(readers, sort_by_field)?;
}
// sort segments by their natural sort setting
if max_doc >= MAX_DOC_LIMIT {
let err_msg = format!(
@@ -198,37 +186,12 @@ impl IndexMerger {
return Err(crate::TantivyError::InvalidArgument(err_msg));
}
Ok(IndexMerger {
index_settings,
schema,
readers,
max_doc,
})
}
fn sort_readers_by_min_sort_field(
readers: Vec<SegmentReader>,
sort_by_field: &IndexSortByField,
) -> crate::Result<Vec<SegmentReader>> {
// presort the readers by their min_values, so that when they are disjunct, we can use
// the regular merge logic (implicitly sorted)
let mut readers_with_min_sort_values = readers
.into_iter()
.map(|reader| {
let accessor = Self::get_sort_field_accessor(&reader, sort_by_field)?;
Ok((reader, accessor.min_value()))
})
.collect::<crate::Result<Vec<_>>>()?;
if sort_by_field.order.is_asc() {
readers_with_min_sort_values.sort_by_key(|(_, min_val)| *min_val);
} else {
readers_with_min_sort_values.sort_by_key(|(_, min_val)| std::cmp::Reverse(*min_val));
}
Ok(readers_with_min_sort_values
.into_iter()
.map(|(reader, _)| reader)
.collect())
}
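
The asc/desc branch above is the standard `std::cmp::Reverse` idiom; a tiny self-contained illustration on (ordinal, min_value) pairs:

```
use std::cmp::Reverse;

fn main() {
    // (reader ordinal, min value of the sort field), as collected above.
    let mut readers = vec![(0u32, 50u64), (1, 5), (2, 20)];
    readers.sort_by_key(|&(_, min_val)| min_val);
    assert_eq!(readers, vec![(1, 5), (2, 20), (0, 50)]);
    readers.sort_by_key(|&(_, min_val)| Reverse(min_val));
    assert_eq!(readers, vec![(0, 50), (2, 20), (1, 5)]);
}
```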
fn write_fieldnorms(
&self,
mut fieldnorms_serializer: FieldNormsSerializer,
@@ -276,128 +239,6 @@ impl IndexMerger {
Ok(())
}
/// Checks if the readers are disjunct for their sort property and in the correct order to be
/// able to just stack them.
pub(crate) fn is_disjunct_and_sorted_on_sort_property(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<bool> {
let reader_ordinal_and_field_accessors =
self.get_reader_with_sort_field_accessor(sort_by_field)?;
let everything_is_in_order = reader_ordinal_and_field_accessors
.into_iter()
.map(|(_, col)| Arc::new(col))
.tuple_windows()
.all(|(field_accessor1, field_accessor2)| {
if sort_by_field.order.is_asc() {
field_accessor1.max_value() <= field_accessor2.min_value()
} else {
field_accessor1.min_value() >= field_accessor2.max_value()
}
});
Ok(everything_is_in_order)
}
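
The pairwise check above boils down to comparing adjacent (min, max) ranges; a self-contained sketch of the ascending case (assuming the `itertools` crate, which the merger already depends on):

```
use itertools::Itertools;

/// Ascending case: each segment's max must not exceed the next
/// segment's min for the segments to be stackable as-is.
fn is_disjunct_asc(ranges: &[(u64, u64)]) -> bool {
    ranges.iter().tuple_windows().all(|(a, b)| a.1 <= b.0)
}

fn main() {
    assert!(is_disjunct_asc(&[(1, 3), (10, 20), (50, 1000)]));
    assert!(!is_disjunct_asc(&[(1, 20), (10, 20)])); // overlap
}
```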
pub(crate) fn get_sort_field_accessor(
reader: &SegmentReader,
sort_by_field: &IndexSortByField,
) -> crate::Result<Arc<dyn ColumnValues>> {
reader.schema().get_field(&sort_by_field.field)?;
let (value_accessor, _column_type) = reader
.fast_fields()
.u64_lenient(&sort_by_field.field)?
.ok_or_else(|| FastFieldNotAvailableError {
field_name: sort_by_field.field.to_string(),
})?;
Ok(value_accessor.first_or_default_col(0u64))
}
/// Collecting value_accessors into a vec to bind the lifetime.
pub(crate) fn get_reader_with_sort_field_accessor(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<Vec<(SegmentOrdinal, Arc<dyn ColumnValues>)>> {
let reader_ordinal_and_field_accessors = self
.readers
.iter()
.enumerate()
.map(|(reader_ordinal, _)| reader_ordinal as SegmentOrdinal)
.map(|reader_ordinal: SegmentOrdinal| {
let value_accessor = Self::get_sort_field_accessor(
&self.readers[reader_ordinal as usize],
sort_by_field,
)?;
Ok((reader_ordinal, value_accessor))
})
.collect::<crate::Result<Vec<_>>>()?;
Ok(reader_ordinal_and_field_accessors)
}
/// Generates the doc_id mapping, where the position in the vec is the new
/// doc_id.
/// ReaderWithOrdinal will include the ordinal position of the
/// reader in self.readers.
pub(crate) fn generate_doc_id_mapping_with_sort_by_field(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<SegmentDocIdMapping> {
let reader_ordinal_and_field_accessors =
self.get_reader_with_sort_field_accessor(sort_by_field)?;
// Loading the field accessor on demand causes a 15x regression
// create iterators over segment/sort_accessor/doc_id tuple
let doc_id_reader_pair =
reader_ordinal_and_field_accessors
.iter()
.map(|(reader_ord, ff_reader)| {
let reader = &self.readers[*reader_ord as usize];
reader
.doc_ids_alive()
.map(move |doc_id| (doc_id, reader_ord, ff_reader))
});
let total_num_new_docs = self
.readers
.iter()
.map(|reader| reader.num_docs() as usize)
.sum();
let mut sorted_doc_ids: Vec<DocAddress> = Vec::with_capacity(total_num_new_docs);
// create iterator tuple of (old doc_id, reader) in order of the new doc_ids
sorted_doc_ids.extend(
doc_id_reader_pair
.into_iter()
.kmerge_by(|a, b| {
let val1 = a.2.get_val(a.0);
let val2 = b.2.get_val(b.0);
if sort_by_field.order == Order::Asc {
val1 < val2
} else {
val1 > val2
}
})
.map(|(doc_id, &segment_ord, _)| DocAddress {
doc_id,
segment_ord,
}),
);
let alive_bitsets: Vec<Option<ReadOnlyBitSet>> = self
.readers
.iter()
.map(|segment_reader| {
let alive_bitset = segment_reader.alive_bitset()?;
Some(alive_bitset.bitset().clone())
})
.collect();
Ok(SegmentDocIdMapping::new(
sorted_doc_ids,
MappingType::Shuffled,
alive_bitsets,
))
}
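
The `kmerge_by` call is what produces the globally sorted doc order: each segment contributes its alive docs already ordered by the sort field, and the k-way merge repeatedly picks the next smallest (or largest). A minimal sketch of the same itertools call on plain integers:

```
use itertools::Itertools;

fn main() {
    // Two segments, each already sorted ascending by the sort field.
    let seg0 = vec![1u64, 3, 7];
    let seg1 = vec![2u64, 4];
    let merged: Vec<u64> = vec![seg0, seg1]
        .into_iter()
        .kmerge_by(|a, b| a < b) // flip to a > b for descending
        .collect();
    assert_eq!(merged, vec![1, 2, 3, 4, 7]);
}
```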
/// Creates the mapping for the case where the segments are simply stacked. This is helpful to
/// share the merge code path between index sorting and the other cases.
pub(crate) fn get_doc_id_from_concatenated_data(&self) -> crate::Result<SegmentDocIdMapping> {
@@ -515,7 +356,6 @@ impl IndexMerger {
);
let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
let mut doc_id_and_positions = vec![];
while merged_terms.advance() {
segment_postings_containing_the_term.clear();
@@ -611,37 +451,13 @@ impl IndexMerger {
0u32
};
// if doc_id_mapping exists, the doc_ids are reordered, they are
// not just stacked. The field serializer expects monotonically increasing
// doc_ids, so we collect and sort them first, before writing.
//
// I think this is not strictly necessary, it would be possible to
// avoid the loading into a vec via some form of kmerge, but then the merge
// logic would deviate much more from the stacking case (unsorted index)
if !doc_id_mapping.is_trivial() {
doc_id_and_positions.push((
remapped_doc_id,
term_freq,
positions_buffer.to_vec(),
));
} else {
let delta_positions = delta_computer.compute_delta(&positions_buffer);
field_serializer.write_doc(remapped_doc_id, term_freq, delta_positions);
}
let delta_positions = delta_computer.compute_delta(&positions_buffer);
field_serializer.write_doc(remapped_doc_id, term_freq, delta_positions);
}
doc = segment_postings.advance();
}
}
if !doc_id_mapping.is_trivial() {
doc_id_and_positions.sort_unstable_by_key(|&(doc_id, _, _)| doc_id);
for (doc_id, term_freq, positions) in &doc_id_and_positions {
let delta_positions = delta_computer.compute_delta(positions);
field_serializer.write_doc(*doc_id, *term_freq, delta_positions);
}
doc_id_and_positions.clear();
}
// closing the term.
field_serializer.close_term()?;
}
@@ -670,47 +486,13 @@ impl IndexMerger {
Ok(())
}
fn write_storable_fields(
&self,
store_writer: &mut StoreWriter,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
fn write_storable_fields(&self, store_writer: &mut StoreWriter) -> crate::Result<()> {
debug_time!("write-storable-fields");
debug!("write-storable-field");
if !doc_id_mapping.is_trivial() {
debug!("non-trivial-doc-id-mapping");
let store_readers: Vec<_> = self
.readers
.iter()
.map(|reader| reader.get_store_reader(50))
.collect::<Result<_, _>>()?;
let mut document_iterators: Vec<_> = store_readers
.iter()
.enumerate()
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
.collect();
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let doc_bytes_it = &mut document_iterators[old_doc_addr.segment_ord as usize];
if let Some(doc_bytes_res) = doc_bytes_it.next() {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
} else {
return Err(DataCorruption::comment_only(format!(
"unexpected missing document in docstore on merge, doc address \
{old_doc_addr:?}",
))
.into());
}
}
} else {
debug!("trivial-doc-id-mapping");
for reader in &self.readers {
let store_reader = reader.get_store_reader(1)?;
if reader.has_deletes()
for reader in &self.readers {
let store_reader = reader.get_store_reader(1)?;
if reader.has_deletes()
// If there is not enough data in the store, we avoid stacking in order to
// avoid creating many small blocks in the doc store. Once we have 5 full blocks,
// we start stacking. In the worst case 2/7 of the blocks would be very small.
@@ -726,14 +508,13 @@ impl IndexMerger {
// take 7 in order to not walk over all checkpoints.
|| store_reader.block_checkpoints().take(7).count() < 6
|| store_reader.decompressor() != store_writer.compressor().into()
{
for doc_bytes_res in store_reader.iter_raw(reader.alive_bitset()) {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
}
} else {
store_writer.stack(store_reader)?;
{
for doc_bytes_res in store_reader.iter_raw(reader.alive_bitset()) {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
}
} else {
store_writer.stack(store_reader)?;
}
}
Ok(())
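
The stacking heuristic above packs a bit of arithmetic into one condition; a hedged restatement as a standalone predicate (hypothetical `can_stack`, not tantivy API):

```
/// Hypothetical restatement of the guard above: sample at most 7 block
/// checkpoints; require at least 6 (i.e. at least 5 full blocks plus
/// possibly a partial one), no deletes, and a matching compression
/// codec, before stacking whole blocks instead of re-compressing
/// doc by doc.
fn can_stack(checkpoints_sampled_up_to_7: usize, has_deletes: bool, same_codec: bool) -> bool {
    !has_deletes && same_codec && checkpoints_sampled_up_to_7 >= 6
}

fn main() {
    assert!(can_stack(7, false, true));
    assert!(!can_stack(3, false, true)); // too few full blocks
    assert!(!can_stack(7, true, true)); // deletes force re-compression
}
```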
@@ -745,18 +526,7 @@ impl IndexMerger {
/// # Returns
/// The number of documents in the resulting segment.
pub fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
let doc_id_mapping = if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref()
{
// If the documents are already sorted and stackable, we ignore the mapping and execute
// it as if there was no sorting
if self.is_disjunct_and_sorted_on_sort_property(sort_by_field)? {
self.get_doc_id_from_concatenated_data()?
} else {
self.generate_doc_id_mapping_with_sort_by_field(sort_by_field)?
}
} else {
self.get_doc_id_from_concatenated_data()?
};
let doc_id_mapping = self.get_doc_id_from_concatenated_data()?;
debug!("write-fieldnorms");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(fieldnorms_serializer, &doc_id_mapping)?;
@@ -773,7 +543,7 @@ impl IndexMerger {
)?;
debug!("write-storagefields");
self.write_storable_fields(serializer.get_store_writer(), &doc_id_mapping)?;
self.write_storable_fields(serializer.get_store_writer())?;
debug!("write-fastfields");
self.write_fast_fields(serializer.get_fast_field_write(), doc_id_mapping)?;
@@ -805,7 +575,7 @@ mod tests {
use crate::time::OffsetDateTime;
use crate::{
assert_nearly_equals, schema, DateTime, DocAddress, DocId, DocSet, IndexSettings,
IndexSortByField, IndexWriter, Order, Searcher,
IndexWriter, Searcher,
};
#[test]
@@ -1278,60 +1048,6 @@ mod tests {
test_merge_facets(None, true)
}
#[test]
fn test_merge_facets_sort_asc() {
// In the merge case this will go through the doc_id mapping code
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Asc,
}),
..Default::default()
}),
true,
);
// In the merge case this will not go through the doc_id mapping code, because the data is
// sorted and disjunct
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Asc,
}),
..Default::default()
}),
false,
);
}
#[test]
fn test_merge_facets_sort_desc() {
// In the merge case this will go through the doc_id mapping code
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
true,
);
// In the merge case this will not go through the doc_id mapping code, because the data is
// sorted and disjunct
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
false,
);
}
// force_segment_value_overlap forces the int value for sorting to have overlapping min and max
// ranges between segments so that the merge algorithm can't apply certain optimizations
fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap: bool) {

View File

@@ -1,579 +0,0 @@
#[cfg(test)]
mod tests {
use crate::collector::TopDocs;
use crate::fastfield::AliveBitSet;
use crate::index::Index;
use crate::postings::Postings;
use crate::query::QueryParser;
use crate::schema::{
self, BytesOptions, Facet, FacetOptions, IndexRecordOption, NumericOptions,
TextFieldIndexing, TextOptions, Value,
};
use crate::{
DocAddress, DocSet, IndexSettings, IndexSortByField, IndexWriter, Order, TantivyDocument,
Term,
};
fn create_test_index_posting_list_issue(index_settings: Option<IndexSettings>) -> Index {
let mut schema_builder = schema::Schema::builder();
let int_options = NumericOptions::default().set_fast().set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let schema = schema_builder.build();
let mut index_builder = Index::builder().schema(schema);
if let Some(settings) = index_settings {
index_builder = index_builder.settings(settings);
}
let index = index_builder.create_in_ram().unwrap();
{
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
index_writer
.add_document(doc!(int_field=>3_u64, facet_field=> Facet::from("/crime")))
.unwrap();
index_writer
.add_document(doc!(int_field=>6_u64, facet_field=> Facet::from("/crime")))
.unwrap();
index_writer.commit().unwrap();
index_writer
.add_document(doc!(int_field=>5_u64, facet_field=> Facet::from("/fanta")))
.unwrap();
index_writer.commit().unwrap();
}
// Merging the segments
{
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
assert!(index_writer.merge(&segment_ids).wait().is_ok());
assert!(index_writer.wait_merging_threads().is_ok());
}
index
}
// force_disjunct_segment_sort_values forces the field by which the index is sorted to have
// disjunct ranges between segments, e.g. values in segment [1-3] [10 - 20] [50 - 500]
fn create_test_index(
index_settings: Option<IndexSettings>,
force_disjunct_segment_sort_values: bool,
) -> crate::Result<Index> {
let mut schema_builder = schema::Schema::builder();
let int_options = NumericOptions::default()
.set_fast()
.set_stored()
.set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
let bytes_options = BytesOptions::default().set_fast().set_indexed();
let bytes_field = schema_builder.add_bytes_field("bytes", bytes_options);
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let multi_numbers =
schema_builder.add_u64_field("multi_numbers", NumericOptions::default().set_fast());
let text_field_options = TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_index_option(schema::IndexRecordOption::WithFreqsAndPositions),
)
.set_stored();
let text_field = schema_builder.add_text_field("text_field", text_field_options);
let schema = schema_builder.build();
let mut index_builder = Index::builder().schema(schema);
if let Some(settings) = index_settings {
index_builder = index_builder.settings(settings);
}
let index = index_builder.create_in_ram()?;
{
let mut index_writer = index.writer_for_tests()?;
// segment 1 - range 1-3
index_writer.add_document(doc!(int_field=>1_u64))?;
index_writer.add_document(
doc!(int_field=>3_u64, multi_numbers => 3_u64, multi_numbers => 4_u64, bytes_field => vec![1, 2, 3], text_field => "some text", facet_field=> Facet::from("/book/crime")),
)?;
index_writer.add_document(
doc!(int_field=>1_u64, text_field=> "deleteme", text_field => "ok text more text"),
)?;
index_writer.add_document(
doc!(int_field=>2_u64, multi_numbers => 2_u64, multi_numbers => 3_u64, text_field => "ok text more text"),
)?;
index_writer.commit()?;
// segment 2 - range 1-20 , with force_disjunct_segment_sort_values 10-20
index_writer.add_document(doc!(int_field=>20_u64, multi_numbers => 20_u64))?;
let in_val = if force_disjunct_segment_sort_values {
10_u64
} else {
1
};
index_writer.add_document(doc!(int_field=>in_val, text_field=> "deleteme" , text_field => "ok text more text", facet_field=> Facet::from("/book/crime")))?;
index_writer.commit()?;
// segment 3 - range 5-1000, with force_disjunct_segment_sort_values 50-1000
let int_vals = if force_disjunct_segment_sort_values {
[100_u64, 50]
} else {
[10, 5]
};
index_writer.add_document( // position of this doc after delete in desc sorting = [2], in disjunct case [1]
doc!(int_field=>int_vals[0], multi_numbers => 10_u64, multi_numbers => 11_u64, text_field=> "blubber", facet_field=> Facet::from("/book/fantasy")),
)?;
index_writer.add_document(doc!(int_field=>int_vals[1], text_field=> "deleteme"))?;
index_writer.add_document(
doc!(int_field=>1_000u64, multi_numbers => 1001_u64, multi_numbers => 1002_u64, bytes_field => vec![5, 5],text_field => "the biggest num")
)?;
index_writer.delete_term(Term::from_field_text(text_field, "deleteme"));
index_writer.commit()?;
}
// Merging the segments
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer: IndexWriter = index.writer_for_tests()?;
index_writer.merge(&segment_ids).wait()?;
index_writer.wait_merging_threads()?;
}
Ok(index)
}
#[test]
fn test_merge_sorted_postinglist_sort_issue() {
create_test_index_posting_list_issue(Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}));
}
#[test]
fn test_merge_sorted_index_desc_not_disjunct() {
test_merge_sorted_index_desc_(false);
}
#[test]
fn test_merge_sorted_index_desc_disjunct() {
test_merge_sorted_index_desc_(true);
}
fn test_merge_sorted_index_desc_(force_disjunct_segment_sort_values: bool) {
let index = create_test_index(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
force_disjunct_segment_sort_values,
)
.unwrap();
let int_field = index.schema().get_field("intval").unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_readers().last().unwrap();
let fast_fields = segment_reader.fast_fields();
let fast_field = fast_fields.u64("intval").unwrap();
assert_eq!(fast_field.first(5), Some(1u64));
assert_eq!(fast_field.first(4), Some(2u64));
assert_eq!(fast_field.first(3), Some(3u64));
if force_disjunct_segment_sort_values {
assert_eq!(fast_field.first(2), Some(20u64));
assert_eq!(fast_field.first(1), Some(100u64));
} else {
assert_eq!(fast_field.first(2), Some(10u64));
assert_eq!(fast_field.first(1), Some(20u64));
}
assert_eq!(fast_field.first(0), Some(1_000u64));
// test new field norm mapping
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let fieldnorm_reader = segment_reader.get_fieldnorms_reader(my_text_field).unwrap();
assert_eq!(fieldnorm_reader.fieldnorm(0), 3); // the biggest num
if force_disjunct_segment_sort_values {
assert_eq!(fieldnorm_reader.fieldnorm(1), 1); // blubber
assert_eq!(fieldnorm_reader.fieldnorm(2), 0);
} else {
assert_eq!(fieldnorm_reader.fieldnorm(1), 0);
assert_eq!(fieldnorm_reader.fieldnorm(2), 1); // blubber
}
assert_eq!(fieldnorm_reader.fieldnorm(3), 2); // some text
assert_eq!(fieldnorm_reader.fieldnorm(5), 0);
}
let my_text_field = index.schema().get_field("text_field").unwrap();
let searcher = index.reader().unwrap().searcher();
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(&index, vec![my_text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
assert_eq!(do_search("some"), vec![3]);
if force_disjunct_segment_sort_values {
assert_eq!(do_search("blubber"), vec![1]);
} else {
assert_eq!(do_search("blubber"), vec![2]);
}
assert_eq!(do_search("biggest"), vec![0]);
}
// postings file
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let term_a = Term::from_field_text(my_text_field, "text");
let inverted_index = segment_reader.inverted_index(my_text_field).unwrap();
let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap()
.unwrap();
assert_eq!(postings.doc_freq(), 2);
let fallback_bitset = AliveBitSet::for_test_from_deleted_docs(&[0], 100);
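// The merge applied all pending deletes, so alive_bitset() is None here and
// the synthetic fallback (doc 0 deleted out of 100) is used instead; doc 0
// does not contain "text", so the expected count stays 2 either way.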
assert_eq!(
postings.doc_freq_given_deletes(
segment_reader.alive_bitset().unwrap_or(&fallback_bitset)
),
2
);
assert_eq!(postings.term_freq(), 1);
let mut output = vec![];
postings.positions(&mut output);
assert_eq!(output, vec![1]);
postings.advance();
assert_eq!(postings.term_freq(), 2);
postings.positions(&mut output);
assert_eq!(output, vec![1, 3]);
}
// access doc store
{
let blubber_pos = if force_disjunct_segment_sort_values {
1
} else {
2
};
let doc = searcher
.doc::<TantivyDocument>(DocAddress::new(0, blubber_pos))
.unwrap();
assert_eq!(
doc.get_first(my_text_field).unwrap().as_value().as_str(),
Some("blubber")
);
let doc = searcher
.doc::<TantivyDocument>(DocAddress::new(0, 0))
.unwrap();
assert_eq!(
doc.get_first(int_field).unwrap().as_value().as_u64(),
Some(1000)
);
}
}
#[test]
fn test_merge_unsorted_index() {
let index = create_test_index(
Some(IndexSettings {
..Default::default()
}),
false,
)
.unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_readers().last().unwrap();
let searcher = index.reader().unwrap().searcher();
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(&index, vec![my_text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
};
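// With no sort field configured, the merge keeps doc ids in insertion order
// (minus the deleted docs).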
assert_eq!(do_search("some"), vec![1]);
assert_eq!(do_search("blubber"), vec![3]);
assert_eq!(do_search("biggest"), vec![4]);
}
// postings file
{
let my_text_field = index.schema().get_field("text_field").unwrap();
let term_a = Term::from_field_text(my_text_field, "text");
let inverted_index = segment_reader.inverted_index(my_text_field).unwrap();
let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap()
.unwrap();
assert_eq!(postings.doc_freq(), 2);
let fallback_bitset = AliveBitSet::for_test_from_deleted_docs(&[0], 100);
assert_eq!(
postings.doc_freq_given_deletes(
segment_reader.alive_bitset().unwrap_or(&fallback_bitset)
),
2
);
assert_eq!(postings.term_freq(), 1);
let mut output = vec![];
postings.positions(&mut output);
assert_eq!(output, vec![1]);
postings.advance();
assert_eq!(postings.term_freq(), 2);
postings.positions(&mut output);
assert_eq!(output, vec![1, 3]);
}
}
// #[test]
// fn test_merge_sorted_index_asc() {
// let index = create_test_index(
// Some(IndexSettings {
// sort_by_field: Some(IndexSortByField {
// field: "intval".to_string(),
// order: Order::Asc,
// }),
// ..Default::default()
// }),
// false,
// )
// .unwrap();
// let int_field = index.schema().get_field("intval").unwrap();
// let multi_numbers = index.schema().get_field("multi_numbers").unwrap();
// let bytes_field = index.schema().get_field("bytes").unwrap();
// let reader = index.reader().unwrap();
// let searcher = reader.searcher();
// assert_eq!(searcher.segment_readers().len(), 1);
// let segment_reader = searcher.segment_readers().last().unwrap();
// let fast_fields = segment_reader.fast_fields();
// let fast_field = fast_fields.u64(int_field).unwrap();
// assert_eq!(fast_field.get_val(0), 1u64);
// assert_eq!(fast_field.get_val(1), 2u64);
// assert_eq!(fast_field.get_val(2), 3u64);
// assert_eq!(fast_field.get_val(3), 10u64);
// assert_eq!(fast_field.get_val(4), 20u64);
// assert_eq!(fast_field.get_val(5), 1_000u64);
// let get_vals = |fast_field: &MultiValuedFastFieldReader<u64>, doc_id: u32| -> Vec<u64> {
// let mut vals = vec![];
// fast_field.get_vals(doc_id, &mut vals);
// vals
// };
// let fast_fields = segment_reader.fast_fields();
// let fast_field = fast_fields.u64s(multi_numbers).unwrap();
// assert_eq!(&get_vals(&fast_field, 0), &[] as &[u64]);
// assert_eq!(&get_vals(&fast_field, 1), &[2, 3]);
// assert_eq!(&get_vals(&fast_field, 2), &[3, 4]);
// assert_eq!(&get_vals(&fast_field, 3), &[10, 11]);
// assert_eq!(&get_vals(&fast_field, 4), &[20]);
// assert_eq!(&get_vals(&fast_field, 5), &[1001, 1002]);
// let fast_field = fast_fields.bytes(bytes_field).unwrap();
// assert_eq!(fast_field.get_bytes(0), &[] as &[u8]);
// assert_eq!(fast_field.get_bytes(2), &[1, 2, 3]);
// assert_eq!(fast_field.get_bytes(5), &[5, 5]);
// // test new field norm mapping
// {
// let my_text_field = index.schema().get_field("text_field").unwrap();
// let fieldnorm_reader = segment_reader.get_fieldnorms_reader(my_text_field).unwrap();
// assert_eq!(fieldnorm_reader.fieldnorm(0), 0);
// assert_eq!(fieldnorm_reader.fieldnorm(1), 4);
// assert_eq!(fieldnorm_reader.fieldnorm(2), 2); // some text
// assert_eq!(fieldnorm_reader.fieldnorm(3), 1);
// assert_eq!(fieldnorm_reader.fieldnorm(5), 3); // the biggest num
// }
// let searcher = index.reader().unwrap().searcher();
// {
// let my_text_field = index.schema().get_field("text_field").unwrap();
// let do_search = |term: &str| {
// let query = QueryParser::for_index(&index, vec![my_text_field])
// .parse_query(term)
// .unwrap();
// let top_docs: Vec<(f32, DocAddress)> =
// searcher.search(&query, &TopDocs::with_limit(3)).unwrap();
// top_docs.iter().map(|el| el.1.doc_id).collect::<Vec<_>>()
// };
// assert_eq!(do_search("some"), vec![2]);
// assert_eq!(do_search("blubber"), vec![3]);
// assert_eq!(do_search("biggest"), vec![5]);
// }
// // postings file
// {
// let my_text_field = index.schema().get_field("text_field").unwrap();
// let term_a = Term::from_field_text(my_text_field, "text");
// let inverted_index = segment_reader.inverted_index(my_text_field).unwrap();
// let mut postings = inverted_index
// .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
// .unwrap()
// .unwrap();
// assert_eq!(postings.doc_freq(), 2);
// let fallback_bitset = AliveBitSet::for_test_from_deleted_docs(&[0], 100);
// assert_eq!(
// postings.doc_freq_given_deletes(
// segment_reader.alive_bitset().unwrap_or(&fallback_bitset)
// ),
// 2
// );
// let mut output = vec![];
// postings.positions(&mut output);
// assert_eq!(output, vec![1, 3]);
// postings.advance();
// postings.positions(&mut output);
// assert_eq!(output, vec![1]);
// }
// // access doc store
// {
// let doc = searcher.doc(DocAddress::new(0, 0)).unwrap();
// assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(1));
// let doc = searcher.doc(DocAddress::new(0, 1)).unwrap();
// assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(2));
// let doc = searcher.doc(DocAddress::new(0, 2)).unwrap();
// assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(3));
// let doc = searcher.doc(DocAddress::new(0, 3)).unwrap();
// assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(10));
// let doc = searcher.doc(DocAddress::new(0, 4)).unwrap();
// assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(20));
// let doc = searcher.doc(DocAddress::new(0, 5)).unwrap();
// assert_eq!(doc.get_first(int_field).unwrap().as_u64(), Some(1_000));
// }
// }
}
#[cfg(all(test, feature = "unstable"))]
mod bench_sorted_index_merge {
use test::{self, Bencher};
use crate::index::Index;
use crate::indexer::merger::IndexMerger;
use crate::schema::{NumericOptions, Schema};
use crate::{IndexSettings, IndexSortByField, IndexWriter, Order};
fn create_index(sort_by_field: Option<IndexSortByField>) -> Index {
let mut schema_builder = Schema::builder();
let int_options = NumericOptions::default().set_fast().set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
let schema = schema_builder.build();
let index_builder = Index::builder().schema(schema).settings(IndexSettings {
sort_by_field,
..Default::default()
});
let index = index_builder.create_in_ram().unwrap();
{
let mut index_writer: IndexWriter = index.writer_for_tests().unwrap();
let index_doc = |index_writer: &mut IndexWriter, val: u64| {
index_writer.add_document(doc!(int_field=>val)).unwrap();
};
// 3 segments, each with 10_001 values in the fast field
for _ in 0..3 {
index_doc(&mut index_writer, 5000); // out-of-order value so the segment is not already sorted
for i in 0..10_000 {
index_doc(&mut index_writer, i);
}
index_writer.commit().unwrap();
}
}
index
}
// #[bench]
// fn create_sorted_index_walk_over_kmerge_on_merge_fastfield(
// b: &mut Bencher,
// ) -> crate::Result<()> {
// let sort_by_field = IndexSortByField {
// field: "intval".to_string(),
// order: Order::Desc,
// };
// let index = create_index(Some(sort_by_field.clone()));
// let segments = index.searchable_segments().unwrap();
// let merger: IndexMerger =
// IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
// let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap();
// b.iter(|| {
// let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
// let reader = &merger.readers[doc_addr.segment_ord as usize];
// let u64_reader: Arc<dyn Column<u64>> = reader
// .fast_fields()
// .typed_fast_field_reader("intval")
// .expect(
// "Failed to find a reader for single fast field. This is a tantivy bug and \
// it should never happen.",
// );
// (doc_addr.doc_id, reader, u64_reader)
// });
// // add values in order of the new doc_ids
// let mut val = 0;
// for (doc_id, _reader, field_reader) in sorted_doc_ids {
// val = field_reader.get_val(doc_id);
// }
// val
// });
// Ok(())
// }
#[bench]
fn create_sorted_index_create_doc_id_mapping(b: &mut Bencher) -> crate::Result<()> {
let sort_by_field = IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
};
let index = create_index(Some(sort_by_field.clone()));
// let field = index.schema().get_field("intval").unwrap();
let segments = index.searchable_segments().unwrap();
let merger: IndexMerger =
IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
b.iter(|| {
merger
.generate_doc_id_mapping_with_sort_by_field(&sort_by_field)
.unwrap();
});
Ok(())
}
}
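Note: this module is gated behind `#[cfg(all(test, feature = "unstable"))]`, so the benches above only compile on a nightly toolchain with the `unstable` feature enabled (e.g. something like `cargo +nightly bench --features unstable`; the exact invocation is an assumption, not taken from this change).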

View File

@@ -13,10 +13,10 @@ mod flat_map_with_buffer;
pub(crate) mod index_writer;
pub(crate) mod index_writer_status;
mod log_merge_policy;
mod merge_index_test;
mod merge_operation;
pub(crate) mod merge_policy;
pub(crate) mod merger;
mod merger_sorted_index_test;
pub(crate) mod operation;
pub(crate) mod prepared_commit;
mod segment_entry;

View File

@@ -1,3 +1,5 @@
use common::json_path_writer::JSON_END_OF_PATH;
use common::replace_in_place;
use fnv::FnvHashMap;
/// `Field` is represented by an unsigned 32-bit integer type.
@@ -38,7 +40,14 @@ impl PathToUnorderedId {
#[cold]
fn insert_new_path(&mut self, path: &str) -> u32 {
let next_id = self.map.len() as u32;
self.map.insert(path.to_string(), next_id);
let mut new_path = path.to_string();
// The unsafe below is safe as long as JSON_END_OF_PATH and b'0' are
// valid single-byte UTF-8 characters.
// By UTF-8 design, a single-byte codepoint cannot be part of another codepoint.
unsafe { replace_in_place(JSON_END_OF_PATH, b'0', new_path.as_bytes_mut()) };
self.map.insert(new_path, next_id);
next_id
}
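For context, a minimal safe-Rust sketch of the same escaping step (`SEP` is a hypothetical stand-in for `JSON_END_OF_PATH`, assumed to be a single ASCII byte):

```
const SEP: u8 = 0x01; // hypothetical stand-in for JSON_END_OF_PATH

fn escape_path(path: &str) -> String {
    let mut bytes = path.as_bytes().to_vec();
    for b in &mut bytes {
        // Swapping one ASCII byte for another cannot create or split a
        // multi-byte UTF-8 codepoint, so the buffer stays valid UTF-8.
        if *b == SEP {
            *b = b'0';
        }
    }
    String::from_utf8(bytes).expect("still valid UTF-8")
}
```

This is what the `unsafe { replace_in_place(...) }` above achieves in place, without the extra copy.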

View File

@@ -18,27 +18,9 @@ pub struct SegmentSerializer {
impl SegmentSerializer {
/// Creates a new `SegmentSerializer`.
pub fn for_segment(
mut segment: Segment,
is_in_merge: bool,
) -> crate::Result<SegmentSerializer> {
// If the segment is going to be sorted, we stream the docs first to a temporary file.
// In the merge case this is not necessary because we can kmerge the already sorted
// segments
let remapping_required = segment.index().settings().sort_by_field.is_some() && !is_in_merge;
pub fn for_segment(mut segment: Segment) -> crate::Result<SegmentSerializer> {
let settings = segment.index().settings().clone();
let store_writer = if remapping_required {
let store_write = segment.open_write(SegmentComponent::TempStore)?;
StoreWriter::new(
store_write,
crate::store::Compressor::None,
// We want fast random access on the docs, so we choose a small block size.
// If this is zero, the skip index will contain too many checkpoints and
// therefore will be relatively slow.
16000,
settings.docstore_compress_dedicated_thread,
)?
} else {
let store_writer = {
let store_write = segment.open_write(SegmentComponent::Store)?;
StoreWriter::new(
store_write,
@@ -72,10 +54,6 @@ impl SegmentSerializer {
&self.segment
}
pub fn segment_mut(&mut self) -> &mut Segment {
&mut self.segment
}
/// Accessor to the `PostingsSerializer`.
pub fn get_postings_serializer(&mut self) -> &mut InvertedIndexSerializer {
&mut self.postings_serializer

View File

@@ -115,11 +115,10 @@ fn merge(
.collect();
// An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger =
IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
// ... we just serialize this index merger in our new segment to merge the segments.
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone(), true)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone())?;
let num_docs = merger.write(segment_serializer)?;
@@ -220,13 +219,9 @@ pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
)?;
let merged_segment = merged_index.new_segment();
let merged_segment_id = merged_segment.id();
let merger: IndexMerger = IndexMerger::open_with_custom_alive_set(
merged_index.schema(),
merged_index.settings().clone(),
segments,
filter_doc_ids,
)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment, true)?;
let merger: IndexMerger =
IndexMerger::open_with_custom_alive_set(merged_index.schema(), segments, filter_doc_ids)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment)?;
let num_docs = merger.write(segment_serializer)?;
let segment_meta = merged_index.new_segment_meta(merged_segment_id, num_docs);
@@ -1067,7 +1062,6 @@ mod tests {
)?;
let merger: IndexMerger = IndexMerger::open_with_custom_alive_set(
merged_index.schema(),
merged_index.settings().clone(),
&segments[..],
filter_segments,
)?;
@@ -1083,7 +1077,6 @@ mod tests {
Index::create(RamDirectory::default(), target_schema, target_settings)?;
let merger: IndexMerger = IndexMerger::open_with_custom_alive_set(
merged_index.schema(),
merged_index.settings().clone(),
&segments[..],
filter_segments,
)?;

View File

@@ -3,7 +3,6 @@ use common::JsonPathWriter;
use itertools::Itertools;
use tokenizer_api::BoxTokenStream;
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
use super::operation::AddOperation;
use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::{FieldNormReaders, FieldNormsWriter};
@@ -16,7 +15,6 @@ use crate::postings::{
};
use crate::schema::document::{Document, Value};
use crate::schema::{FieldEntry, FieldType, Schema, Term, DATE_TIME_PRECISION_INDEXED};
use crate::store::{StoreReader, StoreWriter};
use crate::tokenizer::{FacetTokenizer, PreTokenizedStream, TextAnalyzer, Tokenizer};
use crate::{DocId, Opstamp, TantivyError};
@@ -41,20 +39,6 @@ fn compute_initial_table_size(per_thread_memory_budget: usize) -> crate::Result<
})
}
fn remap_doc_opstamps(
opstamps: Vec<Opstamp>,
doc_id_mapping_opt: Option<&DocIdMapping>,
) -> Vec<Opstamp> {
if let Some(doc_id_mapping_opt) = doc_id_mapping_opt {
doc_id_mapping_opt
.iter_old_doc_ids()
.map(|doc| opstamps[doc as usize])
.collect()
} else {
opstamps
}
}
/// A `SegmentWriter` is in charge of creating segment index from a
/// set of documents.
///
@@ -90,7 +74,7 @@ impl SegmentWriter {
let tokenizer_manager = segment.index().tokenizers().clone();
let tokenizer_manager_fast_field = segment.index().fast_field_tokenizer().clone();
let table_size = compute_initial_table_size(memory_budget_in_bytes)?;
let segment_serializer = SegmentSerializer::for_segment(segment, false)?;
let segment_serializer = SegmentSerializer::for_segment(segment)?;
let per_field_postings_writers = PerFieldPostingsWriter::for_schema(&schema);
let per_field_text_analyzers = schema
.fields()
@@ -139,15 +123,6 @@ impl SegmentWriter {
/// be used afterwards.
pub fn finalize(mut self) -> crate::Result<Vec<u64>> {
self.fieldnorms_writer.fill_up_to_max_doc(self.max_doc);
let mapping: Option<DocIdMapping> = self
.segment_serializer
.segment()
.index()
.settings()
.sort_by_field
.clone()
.map(|sort_by_field| get_doc_id_mapping_from_field(sort_by_field, &self))
.transpose()?;
remap_and_write(
self.schema,
&self.per_field_postings_writers,
@@ -155,10 +130,8 @@ impl SegmentWriter {
self.fast_field_writers,
&self.fieldnorms_writer,
self.segment_serializer,
mapping.as_ref(),
)?;
let doc_opstamps = remap_doc_opstamps(self.doc_opstamps, mapping.as_ref());
Ok(doc_opstamps)
Ok(self.doc_opstamps)
}
/// Returns an estimation of the current memory usage of the segment writer.
@@ -419,11 +392,10 @@ fn remap_and_write(
fast_field_writers: FastFieldsWriter,
fieldnorms_writer: &FieldNormsWriter,
mut serializer: SegmentSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> crate::Result<()> {
debug!("remap-and-write");
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
fieldnorms_writer.serialize(fieldnorms_serializer, doc_id_map)?;
fieldnorms_writer.serialize(fieldnorms_serializer)?;
}
let fieldnorm_data = serializer
.segment()
@@ -434,39 +406,10 @@ fn remap_and_write(
schema,
per_field_postings_writers,
fieldnorm_readers,
doc_id_map,
serializer.get_postings_serializer(),
)?;
debug!("fastfield-serialize");
fast_field_writers.serialize(serializer.get_fast_field_write(), doc_id_map)?;
// finalize the temp doc store and write the final version, which reflects the doc_id_map
if let Some(doc_id_map) = doc_id_map {
debug!("resort-docstore");
let store_write = serializer
.segment_mut()
.open_write(SegmentComponent::Store)?;
let settings = serializer.segment().index().settings();
let store_writer = StoreWriter::new(
store_write,
settings.docstore_compression,
settings.docstore_blocksize,
settings.docstore_compress_dedicated_thread,
)?;
let old_store_writer = std::mem::replace(&mut serializer.store_writer, store_writer);
old_store_writer.close()?;
let store_read = StoreReader::open(
serializer
.segment()
.open_read(SegmentComponent::TempStore)?,
1, /* The docstore is configured to have one doc per block, and each doc is accessed
* only once: we don't need caching. */
)?;
for old_doc_id in doc_id_map.iter_old_doc_ids() {
let doc_bytes = store_read.get_document_bytes(old_doc_id)?;
serializer.get_store_writer().store_bytes(&doc_bytes)?;
}
}
fast_field_writers.serialize(serializer.get_fast_field_write())?;
debug!("serializer-close");
serializer.close()?;

View File

@@ -222,8 +222,8 @@ pub use crate::core::{Executor, Searcher, SearcherGeneration};
pub use crate::directory::Directory;
#[allow(deprecated)] // Remove with index sorting
pub use crate::index::{
Index, IndexBuilder, IndexMeta, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
Segment, SegmentMeta, SegmentReader,
Index, IndexBuilder, IndexMeta, IndexSettings, InvertedIndexReader, Order, Segment,
SegmentMeta, SegmentReader,
};
pub use crate::indexer::{IndexWriter, SingleSegmentIndexWriter};
pub use crate::schema::{Document, TantivyDocument, Term};

View File

@@ -3,7 +3,6 @@ use std::io;
use common::json_path_writer::JSON_END_OF_PATH;
use stacker::Addr;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::postings_writer::SpecializedPostingsWriter;
use crate::postings::recorder::{BufferLender, DocIdRecorder, Recorder};
@@ -62,7 +61,6 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
ordered_id_to_path: &[&str],
doc_id_map: Option<&DocIdMapping>,
ctx: &IndexingContext,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
@@ -87,7 +85,6 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
SpecializedPostingsWriter::<Rec>::serialize_one_term(
term_buffer.serialized_value_bytes(),
*addr,
doc_id_map,
&mut buffer_lender,
ctx,
serializer,
@@ -96,7 +93,6 @@ impl<Rec: Recorder> PostingsWriter for JsonPostingsWriter<Rec> {
SpecializedPostingsWriter::<DocIdRecorder>::serialize_one_term(
term_buffer.serialized_value_bytes(),
*addr,
doc_id_map,
&mut buffer_lender,
ctx,
serializer,

View File

@@ -5,7 +5,6 @@ use std::ops::Range;
use stacker::Addr;
use crate::fieldnorm::FieldNormReaders;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::indexer::path_to_unordered_id::OrderedPathId;
use crate::postings::recorder::{BufferLender, Recorder};
use crate::postings::{
@@ -50,7 +49,6 @@ pub(crate) fn serialize_postings(
schema: Schema,
per_field_postings_writers: &PerFieldPostingsWriter,
fieldnorm_readers: FieldNormReaders,
doc_id_map: Option<&DocIdMapping>,
serializer: &mut InvertedIndexSerializer,
) -> crate::Result<()> {
// Replace unordered ids by ordered ids to be able to sort
@@ -86,7 +84,6 @@ pub(crate) fn serialize_postings(
postings_writer.serialize(
&term_offsets[byte_offsets],
&ordered_id_to_path,
doc_id_map,
&ctx,
&mut field_serializer,
)?;
@@ -122,7 +119,6 @@ pub(crate) trait PostingsWriter: Send + Sync {
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
ordered_id_to_path: &[&str],
doc_id_map: Option<&DocIdMapping>,
ctx: &IndexingContext,
serializer: &mut FieldSerializer,
) -> io::Result<()>;
@@ -187,7 +183,6 @@ impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
pub(crate) fn serialize_one_term(
term: &[u8],
addr: Addr,
doc_id_map: Option<&DocIdMapping>,
buffer_lender: &mut BufferLender,
ctx: &IndexingContext,
serializer: &mut FieldSerializer,
@@ -195,7 +190,7 @@ impl<Rec: Recorder> SpecializedPostingsWriter<Rec> {
let recorder: Rec = ctx.term_index.read(addr);
let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32);
serializer.new_term(term, term_doc_freq, recorder.has_term_freq())?;
recorder.serialize(&ctx.arena, doc_id_map, serializer, buffer_lender);
recorder.serialize(&ctx.arena, serializer, buffer_lender);
serializer.close_term()?;
Ok(())
}
@@ -229,13 +224,12 @@ impl<Rec: Recorder> PostingsWriter for SpecializedPostingsWriter<Rec> {
&self,
term_addrs: &[(Field, OrderedPathId, &[u8], Addr)],
_ordered_id_to_path: &[&str],
doc_id_map: Option<&DocIdMapping>,
ctx: &IndexingContext,
serializer: &mut FieldSerializer,
) -> io::Result<()> {
let mut buffer_lender = BufferLender::default();
for (_field, _path_id, term, addr) in term_addrs {
Self::serialize_one_term(term, *addr, doc_id_map, &mut buffer_lender, ctx, serializer)?;
Self::serialize_one_term(term, *addr, &mut buffer_lender, ctx, serializer)?;
}
Ok(())
}

View File

@@ -1,7 +1,6 @@
use common::read_u32_vint;
use stacker::{ExpUnrolledLinkedList, MemoryArena};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::FieldSerializer;
use crate::DocId;
@@ -71,7 +70,6 @@ pub(crate) trait Recorder: Copy + Default + Send + Sync + 'static {
fn serialize(
&self,
arena: &MemoryArena,
doc_id_map: Option<&DocIdMapping>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
);
@@ -115,26 +113,15 @@ impl Recorder for DocIdRecorder {
fn serialize(
&self,
arena: &MemoryArena,
doc_id_map: Option<&DocIdMapping>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
) {
let (buffer, doc_ids) = buffer_lender.lend_all();
let buffer = buffer_lender.lend_u8();
// TODO avoid reading twice.
self.stack.read_to_end(arena, buffer);
if let Some(doc_id_map) = doc_id_map {
let iter = get_sum_reader(VInt32Reader::new(&buffer[..]));
doc_ids.extend(iter.map(|old_doc_id| doc_id_map.get_new_doc_id(old_doc_id)));
doc_ids.sort_unstable();
for doc in doc_ids {
serializer.write_doc(*doc, 0u32, &[][..]);
}
} else {
let iter = get_sum_reader(VInt32Reader::new(&buffer[..]));
for doc_id in iter {
serializer.write_doc(doc_id, 0u32, &[][..]);
}
let iter = get_sum_reader(VInt32Reader::new(&buffer[..]));
for doc_id in iter {
serializer.write_doc(doc_id, 0u32, &[][..]);
}
}
@@ -194,35 +181,18 @@ impl Recorder for TermFrequencyRecorder {
fn serialize(
&self,
arena: &MemoryArena,
doc_id_map: Option<&DocIdMapping>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
) {
let buffer = buffer_lender.lend_u8();
self.stack.read_to_end(arena, buffer);
let mut u32_it = VInt32Reader::new(&buffer[..]);
if let Some(doc_id_map) = doc_id_map {
let mut doc_id_and_tf = vec![];
let mut prev_doc = 0;
while let Some(delta_doc_id) = u32_it.next() {
let doc_id = prev_doc + delta_doc_id;
prev_doc = doc_id;
let term_freq = u32_it.next().unwrap_or(self.current_tf);
doc_id_and_tf.push((doc_id_map.get_new_doc_id(doc_id), term_freq));
}
doc_id_and_tf.sort_unstable_by_key(|&(doc_id, _)| doc_id);
for (doc_id, tf) in doc_id_and_tf {
serializer.write_doc(doc_id, tf, &[][..]);
}
} else {
let mut prev_doc = 0;
while let Some(delta_doc_id) = u32_it.next() {
let doc_id = prev_doc + delta_doc_id;
prev_doc = doc_id;
let term_freq = u32_it.next().unwrap_or(self.current_tf);
serializer.write_doc(doc_id, term_freq, &[][..]);
}
let mut prev_doc = 0;
while let Some(delta_doc_id) = u32_it.next() {
let doc_id = prev_doc + delta_doc_id;
prev_doc = doc_id;
let term_freq = u32_it.next().unwrap_or(self.current_tf);
serializer.write_doc(doc_id, term_freq, &[][..]);
}
}
@@ -268,14 +238,12 @@ impl Recorder for TfAndPositionRecorder {
fn serialize(
&self,
arena: &MemoryArena,
doc_id_map: Option<&DocIdMapping>,
serializer: &mut FieldSerializer<'_>,
buffer_lender: &mut BufferLender,
) {
let (buffer_u8, buffer_positions) = buffer_lender.lend_all();
self.stack.read_to_end(arena, buffer_u8);
let mut u32_it = VInt32Reader::new(&buffer_u8[..]);
let mut doc_id_and_positions = vec![];
let mut prev_doc = 0;
while let Some(delta_doc_id) = u32_it.next() {
let doc_id = prev_doc + delta_doc_id;
@@ -294,19 +262,7 @@ impl Recorder for TfAndPositionRecorder {
}
}
}
if let Some(doc_id_map) = doc_id_map {
// this simple remapping variant may consume too much memory
doc_id_and_positions
.push((doc_id_map.get_new_doc_id(doc_id), buffer_positions.to_vec()));
} else {
serializer.write_doc(doc_id, buffer_positions.len() as u32, buffer_positions);
}
}
if doc_id_map.is_some() {
doc_id_and_positions.sort_unstable_by_key(|&(doc_id, _)| doc_id);
for (doc_id, positions) in doc_id_and_positions {
serializer.write_doc(doc_id, positions.len() as u32, &positions);
}
serializer.write_doc(doc_id, buffer_positions.len() as u32, buffer_positions);
}
}
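The recorders above store doc ids delta-encoded in a VInt stream; removing the remap branch leaves only the straight decode. A minimal sketch of that decode loop (the iterator input is a stand-in for `VInt32Reader`):

```
// Turn a stream of doc-id deltas back into absolute doc ids,
// mirroring the `prev_doc + delta_doc_id` pattern above.
fn decode_doc_ids(deltas: impl IntoIterator<Item = u32>) -> Vec<u32> {
    let mut prev_doc = 0u32;
    deltas
        .into_iter()
        .map(|delta| {
            prev_doc += delta;
            prev_doc
        })
        .collect()
}
```

Because the deltas are non-negative, the decoded ids come out already sorted, which is why only the removed doc-id remapping paths needed an explicit re-sort.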

View File

@@ -249,9 +249,12 @@ impl Term {
#[inline]
pub fn append_path(&mut self, bytes: &[u8]) -> &mut [u8] {
let len_before = self.0.len();
if bytes.contains(&0u8) {
self.0
.extend(bytes.iter().map(|&b| if b == 0 { b'0' } else { b }));
if bytes.contains(&JSON_END_OF_PATH) {
self.0.extend(
bytes
.iter()
.map(|&b| if b == JSON_END_OF_PATH { b'0' } else { b }),
);
} else {
self.0.extend_from_slice(bytes);
}