mirror of
https://github.com/quickwit-oss/tantivy.git
synced 2025-12-28 04:52:55 +00:00
Compare commits
68 Commits
fastfieldc
...
column-tra
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4a072e3c18 | ||
|
|
84e0c75598 | ||
|
|
08c4412d73 | ||
|
|
70e58adff9 | ||
|
|
0d1cd119e9 | ||
|
|
d3dd620048 | ||
|
|
e89c220b56 | ||
|
|
a451f6d60d | ||
|
|
f740ddeee3 | ||
|
|
7a26cc9022 | ||
|
|
54972caa7c | ||
|
|
5d436759b0 | ||
|
|
6f563b1606 | ||
|
|
095fb68fda | ||
|
|
6316eaefc6 | ||
|
|
5331be800b | ||
|
|
c73b425bc1 | ||
|
|
54cfd0d154 | ||
|
|
0dd62169c8 | ||
|
|
3a9727aa91 | ||
|
|
17093e8ffe | ||
|
|
03e4630cd8 | ||
|
|
4ae0317d68 | ||
|
|
107b19855f | ||
|
|
d8f66ba07e | ||
|
|
f908549245 | ||
|
|
3673a5df9b | ||
|
|
3984cafccc | ||
|
|
298b5dd726 | ||
|
|
8bbb22e9bf | ||
|
|
513f68209d | ||
|
|
91f2f7e722 | ||
|
|
c476b530cf | ||
|
|
77dd202e19 | ||
|
|
00ebff3c16 | ||
|
|
9a6d37c42c | ||
|
|
bb01e99e05 | ||
|
|
535f1a5d83 | ||
|
|
625f9174a7 | ||
|
|
11a4d97cf5 | ||
|
|
1c3d39677a | ||
|
|
6f65995cfd | ||
|
|
e2e4190571 | ||
|
|
82209c58aa | ||
|
|
21519788ea | ||
|
|
4c6c6e4a9c | ||
|
|
df0ac9e901 | ||
|
|
71ab482720 | ||
|
|
2ae383e452 | ||
|
|
8b3a6f6231 | ||
|
|
11edd6bd59 | ||
|
|
193a3c21f4 | ||
|
|
998b1263f6 | ||
|
|
72272bdf81 | ||
|
|
c39c2d79da | ||
|
|
67d94f5bd2 | ||
|
|
abbd934ac9 | ||
|
|
7f9ba0ee50 | ||
|
|
8edcd6f958 | ||
|
|
f50700835d | ||
|
|
494e92ca59 | ||
|
|
4a3169011d | ||
|
|
050fc5dde9 | ||
|
|
f01cb7d3aa | ||
|
|
9811d15657 | ||
|
|
31ba5a3c16 | ||
|
|
71041b2314 | ||
|
|
09aae134e6 |
4
.github/workflows/coverage.yml
vendored
4
.github/workflows/coverage.yml
vendored
@@ -12,12 +12,14 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install Rust
|
||||
run: rustup toolchain install nightly --component llvm-tools-preview
|
||||
run: rustup toolchain install nightly --profile minimal --component llvm-tools-preview
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Generate code coverage
|
||||
run: cargo +nightly llvm-cov --all-features --workspace --lcov --output-path lcov.info
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
continue-on-error: true
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
|
||||
files: lcov.info
|
||||
|
||||
3
.github/workflows/long_running.yml
vendored
3
.github/workflows/long_running.yml
vendored
@@ -19,11 +19,10 @@ jobs:
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
profile: minimal
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Run indexing_unsorted
|
||||
run: cargo test indexing_unsorted -- --ignored
|
||||
- name: Run indexing_sorted
|
||||
run: cargo test indexing_sorted -- --ignored
|
||||
|
||||
|
||||
52
.github/workflows/test.yml
vendored
52
.github/workflows/test.yml
vendored
@@ -10,34 +10,27 @@ env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
test:
|
||||
check:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install latest nightly to test also against unstable feature flag
|
||||
|
||||
- name: Install nightly
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
profile: minimal
|
||||
components: rustfmt
|
||||
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
profile: minimal
|
||||
components: clippy
|
||||
|
||||
- name: Build
|
||||
run: cargo build --verbose --workspace
|
||||
|
||||
- name: Run tests
|
||||
run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints --verbose --workspace
|
||||
|
||||
- name: Run tests quickwit feature
|
||||
run: cargo +stable test --features mmap,quickwit,failpoints --verbose --workspace
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Check Formatting
|
||||
run: cargo +nightly fmt --all -- --check
|
||||
@@ -48,3 +41,34 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
args: --tests
|
||||
|
||||
test:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
features: [
|
||||
{ label: "all", flags: "mmap,brotli-compression,lz4-compression,snappy-compression,zstd-compression,failpoints" },
|
||||
{ label: "quickwit", flags: "mmap,quickwit,failpoints" }
|
||||
]
|
||||
|
||||
name: test-${{ matrix.features.label}}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- uses: taiki-e/install-action@nextest
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Run tests
|
||||
run: cargo +stable nextest run --features ${{ matrix.features.flags }} --verbose --workspace
|
||||
|
||||
- name: Run doctests
|
||||
run: cargo +stable test --doc --features ${{ matrix.features.flags }} --verbose --workspace
|
||||
|
||||
@@ -60,7 +60,6 @@ pretty_assertions = "1.2.1"
|
||||
serde_cbor = { version = "0.11.2", optional = true }
|
||||
async-trait = "0.1.53"
|
||||
arc-swap = "1.5.0"
|
||||
gcd = "2.1.0"
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
winapi = "0.3.9"
|
||||
|
||||
@@ -82,14 +82,16 @@ impl BitUnpacker {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bit_width(&self) -> u8 {
|
||||
self.num_bits as u8
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get(&self, idx: u64, data: &[u8]) -> u64 {
|
||||
if self.num_bits == 0 {
|
||||
return 0u64;
|
||||
}
|
||||
let num_bits = self.num_bits;
|
||||
let mask = self.mask;
|
||||
let addr_in_bits = idx * num_bits;
|
||||
let addr_in_bits = idx * self.num_bits;
|
||||
let addr = addr_in_bits >> 3;
|
||||
let bit_shift = addr_in_bits & 7;
|
||||
debug_assert!(
|
||||
@@ -101,7 +103,7 @@ impl BitUnpacker {
|
||||
.unwrap();
|
||||
let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
|
||||
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
|
||||
val_shifted & mask
|
||||
val_shifted & self.mask
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -58,6 +58,10 @@ fn metadata_test() {
|
||||
assert_eq!(meta.num_bits(), 6);
|
||||
}
|
||||
|
||||
fn mem_usage<T>(items: &Vec<T>) -> usize {
|
||||
items.capacity() * std::mem::size_of::<T>()
|
||||
}
|
||||
|
||||
impl BlockedBitpacker {
|
||||
pub fn new() -> Self {
|
||||
let mut compressed_blocks = vec![];
|
||||
@@ -73,10 +77,8 @@ impl BlockedBitpacker {
|
||||
pub fn mem_usage(&self) -> usize {
|
||||
std::mem::size_of::<BlockedBitpacker>()
|
||||
+ self.compressed_blocks.capacity()
|
||||
+ self.offset_and_bits.capacity()
|
||||
* std::mem::size_of_val(&self.offset_and_bits.get(0).cloned().unwrap_or_default())
|
||||
+ self.buffer.capacity()
|
||||
* std::mem::size_of_val(&self.buffer.get(0).cloned().unwrap_or_default())
|
||||
+ mem_usage(&self.offset_and_bits)
|
||||
+ mem_usage(&self.buffer)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
||||
@@ -62,7 +62,7 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
|
||||
pub struct AntiCallToken(());
|
||||
|
||||
/// Trait used to indicate when no more write need to be done on a writer
|
||||
pub trait TerminatingWrite: Write + Send {
|
||||
pub trait TerminatingWrite: Write + Send + Sync {
|
||||
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
|
||||
fn terminate(mut self) -> io::Result<()>
|
||||
where Self: Sized {
|
||||
|
||||
@@ -7,10 +7,11 @@
|
||||
// Of course, you can have a look at the tantivy's built-in collectors
|
||||
// such as the `CountCollector` for more examples.
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
// ---
|
||||
// Importing tantivy...
|
||||
use tantivy::collector::{Collector, SegmentCollector};
|
||||
use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
|
||||
use tantivy::fastfield::DynamicFastFieldReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT};
|
||||
use tantivy::{doc, Index, Score, SegmentReader};
|
||||
@@ -103,7 +104,7 @@ impl SegmentCollector for StatsSegmentCollector {
|
||||
type Fruit = Option<Stats>;
|
||||
|
||||
fn collect(&mut self, doc: u32, _score: Score) {
|
||||
let value = self.fast_field_reader.get(doc) as f64;
|
||||
let value = self.fast_field_reader.get_val(doc as u64) as f64;
|
||||
self.stats.count += 1;
|
||||
self.stats.sum += value;
|
||||
self.stats.squared_sum += value * value;
|
||||
|
||||
@@ -2,8 +2,8 @@ use std::cmp::Reverse;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{Arc, RwLock, Weak};
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use tantivy::collector::TopDocs;
|
||||
use tantivy::fastfield::FastFieldReader;
|
||||
use tantivy::query::QueryParser;
|
||||
use tantivy::schema::{Field, Schema, FAST, TEXT};
|
||||
use tantivy::{
|
||||
@@ -52,7 +52,7 @@ impl Warmer for DynamicPriceColumn {
|
||||
let product_id_reader = segment.fast_fields().u64(self.field)?;
|
||||
let product_ids: Vec<ProductId> = segment
|
||||
.doc_ids_alive()
|
||||
.map(|doc| product_id_reader.get(doc))
|
||||
.map(|doc| product_id_reader.get_val(doc as u64))
|
||||
.collect();
|
||||
let mut prices_it = self.price_fetcher.fetch_prices(&product_ids).into_iter();
|
||||
let mut price_vals: Vec<Price> = Vec::new();
|
||||
|
||||
@@ -11,11 +11,13 @@ description = "Fast field codecs used by tantivy"
|
||||
[dependencies]
|
||||
common = { version = "0.3", path = "../common/", package = "tantivy-common" }
|
||||
tantivy-bitpacker = { version="0.2", path = "../bitpacker/" }
|
||||
ownedbytes = { version = "0.3.0", path = "../ownedbytes" }
|
||||
prettytable-rs = {version="0.9.0", optional= true}
|
||||
rand = {version="0.8.3", optional= true}
|
||||
|
||||
[dev-dependencies]
|
||||
more-asserts = "0.3.0"
|
||||
proptest = "1.0.0"
|
||||
rand = "0.8.3"
|
||||
|
||||
[features]
|
||||
|
||||
@@ -4,13 +4,9 @@ extern crate test;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
|
||||
use fastfield_codecs::linearinterpol::{
|
||||
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::bitpacked::BitpackedCodec;
|
||||
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
|
||||
use fastfield_codecs::linear::LinearCodec;
|
||||
use fastfield_codecs::*;
|
||||
|
||||
fn get_data() -> Vec<u64> {
|
||||
@@ -29,72 +25,59 @@ mod tests {
|
||||
fn value_iter() -> impl Iterator<Item = u64> {
|
||||
0..20_000
|
||||
}
|
||||
fn bench_get<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
|
||||
b: &mut Bencher,
|
||||
data: &[u64],
|
||||
) {
|
||||
fn bench_get<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = vec![];
|
||||
S::serialize(
|
||||
&mut bytes,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
let reader = R::open_from_bytes(&bytes).unwrap();
|
||||
Codec::serialize(&mut bytes, &data).unwrap();
|
||||
let reader = Codec::open_from_bytes(OwnedBytes::new(bytes)).unwrap();
|
||||
b.iter(|| {
|
||||
let mut sum = 0u64;
|
||||
for pos in value_iter() {
|
||||
reader.get_u64(pos as u64, &bytes);
|
||||
let val = reader.get_val(pos as u64);
|
||||
debug_assert_eq!(data[pos as usize], val);
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
sum
|
||||
});
|
||||
}
|
||||
fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = vec![];
|
||||
fn bench_create<Codec: FastFieldCodec>(b: &mut Bencher, data: &[u64]) {
|
||||
let mut bytes = Vec::new();
|
||||
b.iter(|| {
|
||||
S::serialize(
|
||||
&mut bytes,
|
||||
&data,
|
||||
stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
bytes.clear();
|
||||
Codec::serialize(&mut bytes, &data).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
use ownedbytes::OwnedBytes;
|
||||
use test::Bencher;
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<BitpackedFastFieldSerializer>(b, &data);
|
||||
bench_create::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<LinearInterpolFastFieldSerializer>(b, &data);
|
||||
bench_create::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_create::<MultiLinearInterpolFastFieldSerializer>(b, &data);
|
||||
bench_create::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(b, &data);
|
||||
bench_get::<BitpackedCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>(b, &data);
|
||||
bench_get::<LinearCodec>(b, &data);
|
||||
}
|
||||
#[bench]
|
||||
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
|
||||
let data: Vec<_> = get_data();
|
||||
bench_get::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>(
|
||||
b, &data,
|
||||
);
|
||||
bench_get::<BlockwiseLinearCodec>(b, &data);
|
||||
}
|
||||
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
|
||||
let min_value = data.iter().cloned().min().unwrap_or(0);
|
||||
|
||||
@@ -1,37 +1,26 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use common::BinarySerializable;
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct BitpackedFastFieldReader {
|
||||
pub struct BitpackedReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
pub min_value_u64: u64,
|
||||
pub max_value_u64: u64,
|
||||
min_value_u64: u64,
|
||||
max_value_u64: u64,
|
||||
num_vals: u64,
|
||||
}
|
||||
|
||||
impl FastFieldCodecReader for BitpackedFastFieldReader {
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
|
||||
let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
|
||||
let min_value = u64::deserialize(&mut footer)?;
|
||||
let amplitude = u64::deserialize(&mut footer)?;
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedFastFieldReader {
|
||||
min_value_u64: min_value,
|
||||
max_value_u64: max_value,
|
||||
bit_unpacker,
|
||||
})
|
||||
}
|
||||
impl Column for BitpackedReader {
|
||||
#[inline]
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
|
||||
self.min_value_u64 + self.bit_unpacker.get(doc, data)
|
||||
fn get_val(&self, doc: u64) -> u64 {
|
||||
self.min_value_u64 + self.bit_unpacker.get(doc, &self.data)
|
||||
}
|
||||
#[inline]
|
||||
fn min_value(&self) -> u64 {
|
||||
@@ -41,16 +30,21 @@ impl FastFieldCodecReader for BitpackedFastFieldReader {
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value_u64
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.num_vals
|
||||
}
|
||||
}
|
||||
pub struct BitpackedFastFieldSerializerLegacy<'a, W: 'a + Write> {
|
||||
pub struct BitpackedSerializerLegacy<'a, W: 'a + Write> {
|
||||
bit_packer: BitPacker,
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
num_vals: u64,
|
||||
amplitude: u64,
|
||||
num_bits: u8,
|
||||
}
|
||||
|
||||
impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
impl<'a, W: Write> BitpackedSerializerLegacy<'a, W> {
|
||||
/// Creates a new fast field serializer.
|
||||
///
|
||||
/// The serializer in fact encode the values by bitpacking
|
||||
@@ -63,15 +57,16 @@ impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
write: &'a mut W,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'a, W>> {
|
||||
) -> io::Result<BitpackedSerializerLegacy<'a, W>> {
|
||||
assert!(min_value <= max_value);
|
||||
let amplitude = max_value - min_value;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_packer = BitPacker::new();
|
||||
Ok(BitpackedFastFieldSerializerLegacy {
|
||||
Ok(BitpackedSerializerLegacy {
|
||||
bit_packer,
|
||||
write,
|
||||
min_value,
|
||||
num_vals: 0,
|
||||
amplitude,
|
||||
num_bits,
|
||||
})
|
||||
@@ -82,21 +77,45 @@ impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
|
||||
let val_to_write: u64 = val - self.min_value;
|
||||
self.bit_packer
|
||||
.write(val_to_write, self.num_bits, &mut self.write)?;
|
||||
self.num_vals += 1;
|
||||
Ok(())
|
||||
}
|
||||
pub fn close_field(mut self) -> io::Result<()> {
|
||||
self.bit_packer.close(&mut self.write)?;
|
||||
self.min_value.serialize(&mut self.write)?;
|
||||
self.amplitude.serialize(&mut self.write)?;
|
||||
self.num_vals.serialize(&mut self.write)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BitpackedFastFieldSerializer {}
|
||||
pub struct BitpackedCodec;
|
||||
|
||||
impl FastFieldCodec for BitpackedCodec {
|
||||
/// The CODEC_TYPE is an enum value used for serialization.
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Bitpacked;
|
||||
|
||||
type Reader = BitpackedReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let footer_offset = bytes.len() - 24;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let min_value = u64::deserialize(&mut footer)?;
|
||||
let amplitude = u64::deserialize(&mut footer)?;
|
||||
let num_vals = u64::deserialize(&mut footer)?;
|
||||
let max_value = min_value + amplitude;
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let bit_unpacker = BitUnpacker::new(num_bits);
|
||||
Ok(BitpackedReader {
|
||||
data,
|
||||
bit_unpacker,
|
||||
min_value_u64: min_value,
|
||||
max_value_u64: max_value,
|
||||
num_vals,
|
||||
})
|
||||
}
|
||||
|
||||
impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
|
||||
const NAME: &'static str = "Bitpacked";
|
||||
const ID: u8 = 1;
|
||||
/// Serializes data with the BitpackedFastFieldSerializer.
|
||||
///
|
||||
/// The serializer in fact encode the values by bitpacking
|
||||
@@ -105,51 +124,41 @@ impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
|
||||
/// It requires a `min_value` and a `max_value` to compute
|
||||
/// compute the minimum number of bits required to encode
|
||||
/// values.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
_fastfield_accessor: &dyn FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
_data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
let mut serializer =
|
||||
BitpackedFastFieldSerializerLegacy::open(write, stats.min_value, stats.max_value)?;
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
|
||||
let mut serializer = BitpackedSerializerLegacy::open(
|
||||
write,
|
||||
fastfield_accessor.min_value(),
|
||||
fastfield_accessor.max_value(),
|
||||
)?;
|
||||
|
||||
for val in data_iter {
|
||||
for val in fastfield_accessor.iter() {
|
||||
serializer.add_val(val)?;
|
||||
}
|
||||
serializer.close_field()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn is_applicable(
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
_stats: FastFieldStats,
|
||||
) -> bool {
|
||||
true
|
||||
}
|
||||
fn estimate(_fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
|
||||
let amplitude = stats.max_value - stats.min_value;
|
||||
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
|
||||
let amplitude = fastfield_accessor.max_value() - fastfield_accessor.min_value();
|
||||
let num_bits = compute_num_bits(amplitude);
|
||||
let num_bits_uncompressed = 64;
|
||||
num_bits as f32 / num_bits_uncompressed as f32
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tests::get_codec_test_data_sets;
|
||||
use crate::tests::get_codec_test_datasets;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) {
|
||||
crate::tests::create_and_validate::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(
|
||||
data, name,
|
||||
);
|
||||
crate::tests::create_and_validate::<BitpackedCodec>(data, name);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = get_codec_test_data_sets();
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//! MultiLinearInterpol compressor uses linear interpolation to guess a values and stores the
|
||||
//! The BlockwiseLinear codec uses linear interpolation to guess a values and stores the
|
||||
//! offset, but in blocks of 512.
|
||||
//!
|
||||
//! With a CHUNK_SIZE of 512 and 29 byte metadata per block, we get a overhead for metadata of 232 /
|
||||
@@ -14,17 +14,20 @@ use std::io::{self, Read, Write};
|
||||
use std::ops::Sub;
|
||||
|
||||
use common::{BinarySerializable, CountingWriter, DeserializeFrom};
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::linear::{get_calculated_value, get_slope};
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
const CHUNK_SIZE: u64 = 512;
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct MultiLinearInterpolFastFieldReader {
|
||||
pub footer: MultiLinearInterpolFooter,
|
||||
pub struct BlockwiseLinearReader {
|
||||
data: OwnedBytes,
|
||||
pub footer: BlockwiseLinearFooter,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
@@ -99,14 +102,14 @@ impl BinarySerializable for Function {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct MultiLinearInterpolFooter {
|
||||
pub struct BlockwiseLinearFooter {
|
||||
pub num_vals: u64,
|
||||
pub min_value: u64,
|
||||
pub max_value: u64,
|
||||
interpolations: Vec<Function>,
|
||||
}
|
||||
|
||||
impl BinarySerializable for MultiLinearInterpolFooter {
|
||||
impl BinarySerializable for BlockwiseLinearFooter {
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
let mut out = vec![];
|
||||
self.num_vals.serialize(&mut out)?;
|
||||
@@ -118,8 +121,8 @@ impl BinarySerializable for MultiLinearInterpolFooter {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<MultiLinearInterpolFooter> {
|
||||
let mut footer = MultiLinearInterpolFooter {
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<BlockwiseLinearFooter> {
|
||||
let mut footer = BlockwiseLinearFooter {
|
||||
num_vals: u64::deserialize(reader)?,
|
||||
min_value: u64::deserialize(reader)?,
|
||||
max_value: u64::deserialize(reader)?,
|
||||
@@ -143,26 +146,20 @@ fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Functio
|
||||
&interpolations[get_interpolation_position(doc)]
|
||||
}
|
||||
|
||||
impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
|
||||
let (_data, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
|
||||
let footer = MultiLinearInterpolFooter::deserialize(&mut footer)?;
|
||||
|
||||
Ok(MultiLinearInterpolFastFieldReader { footer })
|
||||
}
|
||||
|
||||
impl Column for BlockwiseLinearReader {
|
||||
#[inline]
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
|
||||
let interpolation = get_interpolation_function(doc, &self.footer.interpolations);
|
||||
let doc = doc - interpolation.start_pos;
|
||||
let calculated_value =
|
||||
get_calculated_value(interpolation.value_start_pos, doc, interpolation.slope);
|
||||
let diff = interpolation
|
||||
.bit_unpacker
|
||||
.get(doc, &data[interpolation.data_start_offset as usize..]);
|
||||
fn get_val(&self, idx: u64) -> u64 {
|
||||
let interpolation = get_interpolation_function(idx, &self.footer.interpolations);
|
||||
let in_block_idx = idx - interpolation.start_pos;
|
||||
let calculated_value = get_calculated_value(
|
||||
interpolation.value_start_pos,
|
||||
in_block_idx,
|
||||
interpolation.slope,
|
||||
);
|
||||
let diff = interpolation.bit_unpacker.get(
|
||||
in_block_idx,
|
||||
&self.data[interpolation.data_start_offset as usize..],
|
||||
);
|
||||
(calculated_value + diff) - interpolation.positive_val_offset
|
||||
}
|
||||
|
||||
@@ -174,39 +171,38 @@ impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
|
||||
fn max_value(&self) -> u64 {
|
||||
self.footer.max_value
|
||||
}
|
||||
#[inline]
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.footer.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
|
||||
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
|
||||
}
|
||||
/// Same as LinearSerializer, but working on chunks of CHUNK_SIZE elements.
|
||||
pub struct BlockwiseLinearCodec;
|
||||
|
||||
#[inline]
|
||||
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
|
||||
(first_val as i64 + (pos as f32 * slope) as i64) as u64
|
||||
}
|
||||
impl FastFieldCodec for BlockwiseLinearCodec {
|
||||
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::BlockwiseLinear;
|
||||
|
||||
/// Same as LinearInterpolFastFieldSerializer, but working on chunks of CHUNK_SIZE elements.
|
||||
pub struct MultiLinearInterpolFastFieldSerializer {}
|
||||
type Reader = BlockwiseLinearReader;
|
||||
|
||||
/// Opens a fast field given a file.
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
|
||||
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
|
||||
let footer_offset = bytes.len() - 4 - footer_len as usize;
|
||||
let (data, mut footer) = bytes.split(footer_offset);
|
||||
let footer = BlockwiseLinearFooter::deserialize(&mut footer)?;
|
||||
Ok(BlockwiseLinearReader { data, footer })
|
||||
}
|
||||
|
||||
impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
const NAME: &'static str = "MultiLinearInterpol";
|
||||
const ID: u8 = 3;
|
||||
/// Creates a new fast field serializer.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
fastfield_accessor: &dyn FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
_data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()> {
|
||||
assert!(stats.min_value <= stats.max_value);
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
|
||||
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());
|
||||
|
||||
let first_val = fastfield_accessor.get_val(0);
|
||||
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
|
||||
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
|
||||
|
||||
let mut first_function = Function {
|
||||
end_pos: stats.num_vals,
|
||||
end_pos: fastfield_accessor.num_vals(),
|
||||
value_start_pos: first_val,
|
||||
value_end_pos: last_val,
|
||||
..Default::default()
|
||||
@@ -217,7 +213,7 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
// Since we potentially apply multiple passes over the data, the data is cached.
|
||||
// Multiple iteration can be expensive (merge with index sorting can add lot of overhead per
|
||||
// iteration)
|
||||
let data = data_iter.collect::<Vec<_>>();
|
||||
let data = fastfield_accessor.iter().collect::<Vec<_>>();
|
||||
|
||||
//// let's split this into chunks of CHUNK_SIZE
|
||||
for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
|
||||
@@ -280,49 +276,47 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
}
|
||||
bit_packer.close(write)?;
|
||||
|
||||
let footer = MultiLinearInterpolFooter {
|
||||
num_vals: stats.num_vals,
|
||||
min_value: stats.min_value,
|
||||
max_value: stats.max_value,
|
||||
let footer = BlockwiseLinearFooter {
|
||||
num_vals: fastfield_accessor.num_vals(),
|
||||
min_value: fastfield_accessor.min_value(),
|
||||
max_value: fastfield_accessor.max_value(),
|
||||
interpolations,
|
||||
};
|
||||
footer.serialize(write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_applicable(
|
||||
_fastfield_accessor: &impl FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
) -> bool {
|
||||
if stats.num_vals < 5_000 {
|
||||
return false;
|
||||
/// estimation for linear interpolation is hard because, you don't know
|
||||
/// where the local maxima are for the deviation of the calculated value and
|
||||
/// the offset is also unknown.
|
||||
#[allow(clippy::question_mark)]
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
|
||||
if fastfield_accessor.num_vals() < 10 * CHUNK_SIZE {
|
||||
return None;
|
||||
}
|
||||
|
||||
// On serialization the offset is added to the actual value.
|
||||
// We need to make sure this won't run into overflow calculation issues.
|
||||
// For this we take the maximum theroretical offset and add this to the max value.
|
||||
// If this doesn't overflow the algorithm should be fine
|
||||
let theorethical_maximum_offset = stats.max_value - stats.min_value;
|
||||
if stats
|
||||
.max_value
|
||||
let theorethical_maximum_offset =
|
||||
fastfield_accessor.max_value() - fastfield_accessor.min_value();
|
||||
if fastfield_accessor
|
||||
.max_value()
|
||||
.checked_add(theorethical_maximum_offset)
|
||||
.is_none()
|
||||
{
|
||||
return false;
|
||||
return None;
|
||||
}
|
||||
true
|
||||
}
|
||||
/// estimation for linear interpolation is hard because, you don't know
|
||||
/// where the local maxima are for the deviation of the calculated value and
|
||||
/// the offset is also unknown.
|
||||
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
|
||||
|
||||
let first_val_in_first_block = fastfield_accessor.get_val(0);
|
||||
let last_elem_in_first_chunk = CHUNK_SIZE.min(stats.num_vals);
|
||||
let last_elem_in_first_chunk = CHUNK_SIZE.min(fastfield_accessor.num_vals());
|
||||
let last_val_in_first_block =
|
||||
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
|
||||
let slope = get_slope(
|
||||
first_val_in_first_block,
|
||||
last_val_in_first_block,
|
||||
stats.num_vals,
|
||||
fastfield_accessor.num_vals(),
|
||||
);
|
||||
|
||||
// let's sample at 0%, 5%, 10% .. 95%, 100%, but for the first block only
|
||||
@@ -349,11 +343,11 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
|
||||
//
|
||||
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
|
||||
|
||||
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
|
||||
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * fastfield_accessor.num_vals() as u64
|
||||
// function metadata per block
|
||||
+ 29 * (stats.num_vals / CHUNK_SIZE);
|
||||
let num_bits_uncompressed = 64 * stats.num_vals;
|
||||
num_bits as f32 / num_bits_uncompressed as f32
|
||||
+ 29 * (fastfield_accessor.num_vals() / CHUNK_SIZE);
|
||||
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
|
||||
Some(num_bits as f32 / num_bits_uncompressed as f32)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -368,20 +362,35 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tests::get_codec_test_data_sets;
|
||||
use crate::tests::get_codec_test_datasets;
|
||||
|
||||
fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
|
||||
crate::tests::create_and_validate::<
|
||||
MultiLinearInterpolFastFieldSerializer,
|
||||
MultiLinearInterpolFastFieldReader,
|
||||
>(data, name)
|
||||
fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
|
||||
crate::tests::create_and_validate::<BlockwiseLinearCodec>(data, name)
|
||||
}
|
||||
|
||||
const HIGHEST_BIT: u64 = 1 << 63;
|
||||
pub fn i64_to_u64(val: i64) -> u64 {
|
||||
(val as u64) ^ HIGHEST_BIT
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compression_i64() {
|
||||
let data = (i64::MAX - 600_000..=i64::MAX - 550_000)
|
||||
.map(i64_to_u64)
|
||||
.collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate(&data, "simple monotonically large i64").unwrap();
|
||||
assert!(actual_compression < 0.2);
|
||||
assert!(estimate < 0.20);
|
||||
assert!(estimate > 0.15);
|
||||
assert!(actual_compression > 0.01);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compression() {
|
||||
let data = (10..=6_000_u64).collect::<Vec<_>>();
|
||||
let (estimate, actual_compression) =
|
||||
create_and_validate(&data, "simple monotonically large");
|
||||
create_and_validate(&data, "simple monotonically large").unwrap();
|
||||
assert!(actual_compression < 0.2);
|
||||
assert!(estimate < 0.20);
|
||||
assert!(estimate > 0.15);
|
||||
@@ -390,7 +399,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_with_codec_data_sets() {
|
||||
let data_sets = get_codec_test_data_sets();
|
||||
let data_sets = get_codec_test_datasets();
|
||||
for (mut data, name) in data_sets {
|
||||
create_and_validate(&data, name);
|
||||
data.reverse();
|
||||
49
fastfield_codecs/src/column.rs
Normal file
49
fastfield_codecs/src/column.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
pub trait Column<T = u64> {
|
||||
/// Return the value associated to the given idx.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `idx` is greater than the column length.
|
||||
fn get_val(&self, idx: u64) -> T;
|
||||
|
||||
/// Fills an output buffer with the fast field values
|
||||
/// associated with the `DocId` going from
|
||||
/// `start` to `start + output.len()`.
|
||||
///
|
||||
/// Regardless of the type of `Item`, this method works
|
||||
/// - transmuting the output array
|
||||
/// - extracting the `Item`s as if they were `u64`
|
||||
/// - possibly converting the `u64` value to the right type.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `start + output.len()` is greater than
|
||||
/// the segment's `maxdoc`.
|
||||
fn get_range(&self, start: u64, output: &mut [T]) {
|
||||
for (out, idx) in output.iter_mut().zip(start..) {
|
||||
*out = self.get_val(idx);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the minimum value for this fast field.
|
||||
///
|
||||
/// The min value does not take in account of possible
|
||||
/// deleted document, and should be considered as a lower bound
|
||||
/// of the actual minimum value.
|
||||
fn min_value(&self) -> T;
|
||||
|
||||
/// Returns the maximum value for this fast field.
|
||||
///
|
||||
/// The max value does not take in account of possible
|
||||
/// deleted document, and should be considered as an upper bound
|
||||
/// of the actual maximum value
|
||||
fn max_value(&self) -> T;
|
||||
|
||||
fn num_vals(&self) -> u64;
|
||||
/// Returns a iterator over the data
|
||||
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = T> + 'a> {
|
||||
Box::new((0..self.num_vals()).map(|idx| self.get_val(idx)))
|
||||
}
|
||||
}
|
||||
@@ -5,61 +5,81 @@ extern crate more_asserts;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use common::BinarySerializable;
|
||||
use ownedbytes::OwnedBytes;
|
||||
|
||||
pub mod bitpacked;
|
||||
pub mod linearinterpol;
|
||||
pub mod multilinearinterpol;
|
||||
pub mod blockwise_linear;
|
||||
pub mod linear;
|
||||
|
||||
pub trait FastFieldCodecReader: Sized {
|
||||
/// reads the metadata and returns the CodecReader
|
||||
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
|
||||
mod column;
|
||||
|
||||
fn get_u64(&self, doc: u64, data: &[u8]) -> u64;
|
||||
pub use self::column::Column;
|
||||
|
||||
fn min_value(&self) -> u64;
|
||||
fn max_value(&self) -> u64;
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
|
||||
#[repr(u8)]
|
||||
pub enum FastFieldCodecType {
|
||||
Bitpacked = 1,
|
||||
Linear = 2,
|
||||
BlockwiseLinear = 3,
|
||||
Gcd = 4,
|
||||
}
|
||||
|
||||
impl BinarySerializable for FastFieldCodecType {
|
||||
fn serialize<W: Write>(&self, wrt: &mut W) -> io::Result<()> {
|
||||
self.to_code().serialize(wrt)
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let code = u8::deserialize(reader)?;
|
||||
let codec_type: Self = Self::from_code(code)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Unknown code `{code}.`"))?;
|
||||
Ok(codec_type)
|
||||
}
|
||||
}
|
||||
|
||||
impl FastFieldCodecType {
|
||||
pub fn to_code(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub fn from_code(code: u8) -> Option<Self> {
|
||||
match code {
|
||||
1 => Some(Self::Bitpacked),
|
||||
2 => Some(Self::Linear),
|
||||
3 => Some(Self::BlockwiseLinear),
|
||||
4 => Some(Self::Gcd),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The FastFieldSerializerEstimate trait is required on all variants
|
||||
/// of fast field compressions, to decide which one to choose.
|
||||
pub trait FastFieldCodecSerializer {
|
||||
pub trait FastFieldCodec {
|
||||
/// A codex needs to provide a unique name and id, which is
|
||||
/// used for debugging and de/serialization.
|
||||
const NAME: &'static str;
|
||||
const ID: u8;
|
||||
const CODEC_TYPE: FastFieldCodecType;
|
||||
|
||||
/// Check if the Codec is able to compress the data
|
||||
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> bool;
|
||||
type Reader: Column<u64>;
|
||||
|
||||
/// Reads the metadata and returns the CodecReader
|
||||
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader>;
|
||||
|
||||
/// Serializes the data using the serializer into write.
|
||||
///
|
||||
/// The fastfield_accessor iterator should be preferred over using fastfield_accessor for
|
||||
/// performance reasons.
|
||||
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column<u64>) -> io::Result<()>;
|
||||
|
||||
/// Returns an estimate of the compression ratio.
|
||||
/// If the codec is not applicable, returns `None`.
|
||||
///
|
||||
/// The baseline is uncompressed 64bit data.
|
||||
///
|
||||
/// It could make sense to also return a value representing
|
||||
/// computational complexity.
|
||||
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32;
|
||||
|
||||
/// Serializes the data using the serializer into write.
|
||||
/// There are multiple iterators, in case the codec needs to read the data multiple times.
|
||||
/// The iterators should be preferred over using fastfield_accessor for performance reasons.
|
||||
fn serialize(
|
||||
write: &mut impl Write,
|
||||
fastfield_accessor: &dyn FastFieldDataAccess,
|
||||
stats: FastFieldStats,
|
||||
data_iter: impl Iterator<Item = u64>,
|
||||
data_iter1: impl Iterator<Item = u64>,
|
||||
) -> io::Result<()>;
|
||||
}
|
||||
|
||||
/// FastFieldDataAccess is the trait to access fast field data during serialization and estimation.
|
||||
pub trait FastFieldDataAccess {
|
||||
/// Return the value associated to the given position.
|
||||
///
|
||||
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
|
||||
/// reasons.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `position` is greater than the index.
|
||||
fn get_val(&self, position: u64) -> u64;
|
||||
fn estimate(fastfield_accessor: &impl Column) -> Option<f32>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -70,61 +90,102 @@ pub struct FastFieldStats {
|
||||
pub num_vals: u64,
|
||||
}
|
||||
|
||||
impl<'a> FastFieldDataAccess for &'a [u64] {
|
||||
struct VecColum<'a>(&'a [u64]);
|
||||
impl<'a> Column for VecColum<'a> {
|
||||
|
||||
fn get_val(&self, position: u64) -> u64 {
|
||||
self[position as usize]
|
||||
self.0[position as usize]
|
||||
}
|
||||
|
||||
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
|
||||
Box::new(self.0.iter().cloned())
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u64 {
|
||||
self.0.iter().min().cloned().unwrap_or(0)
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.0.iter().max().cloned().unwrap_or(0)
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.0.len() as u64
|
||||
}
|
||||
}
|
||||
|
||||
impl FastFieldDataAccess for Vec<u64> {
|
||||
fn get_val(&self, position: u64) -> u64 {
|
||||
self[position as usize]
|
||||
impl<'a> From<&'a [u64]> for VecColum<'a> {
|
||||
fn from(data: &'a [u64]) -> Self {
|
||||
Self(data)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
|
||||
use crate::linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer};
|
||||
use crate::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use proptest::prelude::*;
|
||||
use proptest::strategy::Strategy;
|
||||
use proptest::{prop_oneof, proptest};
|
||||
|
||||
pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
|
||||
use crate::bitpacked::BitpackedCodec;
|
||||
use crate::blockwise_linear::BlockwiseLinearCodec;
|
||||
use crate::linear::LinearCodec;
|
||||
|
||||
pub fn create_and_validate<Codec: FastFieldCodec>(
|
||||
data: &[u64],
|
||||
name: &str,
|
||||
) -> (f32, f32) {
|
||||
if !S::is_applicable(&data, crate::tests::stats_from_vec(data)) {
|
||||
return (f32::MAX, 0.0);
|
||||
}
|
||||
let estimation = S::estimate(&data, crate::tests::stats_from_vec(data));
|
||||
let mut out = vec![];
|
||||
S::serialize(
|
||||
&mut out,
|
||||
&data,
|
||||
crate::tests::stats_from_vec(data),
|
||||
data.iter().cloned(),
|
||||
data.iter().cloned(),
|
||||
)
|
||||
.unwrap();
|
||||
) -> Option<(f32, f32)> {
|
||||
let estimation = Codec::estimate(&VecColum::from(data))?;
|
||||
|
||||
let mut out: Vec<u8> = Vec::new();
|
||||
Codec::serialize(&mut out, &VecColum::from(data)).unwrap();
|
||||
|
||||
let reader = R::open_from_bytes(&out).unwrap();
|
||||
for (doc, orig_val) in data.iter().enumerate() {
|
||||
let val = reader.get_u64(doc as u64, &out);
|
||||
if val != *orig_val {
|
||||
panic!(
|
||||
"val {:?} does not match orig_val {:?}, in data set {}, data {:?}",
|
||||
val, orig_val, name, data
|
||||
);
|
||||
}
|
||||
}
|
||||
let actual_compression = out.len() as f32 / (data.len() as f32 * 8.0);
|
||||
(estimation, actual_compression)
|
||||
|
||||
let reader = Codec::open_from_bytes(OwnedBytes::new(out)).unwrap();
|
||||
assert_eq!(reader.num_vals(), data.len() as u64);
|
||||
for (doc, orig_val) in data.iter().copied().enumerate() {
|
||||
let val = reader.get_val(doc as u64);
|
||||
assert_eq!(
|
||||
val, orig_val,
|
||||
"val `{val}` does not match orig_val {orig_val:?}, in data set {name}, data \
|
||||
`{data:?}`",
|
||||
);
|
||||
}
|
||||
Some((estimation, actual_compression))
|
||||
}
|
||||
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(100))]
|
||||
#[test]
|
||||
fn test_proptest_small(data in proptest::collection::vec(num_strategy(), 1..10)) {
|
||||
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
|
||||
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
|
||||
}
|
||||
}
|
||||
|
||||
proptest! {
|
||||
#![proptest_config(ProptestConfig::with_cases(10))]
|
||||
#[test]
|
||||
fn test_proptest_large(data in proptest::collection::vec(num_strategy(), 1..6000)) {
|
||||
create_and_validate::<LinearCodec>(&data, "proptest linearinterpol");
|
||||
create_and_validate::<BlockwiseLinearCodec>(&data, "proptest multilinearinterpol");
|
||||
create_and_validate::<BitpackedCodec>(&data, "proptest bitpacked");
|
||||
}
|
||||
|
||||
}
|
||||
fn num_strategy() -> impl Strategy<Value = u64> {
|
||||
prop_oneof![
|
||||
1 => prop::num::u64::ANY.prop_map(|num| u64::MAX - (num % 10) ),
|
||||
1 => prop::num::u64::ANY.prop_map(|num| num % 10 ),
|
||||
20 => prop::num::u64::ANY,
|
||||
]
|
||||
}
|
||||
|
||||
pub fn get_codec_test_datasets() -> Vec<(Vec<u64>, &'static str)> {
|
||||
let mut data_and_names = vec![];
|
||||
|
||||
let data = (10..=20_u64).collect::<Vec<_>>();
|
||||
let data = (10..=10_000_u64).collect::<Vec<_>>();
|
||||
data_and_names.push((data, "simple monotonically increasing"));
|
||||
|
||||
data_and_names.push((
|
||||
@@ -134,92 +195,93 @@ mod tests {
|
||||
data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
|
||||
data_and_names.push((vec![10], "single value"));
|
||||
|
||||
data_and_names.push((
|
||||
vec![1572656989877777, 1170935903116329, 720575940379279, 0],
|
||||
"overflow error",
|
||||
));
|
||||
|
||||
data_and_names
|
||||
}
|
||||
|
||||
fn test_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>() {
|
||||
let codec_name = S::NAME;
|
||||
for (data, data_set_name) in get_codec_test_data_sets() {
|
||||
let (estimate, actual) =
|
||||
crate::tests::create_and_validate::<S, R>(&data, data_set_name);
|
||||
let result = if estimate == f32::MAX {
|
||||
"Disabled".to_string()
|
||||
fn test_codec<C: FastFieldCodec>() {
|
||||
let codec_name = format!("{:?}", C::CODEC_TYPE);
|
||||
for (data, dataset_name) in get_codec_test_datasets() {
|
||||
let estimate_actual_opt: Option<(f32, f32)> =
|
||||
crate::tests::create_and_validate::<C>(&data, dataset_name);
|
||||
let result = if let Some((estimate, actual)) = estimate_actual_opt {
|
||||
format!("Estimate `{estimate}` Actual `{actual}`")
|
||||
} else {
|
||||
format!("Estimate {:?} Actual {:?} ", estimate, actual)
|
||||
"Disabled".to_string()
|
||||
};
|
||||
println!(
|
||||
"Codec {}, DataSet {}, {}",
|
||||
codec_name, data_set_name, result
|
||||
);
|
||||
println!("Codec {codec_name}, DataSet {dataset_name}, {result}");
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn test_codec_bitpacking() {
|
||||
test_codec::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>();
|
||||
test_codec::<BitpackedCodec>();
|
||||
}
|
||||
#[test]
|
||||
fn test_codec_interpolation() {
|
||||
test_codec::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>();
|
||||
test_codec::<LinearCodec>();
|
||||
}
|
||||
#[test]
|
||||
fn test_codec_multi_interpolation() {
|
||||
test_codec::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>();
|
||||
test_codec::<BlockwiseLinearCodec>();
|
||||
}
|
||||
|
||||
use super::*;
|
||||
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
|
||||
let min_value = data.iter().cloned().min().unwrap_or(0);
|
||||
let max_value = data.iter().cloned().max().unwrap_or(0);
|
||||
FastFieldStats {
|
||||
min_value,
|
||||
max_value,
|
||||
num_vals: data.len() as u64,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn estimation_good_interpolation_case() {
|
||||
let data = (10..=20000_u64).collect::<Vec<_>>();
|
||||
let data: VecColum = data.as_slice().into();
|
||||
|
||||
let linear_interpol_estimation =
|
||||
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.01);
|
||||
|
||||
let multi_linear_interpol_estimation =
|
||||
MultiLinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let multi_linear_interpol_estimation = BlockwiseLinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(multi_linear_interpol_estimation, 0.2);
|
||||
assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
|
||||
|
||||
let bitpacked_estimation =
|
||||
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, bitpacked_estimation);
|
||||
}
|
||||
#[test]
|
||||
fn estimation_test_bad_interpolation_case() {
|
||||
let data = vec![200, 10, 10, 10, 10, 1000, 20];
|
||||
let data: &[u64] = &[200, 10, 10, 10, 10, 1000, 20];
|
||||
|
||||
let linear_interpol_estimation =
|
||||
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let data: VecColum = data.into();
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.32);
|
||||
|
||||
let bitpacked_estimation =
|
||||
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_le!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
#[test]
|
||||
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
|
||||
let mut data = (200..=20000_u64).collect::<Vec<_>>();
|
||||
let mut data: Vec<u64> = (200..=20000_u64).collect();
|
||||
data.push(1_000_000);
|
||||
let data: VecColum = data.as_slice().into();
|
||||
|
||||
// in this case the linear interpolation can't in fact not be worse than bitpacking,
|
||||
// but the estimator adds some threshold, which leads to estimated worse behavior
|
||||
let linear_interpol_estimation =
|
||||
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let linear_interpol_estimation = LinearCodec::estimate(&data).unwrap();
|
||||
assert_le!(linear_interpol_estimation, 0.35);
|
||||
|
||||
let bitpacked_estimation =
|
||||
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
|
||||
let bitpacked_estimation = BitpackedCodec::estimate(&data).unwrap();
|
||||
assert_le!(bitpacked_estimation, 0.32);
|
||||
assert_le!(bitpacked_estimation, linear_interpol_estimation);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fast_field_codec_type_to_code() {
|
||||
let mut count_codec = 0;
|
||||
for code in 0..=255 {
|
||||
if let Some(codec_type) = FastFieldCodecType::from_code(code) {
|
||||
assert_eq!(codec_type.to_code(), code);
|
||||
count_codec += 1;
|
||||
}
|
||||
}
|
||||
assert_eq!(count_codec, 4);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,21 +2,23 @@ use std::io::{self, Read, Write};
|
||||
use std::ops::Sub;
|
||||
|
||||
use common::{BinarySerializable, FixedSize};
|
||||
use ownedbytes::OwnedBytes;
|
||||
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker};
|
||||
|
||||
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use crate::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
/// Depending on the field type, a different
|
||||
/// fast field is required.
|
||||
#[derive(Clone)]
|
||||
pub struct LinearInterpolFastFieldReader {
|
||||
pub struct LinearReader {
|
||||
data: OwnedBytes,
|
||||
bit_unpacker: BitUnpacker,
|
||||
pub footer: LinearInterpolFooter,
|
||||
pub footer: LinearFooter,
|
||||
pub slope: f32,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LinearInterpolFooter {
|
||||
pub struct LinearFooter {
|
||||
pub relative_max_value: u64,
|
||||
pub offset: u64,
|
||||
pub first_val: u64,
|
||||
@@ -26,7 +28,7 @@ pub struct LinearInterpolFooter {
|
||||
pub max_value: u64,
|
||||
}
|
||||
|
||||
impl BinarySerializable for LinearInterpolFooter {
|
||||
impl BinarySerializable for LinearFooter {
|
||||
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
|
||||
self.relative_max_value.serialize(write)?;
|
||||
self.offset.serialize(write)?;
|
||||
@@ -38,8 +40,8 @@ impl BinarySerializable for LinearInterpolFooter {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
|
||||
Ok(LinearInterpolFooter {
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearFooter> {
Ok(LinearFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
@@ -51,29 +53,15 @@ impl BinarySerializable for LinearInterpolFooter {
}
}

impl FixedSize for LinearInterpolFooter {
impl FixedSize for LinearFooter {
const SIZE_IN_BYTES: usize = 56;
}

impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);

let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
impl Column for LinearReader {
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
fn get_val(&self, doc: u64) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
(calculated_value + self.bit_unpacker.get(doc, &self.data)) - self.footer.offset
}

#[inline]
@@ -84,47 +72,87 @@ impl FastFieldCodecReader for LinearInterpolFastFieldReader {
fn max_value(&self) -> u64 {
self.footer.max_value
}
#[inline]
fn num_vals(&self) -> u64 {
self.footer.num_vals
}
}

/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}
pub struct LinearCodec;

#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
pub(crate) fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32.
// This is done in order to handle estimations for very large values like i64::MAX.
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
let diff = diff(last_val, first_val);
(diff / (num_vals - 1) as f64) as f32
}

/// Delay the cast, to improve precision for very large u64 values.
///
/// Since i64 is mapped monotonically into u64 space, 0i64 ends up just above i64::MAX
/// after the mapping. So very large values are not uncommon.
///
/// ```rust
/// let val1 = i64::MAX;
/// let val2 = i64::MAX - 100;
/// assert_eq!(val1 - val2, 100);
/// assert_eq!(val1 as f64 - val2 as f64, 0.0);
/// ```
fn diff(val1: u64, val2: u64) -> f64 {
if val1 >= val2 {
(val1 - val2) as f64
} else {
(val2 - val1) as f64 * -1.0
}
}

#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
pub fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
if slope < 0.0 {
first_val.saturating_sub((pos as f32 * -slope) as u64)
} else {
first_val.saturating_add((pos as f32 * slope) as u64)
}
}
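
The pieces above fit together as follows: the serializer stores, for each position, the offset-shifted difference between the actual value and the interpolated one, and `get_val` reverses that. A minimal sketch of the round trip with assumed sample values (not the real bit-packed payload):

// Sketch of the linear codec's round trip. `offset` is chosen so every
// stored delta is >= 0; `stored` is what would be bit-packed on disk.
fn linear_roundtrip_demo() {
    let first_val: u64 = 100;
    let slope: f32 = 5.0;
    let offset: u64 = 3;
    let values: [u64; 4] = [100, 104, 111, 116];

    for (pos, &val) in values.iter().enumerate() {
        let calculated = first_val + (pos as f32 * slope) as u64;
        let stored = (val + offset) - calculated;      // what the serializer writes
        let restored = (calculated + stored) - offset; // what get_val computes
        assert_eq!(restored, val);
    }
}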

impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
impl FastFieldCodec for LinearCodec {
const CODEC_TYPE: FastFieldCodecType = FastFieldCodecType::Linear;

type Reader = LinearReader;

/// Opens a fast field given a file.
fn open_from_bytes(bytes: OwnedBytes) -> io::Result<Self::Reader> {
let footer_offset = bytes.len() - LinearFooter::SIZE_IN_BYTES;
let (data, mut footer) = bytes.split(footer_offset);
let footer = LinearFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearReader {
data,
bit_unpacker,
footer,
slope,
})
}

/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &dyn FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
fn serialize(write: &mut impl Write, fastfield_accessor: &dyn Column) -> io::Result<()> {
assert!(fastfield_accessor.min_value() <= fastfield_accessor.max_value());

let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());
// calculate the offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data_iter1.enumerate() {
for (pos, actual_value) in fastfield_accessor.iter().enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value, we need to apply an offset
@@ -142,56 +170,55 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {

let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in data_iter.enumerate() {
for (pos, val) in fastfield_accessor.iter().enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;

let footer = LinearInterpolFooter {
let footer = LinearFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
num_vals: fastfield_accessor.num_vals(),
min_value: fastfield_accessor.min_value(),
max_value: fastfield_accessor.max_value(),
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; // disable compressor for this case

/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the calculated value are, and
/// the offset to shift all values to >= 0 is also unknown.
#[allow(clippy::question_mark)]
fn estimate(fastfield_accessor: &impl Column) -> Option<f32> {
if fastfield_accessor.num_vals() < 3 {
return None; // disable compressor for this case
}

// On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theorethical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
let theorethical_maximum_offset =
fastfield_accessor.max_value() - fastfield_accessor.min_value();
if fastfield_accessor
.max_value()
.checked_add(theorethical_maximum_offset)
.is_none()
{
return false;
return None;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima of the deviation from the calculated value are, and
/// the offset to shift all values to >= 0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {

let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
let last_val = fastfield_accessor.get_val(fastfield_accessor.num_vals() as u64 - 1);
let slope = get_slope(first_val, last_val, fastfield_accessor.num_vals());

// let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = stats.num_vals as f32 / 100.0;
let num_vals = fastfield_accessor.num_vals() as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
@@ -213,10 +240,11 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
//
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;

let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
let num_bits = compute_num_bits(relative_max_value as u64) as u64
* fastfield_accessor.num_vals()
+ LinearFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * fastfield_accessor.num_vals();
Some(num_bits as f32 / num_bits_uncompressed as f32)
}
}
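
For orientation, this is roughly how a caller would drive the trait shown above: probe `estimate` first (a `None` means the codec disabled itself, e.g. for fewer than 3 values or a potential overflow), and only serialize when the ratio looks worthwhile. A hedged sketch, not part of the diff; `column` stands for any `Column` implementation and the `< 1.0` cutoff is an illustrative heuristic:

// Sketch: pick the linear codec only if it estimates an actual size win.
fn maybe_serialize_linear(
    column: &impl Column,
    out: &mut Vec<u8>,
) -> std::io::Result<Option<f32>> {
    match LinearCodec::estimate(column) {
        Some(ratio) if ratio < 1.0 => {
            LinearCodec::serialize(out, column)?; // &impl Column coerces to &dyn Column
            Ok(Some(ratio))
        }
        _ => Ok(None), // codec disabled or not worth it
    }
}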

@@ -231,29 +259,48 @@ fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {

#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
use rand::RngCore;

fn create_and_validate(data: &[u64], name: &str) -> (f32, f32) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name)
use super::*;
use crate::tests::get_codec_test_datasets;

fn create_and_validate(data: &[u64], name: &str) -> Option<(f32, f32)> {
crate::tests::create_and_validate::<LinearCodec>(data, name)
}

#[test]
fn get_calculated_value_test() {
// pos slope
assert_eq!(get_calculated_value(100, 10, 5.0), 150);

// neg slope
assert_eq!(get_calculated_value(100, 10, -5.0), 50);

// pos slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, 5.0),
i64::MAX as u64 + 50
);
// neg slope, very high values
assert_eq!(
get_calculated_value(i64::MAX as u64, 10, -5.0),
i64::MAX as u64 - 50
);
}

#[test]
fn test_compression() {
let data = (10..=6_000_u64).collect::<Vec<_>>();
let (estimate, actual_compression) =
create_and_validate(&data, "simple monotonically large");
create_and_validate(&data, "simple monotonically large").unwrap();

assert!(actual_compression < 0.01);
assert!(estimate < 0.01);
}

#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
fn test_with_codec_datasets() {
let data_sets = get_codec_test_datasets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
@@ -270,6 +317,13 @@ mod tests {

create_and_validate(&data, "large amplitude");
}

#[test]
fn overflow_error_test() {
let data = vec![1572656989877777, 1170935903116329, 720575940379279, 0];
create_and_validate(&data, "overflow test");
}

#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
@@ -289,10 +343,10 @@ mod tests {

#[test]
fn linear_interpol_fast_field_rand() {
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
let mut rng = rand::thread_rng();
for _ in 0..50 {
let mut data = (0..10_000).map(|_| rng.next_u64()).collect::<Vec<_>>();
create_and_validate(&data, "random");

data.reverse();
create_and_validate(&data, "random");
}
@@ -1,10 +1,35 @@
#[macro_use]
extern crate prettytable;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats};
use fastfield_codecs::bitpacked::BitpackedCodec;
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
use fastfield_codecs::linear::LinearCodec;
use fastfield_codecs::{Column, FastFieldCodec, FastFieldCodecType, FastFieldStats};
use prettytable::{Cell, Row, Table};

struct Data<'a>(&'a [u64]);

impl<'a> Column for Data<'a> {
fn get_val(&self, position: u64) -> u64 {
self.0[position as usize]
}

fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(self.0.iter().cloned())
}

fn min_value(&self) -> u64 {
*self.0.iter().min().unwrap_or(&0)
}

fn max_value(&self) -> u64 {
*self.0.iter().max().unwrap_or(&0)
}

fn num_vals(&self) -> u64 {
self.0.len() as u64
}
}
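
A quick usage check of the `Data` wrapper above (values chosen for illustration):

fn data_wrapper_demo() {
    // Data borrows a slice and exposes it through the Column trait.
    let column = Data(&[10, 20, 30]);
    assert_eq!(column.num_vals(), 3);
    assert_eq!(column.min_value(), 10);
    assert_eq!(column.max_value(), 30);
    assert_eq!(column.get_val(1), 20);
}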

fn main() {
let mut table = Table::new();

@@ -12,41 +37,32 @@ fn main() {
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);

for (data, data_set_name) in get_codec_test_data_sets() {
let mut results = vec![];
let res = serialize_with_codec::<LinearInterpolFastFieldSerializer>(&data);
results.push(res);
let res = serialize_with_codec::<MultiLinearInterpolFastFieldSerializer>(&data);
results.push(res);
let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedFastFieldSerializer>(
&data,
);
results.push(res);

// let best_estimation_codec = results
//.iter()
//.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
//.unwrap();
let results: Vec<(f32, f32, FastFieldCodecType)> = [
serialize_with_codec::<LinearCodec>(&data),
serialize_with_codec::<BlockwiseLinearCodec>(&data),
serialize_with_codec::<BlockwiseLinearCodec>(&data),
serialize_with_codec::<BitpackedCodec>(&data),
]
.into_iter()
.flatten()
.collect();
let best_compression_ratio_codec = results
.iter()
.min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
.min_by(|&res1, &res2| res1.partial_cmp(res2).unwrap())
.cloned()
.unwrap();

table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (is_applicable, est, comp, name) in results {
let (est_cell, ratio_cell) = if !is_applicable {
("Codec Disabled".to_string(), "".to_string())
} else {
(est.to_string(), comp.to_string())
};
for (est, comp, codec_type) in results {
let est_cell = est.to_string();
let ratio_cell = comp.to_string();
let style = if comp == best_compression_ratio_codec.1 {
"Fb"
} else {
""
};

table.add_row(Row::new(vec![
Cell::new(name).style_spec("bFg"),
Cell::new(&format!("{codec_type:?}")).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""),
]));
@@ -91,26 +107,15 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
data_and_names
}

pub fn serialize_with_codec<S: FastFieldCodecSerializer>(
pub fn serialize_with_codec<C: FastFieldCodec>(
data: &[u64],
) -> (bool, f32, f32, &'static str) {
let is_applicable = S::is_applicable(&data, stats_from_vec(data));
if !is_applicable {
return (false, 0.0, 0.0, S::NAME);
}
let estimation = S::estimate(&data, stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();

let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
(true, estimation, actual_compression, S::NAME)
) -> Option<(f32, f32, FastFieldCodecType)> {
let data = Data(data);
let estimation = C::estimate(&data)?;
let mut out = Vec::new();
C::serialize(&mut out, &data).unwrap();
let actual_compression = out.len() as f32 / (data.num_vals() * 8) as f32;
Some((estimation, actual_compression, C::CODEC_TYPE))
}

pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {

@@ -6,7 +6,7 @@ use std::{fmt, io, mem};
use stable_deref_trait::StableDeref;

/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
/// this data as a slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
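
The `StableDeref` requirement is what makes this wrapper sound: the owner may move, but the bytes it dereferences to must not. A hedged usage sketch, assuming the `ownedbytes` crate API already used elsewhere in this diff (`new`, `as_slice`, `split`); `Vec<u8>` qualifies because its heap buffer stays put when the `Vec` itself moves:

fn owned_bytes_demo() {
    use ownedbytes::OwnedBytes;

    // OwnedBytes keeps the Vec alive while handing out its bytes.
    let bytes = OwnedBytes::new(vec![1u8, 2, 3]);
    assert_eq!(bytes.as_slice(), &[1, 2, 3]);

    // split consumes the wrapper and yields two independent owners,
    // the same operation the codecs use to peel off their footers.
    let (left, right) = bytes.split(1);
    assert_eq!(left.as_slice(), &[1]);
    assert_eq!(right.as_slice(), &[2, 3]);
}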

@@ -1,3 +1,5 @@
#![allow(clippy::derive_partial_eq_without_eq)]

mod occur;
mod query_grammar;
mod user_input_ast;

@@ -1,6 +1,7 @@
use std::cmp::Ordering;
use std::fmt::Display;

use fastfield_codecs::Column;
use itertools::Itertools;
use serde::{Deserialize, Serialize};

@@ -14,7 +15,7 @@ use crate::aggregation::intermediate_agg_result::{
IntermediateAggregationResults, IntermediateBucketResult, IntermediateHistogramBucketEntry,
};
use crate::aggregation::segment_agg_result::SegmentAggregationResultsCollector;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::fastfield::DynamicFastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -331,10 +332,10 @@ impl SegmentHistogramCollector {
.expect("unexpected fast field cardinatility");
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val0 = self.f64_from_fastfield_u64(accessor.get(docs[0]));
let val1 = self.f64_from_fastfield_u64(accessor.get(docs[1]));
let val2 = self.f64_from_fastfield_u64(accessor.get(docs[2]));
let val3 = self.f64_from_fastfield_u64(accessor.get(docs[3]));
let val0 = self.f64_from_fastfield_u64(accessor.get_val(docs[0] as u64));
let val1 = self.f64_from_fastfield_u64(accessor.get_val(docs[1] as u64));
let val2 = self.f64_from_fastfield_u64(accessor.get_val(docs[2] as u64));
let val3 = self.f64_from_fastfield_u64(accessor.get_val(docs[3] as u64));

let bucket_pos0 = get_bucket_num(val0);
let bucket_pos1 = get_bucket_num(val1);
@@ -370,8 +371,8 @@ impl SegmentHistogramCollector {
&bucket_with_accessor.sub_aggregation,
)?;
}
for doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get(*doc), &self.field_type);
for &doc in iter.remainder() {
let val = f64_from_fastfield_u64(accessor.get_val(doc as u64), &self.field_type);
if !bounds.contains(val) {
continue;
}
@@ -382,7 +383,7 @@ impl SegmentHistogramCollector {
self.buckets[bucket_pos].key,
get_bucket_val(val, self.interval, self.offset) as f64
);
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
if let Some(sub_aggregations) = self.sub_aggregations.as_mut() {

@@ -1,6 +1,7 @@
use std::fmt::Debug;
use std::ops::Range;

use fastfield_codecs::Column;
use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};

@@ -12,7 +13,6 @@ use crate::aggregation::intermediate_agg_result::{
};
use crate::aggregation::segment_agg_result::{BucketCount, SegmentAggregationResultsCollector};
use crate::aggregation::{f64_from_fastfield_u64, f64_to_fastfield_u64, Key, SerializedKey};
use crate::fastfield::FastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -264,10 +264,10 @@ impl SegmentRangeCollector {
.as_single()
.expect("unexpected fast field cardinatility");
for docs in iter.by_ref() {
let val1 = accessor.get(docs[0]);
let val2 = accessor.get(docs[1]);
let val3 = accessor.get(docs[2]);
let val4 = accessor.get(docs[3]);
let val1 = accessor.get_val(docs[0] as u64);
let val2 = accessor.get_val(docs[1] as u64);
let val3 = accessor.get_val(docs[2] as u64);
let val4 = accessor.get_val(docs[3] as u64);
let bucket_pos1 = self.get_bucket_pos(val1);
let bucket_pos2 = self.get_bucket_pos(val2);
let bucket_pos3 = self.get_bucket_pos(val3);
@@ -278,10 +278,10 @@ impl SegmentRangeCollector {
self.increment_bucket(bucket_pos3, docs[2], &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos4, docs[3], &bucket_with_accessor.sub_aggregation)?;
}
for doc in iter.remainder() {
let val = accessor.get(*doc);
for &doc in iter.remainder() {
let val = accessor.get_val(doc as u64);
let bucket_pos = self.get_bucket_pos(val);
self.increment_bucket(bucket_pos, *doc, &bucket_with_accessor.sub_aggregation)?;
self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
}
if force_flush {
for bucket in &mut self.buckets {

@@ -1,9 +1,10 @@
use std::fmt::Debug;

use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::fastfield::DynamicFastFieldReader;
use crate::schema::Type;
use crate::DocId;

@@ -60,10 +61,10 @@ impl SegmentAverageCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -73,8 +74,8 @@ impl SegmentAverageCollector {
self.data.collect(val3);
self.data.collect(val4);
}
for doc in iter.remainder() {
let val = field.get(*doc);
for &doc in iter.remainder() {
let val = field.get_val(doc as u64);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.data.collect(val);
}

@@ -1,7 +1,8 @@
use fastfield_codecs::Column;
use serde::{Deserialize, Serialize};

use crate::aggregation::f64_from_fastfield_u64;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::fastfield::DynamicFastFieldReader;
use crate::schema::Type;
use crate::{DocId, TantivyError};

@@ -166,10 +167,10 @@ impl SegmentStatsCollector {
pub(crate) fn collect_block(&mut self, doc: &[DocId], field: &DynamicFastFieldReader<u64>) {
let mut iter = doc.chunks_exact(4);
for docs in iter.by_ref() {
let val1 = field.get(docs[0]);
let val2 = field.get(docs[1]);
let val3 = field.get(docs[2]);
let val4 = field.get(docs[3]);
let val1 = field.get_val(docs[0] as u64);
let val2 = field.get_val(docs[1] as u64);
let val3 = field.get_val(docs[2] as u64);
let val4 = field.get_val(docs[3] as u64);
let val1 = f64_from_fastfield_u64(val1, &self.field_type);
let val2 = f64_from_fastfield_u64(val2, &self.field_type);
let val3 = f64_from_fastfield_u64(val3, &self.field_type);
@@ -179,8 +180,8 @@ impl SegmentStatsCollector {
self.stats.collect(val3);
self.stats.collect(val4);
}
for doc in iter.remainder() {
let val = field.get(*doc);
for &doc in iter.remainder() {
let val = field.get_val(doc as u64);
let val = f64_from_fastfield_u64(val, &self.field_type);
self.stats.collect(val);
}

@@ -11,8 +11,10 @@
// Importing tantivy...
use std::marker::PhantomData;

use fastfield_codecs::Column;

use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::fastfield::{DynamicFastFieldReader, FastValue};
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};

@@ -174,7 +176,7 @@ where
type Fruit = TSegmentCollector::Fruit;

fn collect(&mut self, doc: u32, score: Score) {
let value = self.fast_field_reader.get(doc);
let value = self.fast_field_reader.get_val(doc as u64);
if (self.predicate)(value) {
self.segment_collector.collect(doc, score)
}

@@ -1,7 +1,8 @@
use fastdivide::DividerU64;
use fastfield_codecs::Column;

use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::fastfield::{DynamicFastFieldReader, FastValue};
use crate::schema::{Field, Type};
use crate::{DocId, Score};

@@ -91,7 +92,7 @@ impl SegmentCollector for SegmentHistogramCollector {
type Fruit = Vec<u64>;

fn collect(&mut self, doc: DocId, _score: Score) {
let value = self.ff_reader.get(doc);
let value = self.ff_reader.get_val(doc as u64);
self.histogram_computer.add_value(value);
}

@@ -1,7 +1,9 @@
use fastfield_codecs::Column;

use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
use crate::core::SegmentReader;
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader};
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader};
use crate::query::{AllQuery, QueryParser};
use crate::schema::{Field, Schema, FAST, TEXT};
use crate::time::format_description::well_known::Rfc3339;
@@ -197,7 +199,7 @@ impl SegmentCollector for FastFieldSegmentCollector {
type Fruit = Vec<u64>;

fn collect(&mut self, doc: DocId, _score: Score) {
let val = self.reader.get(doc);
let val = self.reader.get_val(doc as u64);
self.vals.push(val);
}

@@ -2,6 +2,8 @@ use std::collections::BinaryHeap;
use std::fmt;
use std::marker::PhantomData;

use fastfield_codecs::Column;

use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector;
use crate::collector::top_collector::{ComparableDoc, TopCollector, TopSegmentCollector};
@@ -9,7 +11,7 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::fastfield::{DynamicFastFieldReader, FastValue};
use crate::query::Weight;
use crate::schema::Field;
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
@@ -134,7 +136,7 @@ struct ScorerByFastFieldReader {

impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
fn score(&mut self, doc: DocId) -> u64 {
self.ff_reader.get(doc)
self.ff_reader.get_val(doc as u64)
}
}

@@ -407,7 +409,7 @@ impl TopDocs {
/// # use tantivy::query::QueryParser;
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::fastfield::FastFieldReader;
/// use tantivy::fastfield::Column;
/// use tantivy::schema::Field;
///
/// fn create_schema() -> Schema {
@@ -456,7 +458,7 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId, original_score: Score| {
/// let popularity: u64 = popularity_reader.get(doc);
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// // Well.. For the sake of the example we use a simple logarithm
/// // function.
/// let popularity_boost_score = ((2u64 + popularity) as Score).log2();
@@ -515,7 +517,7 @@ impl TopDocs {
/// use tantivy::SegmentReader;
/// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field;
/// use tantivy::fastfield::FastFieldReader;
/// use fastfield_codecs::Column;
///
/// # fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder();
@@ -567,8 +569,8 @@ impl TopDocs {
///
/// // We can now define our actual scoring function
/// move |doc: DocId| {
/// let popularity: u64 = popularity_reader.get(doc);
/// let boosted: u64 = boosted_reader.get(doc);
/// let popularity: u64 = popularity_reader.get_val(doc as u64);
/// let boosted: u64 = boosted_reader.get_val(doc as u64);
/// // Scores do not have to be `f64` in tantivy.
/// // Here we return a couple to get lexicographical order
/// // for free.

@@ -7,6 +7,7 @@ use std::sync::Arc;

use super::segment::Segment;
use super::IndexSettings;
use crate::core::single_segment_index_writer::SingleSegmentIndexWriter;
use crate::core::{
Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
};
@@ -16,7 +17,7 @@ use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_new_metas;
use crate::indexer::segment_updater::save_metas;
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
@@ -47,6 +48,34 @@ fn load_metas(
.map_err(From::from)
}

/// Save the index meta file.
/// This operation is atomic:
/// Either
/// - it fails, in which case an error is returned
///   and the `meta.json` remains untouched,
/// - or it succeeds, and `meta.json` is written
///   and flushed.
///
/// This method is not part of tantivy's public API.
fn save_new_metas(
schema: Schema,
index_settings: IndexSettings,
directory: &dyn Directory,
) -> crate::Result<()> {
save_metas(
&IndexMeta {
index_settings,
segments: Vec::new(),
schema,
opstamp: 0u64,
payload: None,
},
directory,
)?;
directory.sync_directory()?;
Ok(())
}

/// IndexBuilder can be used to create an index.
///
/// Use in conjunction with `SchemaBuilder`. Global index settings
@@ -135,6 +164,25 @@ impl IndexBuilder {
self.create(mmap_directory)
}

/// Dragons ahead!!!
///
/// The point of this API is to let users create a simple index with a single segment
/// and without starting any thread.
///
/// Do not use this method if you are not sure what you are doing.
///
/// It expects an originally empty directory, and will not run any GC operation.
#[doc(hidden)]
pub fn single_segment_index_writer(
self,
dir: impl Into<Box<dyn Directory>>,
mem_budget: usize,
) -> crate::Result<SingleSegmentIndexWriter> {
let index = self.create(dir)?;
let index_simple_writer = SingleSegmentIndexWriter::new(index, mem_budget)?;
Ok(index_simple_writer)
}

/// Creates a new index in a temp directory.
///
/// The index will use the `MMapDirectory` in a newly created directory.
@@ -580,10 +628,12 @@ impl fmt::Debug for Index {

#[cfg(test)]
mod tests {
use crate::collector::Count;
use crate::directory::{RamDirectory, WatchCallback};
use crate::schema::{Field, Schema, INDEXED, TEXT};
use crate::query::TermQuery;
use crate::schema::{Field, IndexRecordOption, Schema, INDEXED, TEXT};
use crate::tokenizer::TokenizerManager;
use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy};
use crate::{Directory, Index, IndexBuilder, IndexReader, IndexSettings, ReloadPolicy, Term};

#[test]
fn test_indexer_for_field() {
@@ -849,4 +899,28 @@ mod tests {
);
Ok(())
}

#[test]
fn test_single_segment_index_writer() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let directory = RamDirectory::default();
let mut single_segment_index_writer = Index::builder()
.schema(schema)
.single_segment_index_writer(directory, 10_000_000)?;
for _ in 0..10 {
let doc = doc!(text_field=>"hello");
single_segment_index_writer.add_document(doc)?;
}
let index = single_segment_index_writer.finalize()?;
let searcher = index.reader()?.searcher();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "hello"),
IndexRecordOption::Basic,
);
let count = searcher.search(&term_query, &Count)?;
assert_eq!(count, 10);
Ok(())
}
}

@@ -7,6 +7,7 @@ mod segment;
mod segment_component;
mod segment_id;
mod segment_reader;
mod single_segment_index_writer;

use std::path::Path;

@@ -23,6 +24,7 @@ pub use self::segment::Segment;
pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId;
pub use self::segment_reader::SegmentReader;
pub use self::single_segment_index_writer::SingleSegmentIndexWriter;

/// The meta file contains all the information about the list of segments and the schema
/// of the index.

@@ -247,6 +247,14 @@ impl SearcherInner {
generation: TrackedObject<SearcherGeneration>,
doc_store_cache_size: usize,
) -> io::Result<SearcherInner> {
assert_eq!(
&segment_readers
.iter()
.map(|reader| (reader.segment_id(), reader.delete_opstamp()))
.collect::<BTreeMap<_, _>>(),
generation.segments(),
"Set of segments referenced by this Searcher and its SearcherGeneration must match"
);
let store_readers: Vec<StoreReader> = segment_readers
.iter()
.map(|segment_reader| segment_reader.get_store_reader(doc_store_cache_size))
47  src/core/single_segment_index_writer.rs  (new file)
@@ -0,0 +1,47 @@
use crate::indexer::operation::AddOperation;
use crate::indexer::segment_updater::save_metas;
use crate::indexer::SegmentWriter;
use crate::{Directory, Document, Index, IndexMeta, Opstamp, Segment};

#[doc(hidden)]
pub struct SingleSegmentIndexWriter {
segment_writer: SegmentWriter,
segment: Segment,
opstamp: Opstamp,
}

impl SingleSegmentIndexWriter {
pub fn new(index: Index, mem_budget: usize) -> crate::Result<Self> {
let segment = index.new_segment();
let segment_writer = SegmentWriter::for_segment(mem_budget, segment.clone())?;
Ok(Self {
segment_writer,
segment,
opstamp: 0,
})
}

pub fn add_document(&mut self, document: Document) -> crate::Result<()> {
let opstamp = self.opstamp;
self.opstamp += 1;
self.segment_writer
.add_document(AddOperation { opstamp, document })
}

pub fn finalize(self) -> crate::Result<Index> {
let max_doc = self.segment_writer.max_doc();
self.segment_writer.finalize()?;
let segment: Segment = self.segment.with_max_doc(max_doc);
let index = segment.index();
let index_meta = IndexMeta {
index_settings: index.settings().clone(),
segments: vec![segment.meta().clone()],
schema: index.schema(),
opstamp: 0,
payload: None,
};
save_metas(&index_meta, index.directory())?;
index.directory().sync_directory()?;
Ok(segment.index().clone())
}
}
@@ -38,7 +38,7 @@ impl BinarySerializable for FileAddr {
/// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> {
write: CountingWriter<W>,
offsets: HashMap<FileAddr, u64>,
offsets: Vec<(FileAddr, u64)>,
}

impl<W: TerminatingWrite + Write> CompositeWrite<W> {
@@ -47,7 +47,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
pub fn wrap(w: W) -> CompositeWrite<W> {
CompositeWrite {
write: CountingWriter::wrap(w),
offsets: HashMap::new(),
offsets: Vec::new(),
}
}

@@ -60,8 +60,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
let offset = self.write.written_bytes();
let file_addr = FileAddr::new(field, idx);
assert!(!self.offsets.contains_key(&file_addr));
self.offsets.insert(file_addr, offset);
assert!(!self.offsets.iter().any(|el| el.0 == file_addr));
self.offsets.push((file_addr, offset));
&mut self.write
}

@@ -73,16 +73,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_offset = self.write.written_bytes();
VInt(self.offsets.len() as u64).serialize(&mut self.write)?;

let mut offset_fields: Vec<_> = self
.offsets
.iter()
.map(|(file_addr, offset)| (*offset, *file_addr))
.collect();

offset_fields.sort();

let mut prev_offset = 0;
for (offset, file_addr) in offset_fields {
for (file_addr, offset) in self.offsets {
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
file_addr.serialize(&mut self.write)?;
prev_offset = offset;
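
Switching `offsets` from a `HashMap` to a `Vec` preserves insertion order, and since the writer only ever appends, the offsets are already sorted; that is what lets `close` delta-encode them as `VInt`s without the old sort step. An illustrative sketch of that delta encoding on plain `u64`s (sample offsets assumed):

fn delta_roundtrip_demo() {
    // Encode: store differences between consecutive offsets.
    let offsets: Vec<u64> = vec![0, 10, 25, 100];
    let mut prev = 0u64;
    let deltas: Vec<u64> = offsets
        .iter()
        .map(|&off| { let d = off - prev; prev = off; d })
        .collect();
    assert_eq!(deltas, vec![0, 10, 15, 75]);

    // Decode: rebuild the absolute offsets by prefix sum.
    let mut acc = 0u64;
    let decoded: Vec<u64> = deltas.iter().map(|&d| { acc += d; acc }).collect();
    assert_eq!(decoded, offsets);
}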
@@ -106,6 +98,14 @@ pub struct CompositeFile {
offsets_index: HashMap<FileAddr, Range<usize>>,
}

impl std::fmt::Debug for CompositeFile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CompositeFile")
.field("offsets_index", &self.offsets_index)
.finish()
}
}

impl CompositeFile {
/// Opens a composite file stored in a given
/// `FileSlice`.
@@ -233,4 +233,56 @@ mod test {
}
Ok(())
}

#[test]
fn test_composite_file_bug() -> crate::Result<()> {
let path = Path::new("test_path");
let directory = RamDirectory::create();
{
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
let mut write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 0);
VInt(32431123u64).serialize(&mut write)?;
write.flush()?;
let write = composite_write.for_field_with_idx(Field::from_field_id(1u32), 1);
write.flush()?;

let mut write = composite_write.for_field_with_idx(Field::from_field_id(0u32), 0);
VInt(1_000_000).serialize(&mut write)?;
write.flush()?;

composite_write.close()?;
}
{
let r = directory.open_read(path)?;
let composite_file = CompositeFile::open(&r)?;
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(1u32), 0)
.unwrap()
.read_bytes()?;
let mut file0_buf = file.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf)?.0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(1u32), 1)
.unwrap()
.read_bytes()?;
let file = file.as_slice();
assert_eq!(file.len(), 0);
}
{
let file = composite_file
.open_read_with_idx(Field::from_field_id(0u32), 0)
.unwrap()
.read_bytes()?;
let file = file.as_slice();
assert_eq!(file.len(), 3);
}
}
Ok(())
}
}

@@ -1,5 +1,7 @@
use fastfield_codecs::Column;

use crate::directory::{FileSlice, OwnedBytes};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, MultiValueLength};
use crate::fastfield::{DynamicFastFieldReader, MultiValueLength};
use crate::DocId;

/// Reader for byte array fast fields
@@ -28,8 +30,9 @@ impl BytesFastFieldReader {
}

fn range(&self, doc: DocId) -> (usize, usize) {
let start = self.idx_reader.get(doc) as usize;
let stop = self.idx_reader.get(doc + 1) as usize;
let idx = doc as u64;
let start = self.idx_reader.get_val(idx) as usize;
let stop = self.idx_reader.get_val(idx + 1) as usize;
(start, stop)
}

@@ -1,80 +1,130 @@
use std::io::{self, Write};
use std::num::NonZeroU64;

use common::BinarySerializable;
use fastdivide::DividerU64;
use fastfield_codecs::FastFieldCodecReader;
use gcd::Gcd;
use fastfield_codecs::{Column, FastFieldCodec};
use ownedbytes::OwnedBytes;

pub const GCD_DEFAULT: u64 = 1;
pub const GCD_CODEC_ID: u8 = 4;

/// Wrapper for accessing a fastfield.
///
/// Holds the data and the codec to read the data.
#[derive(Clone)]
pub struct GCDFastFieldCodec<CodecReader> {
gcd: u64,
min_value: u64,
pub struct GCDReader<CodecReader: Column> {
gcd_params: GCDParams,
reader: CodecReader,
}
impl<C: FastFieldCodecReader + Clone> FastFieldCodecReader for GCDFastFieldCodec<C> {
/// Opens a fast field given the bytes.
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self> {
let (header, mut footer) = bytes.split_at(bytes.len() - 16);
let gcd = u64::deserialize(&mut footer)?;
let min_value = u64::deserialize(&mut footer)?;
let reader = C::open_from_bytes(header)?;

Ok(GCDFastFieldCodec {
gcd,
min_value,
reader,
})
#[derive(Debug, Clone, Copy)]
struct GCDParams {
gcd: u64,
min_value: u64,
num_vals: u64,
}

impl GCDParams {
pub fn eval(&self, val: u64) -> u64 {
self.min_value + self.gcd * val
}
}
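
In other words, the wrapped codec only ever sees normalized values `(val - min_value) / gcd`, and `eval` is the inverse map. A small sketch with assumed parameters (module-local, since `GCDParams` is private):

fn gcd_params_demo() {
    let params = GCDParams { gcd: 1000, min_value: 3000, num_vals: 3 };
    for &val in &[3000u64, 5000, 9000] {
        // What the wrapped codec stores for this value.
        let stored = (val - params.min_value) / params.gcd;
        // eval restores the original: min_value + gcd * stored.
        assert_eq!(params.eval(stored), val);
    }
}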

impl BinarySerializable for GCDParams {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.gcd.serialize(writer)?;
self.min_value.serialize(writer)?;
self.num_vals.serialize(writer)?;
Ok(())
}

fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let gcd: u64 = u64::deserialize(reader)?;
let min_value: u64 = u64::deserialize(reader)?;
let num_vals: u64 = u64::deserialize(reader)?;
Ok(Self {
gcd,
min_value,
num_vals,
})
}
}

pub fn open_gcd_from_bytes<WrappedCodec: FastFieldCodec>(
bytes: OwnedBytes,
) -> io::Result<GCDReader<WrappedCodec::Reader>> {
let footer_offset = bytes.len() - 24;
let (body, mut footer) = bytes.split(footer_offset);
let gcd_params = GCDParams::deserialize(&mut footer)?;
let reader: WrappedCodec::Reader = WrappedCodec::open_from_bytes(body)?;
Ok(GCDReader { gcd_params, reader })
}

impl<C: Column + Clone> Column for GCDReader<C> {
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let mut data = self.reader.get_u64(doc, data);
data *= self.gcd;
data += self.min_value;
data
fn get_val(&self, doc: u64) -> u64 {
let val = self.reader.get_val(doc);
self.gcd_params.eval(val)
}

fn min_value(&self) -> u64 {
self.min_value + self.reader.min_value() * self.gcd
self.gcd_params.eval(self.reader.min_value())
}

fn max_value(&self) -> u64 {
self.min_value + self.reader.max_value() * self.gcd
self.gcd_params.eval(self.reader.max_value())
}
fn num_vals(&self) -> u64 {
self.gcd_params.num_vals
}
}

pub fn write_gcd_header<W: Write>(field_write: &mut W, min_value: u64, gcd: u64) -> io::Result<()> {
pub fn write_gcd_header<W: Write>(
field_write: &mut W,
min_value: u64,
gcd: u64,
num_vals: u64,
) -> io::Result<()> {
gcd.serialize(field_write)?;
min_value.serialize(field_write)?;
num_vals.serialize(field_write)?;
Ok(())
}
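
Note how this header lines up with `open_gcd_from_bytes` above: three `u64`s serialized with `BinarySerializable` should occupy exactly the 24 trailing bytes it splits off. A hedged sanity-check sketch (assuming `u64::serialize` writes 8 bytes, as tantivy's `common` crate does):

fn gcd_footer_len_demo() -> std::io::Result<()> {
    let mut footer: Vec<u8> = Vec::new();
    write_gcd_header(&mut footer, 3000u64, 1000u64, 3u64)?;
    // Matches the `bytes.len() - 24` split in open_gcd_from_bytes.
    assert_eq!(footer.len(), 24);
    Ok(())
}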

/// Compute the gcd of two non-null numbers.
///
/// It is recommended, but not required, to feed values such that `large >= small`.
fn compute_gcd(mut large: NonZeroU64, mut small: NonZeroU64) -> NonZeroU64 {
loop {
let rem: u64 = large.get() % small;
if let Some(new_small) = NonZeroU64::new(rem) {
(large, small) = (small, new_small);
} else {
return small;
}
}
}
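
This is Euclid's algorithm on remainders, with `NonZeroU64` making the division-by-zero case unrepresentable. For example:

fn compute_gcd_demo() {
    use std::num::NonZeroU64;

    let a = NonZeroU64::new(10).unwrap();
    let b = NonZeroU64::new(25).unwrap();
    assert_eq!(compute_gcd(a, b).get(), 5);
    assert_eq!(compute_gcd(b, a).get(), 5); // argument order does not matter
}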

// Find the GCD for an iterator of numbers.
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<u64> {
let mut numbers = numbers.filter(|n| *n != 0);
let mut gcd = numbers.next()?;
if gcd == 1 {
return Some(1);
pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<NonZeroU64> {
let mut numbers = numbers.flat_map(NonZeroU64::new);
let mut gcd: NonZeroU64 = numbers.next()?;
if gcd.get() == 1 {
return Some(gcd);
}

let mut gcd_divider = DividerU64::divide_by(gcd);
let mut gcd_divider = DividerU64::divide_by(gcd.get());
for val in numbers {
let remainder = val - (gcd_divider.divide(val)) * gcd;
let remainder = val.get() - (gcd_divider.divide(val.get())) * gcd.get();
if remainder == 0 {
continue;
}
gcd = gcd.gcd(val);
if gcd == 1 {
return Some(1);
gcd = compute_gcd(val, gcd);
if gcd.get() == 1 {
return Some(gcd);
}

gcd_divider = DividerU64::divide_by(gcd);
gcd_divider = DividerU64::divide_by(gcd.get());
}
Some(gcd)
}
@@ -82,19 +132,23 @@ pub fn find_gcd(numbers: impl Iterator<Item = u64>) -> Option<u64> {
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::num::NonZeroU64;
use std::path::Path;
use std::time::{Duration, SystemTime};

use common::HasLen;
use fastfield_codecs::Column;

use crate::directory::{CompositeFile, RamDirectory, WritePtr};
use crate::fastfield::gcd::compute_gcd;
use crate::fastfield::serializer::FastFieldCodecEnableCheck;
use crate::fastfield::tests::{FIELD, FIELDI64, SCHEMA, SCHEMAI64};
use crate::fastfield::{
find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecName,
FastFieldReader, FastFieldsWriter, ALL_CODECS,
find_gcd, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldCodecType,
FastFieldsWriter, ALL_CODECS,
};
use crate::schema::Schema;
use crate::Directory;
use crate::schema::{Cardinality, Schema};
use crate::{DateOptions, DatePrecision, DateTime, Directory};

fn get_index(
docs: &[crate::Document],
@@ -120,33 +174,33 @@ mod tests {
}

fn test_fastfield_gcd_i64_with_codec(
codec_name: FastFieldCodecName,
code_type: FastFieldCodecType,
num_vals: usize,
) -> crate::Result<()> {
let path = Path::new("test");
let mut docs = vec![];
for i in 1..=num_vals {
let val = i as i64 * 1000i64;
let val = (i as i64 - 5) * 1000i64;
docs.push(doc!(*FIELDI64=>val));
}
let directory = get_index(&docs, &SCHEMAI64, codec_name.clone().into())?;
let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
let file = directory.open_read(path).unwrap();
// assert_eq!(file.len(), 118);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<i64>::open(file)?;
assert_eq!(fast_field_reader.get(0), 1000i64);
assert_eq!(fast_field_reader.get(1), 2000i64);
assert_eq!(fast_field_reader.get(2), 3000i64);
assert_eq!(fast_field_reader.max_value(), num_vals as i64 * 1000);
assert_eq!(fast_field_reader.min_value(), 1000i64);

assert_eq!(fast_field_reader.get_val(0), -4000i64);
assert_eq!(fast_field_reader.get_val(1), -3000i64);
assert_eq!(fast_field_reader.get_val(2), -2000i64);
assert_eq!(fast_field_reader.max_value(), (num_vals as i64 - 5) * 1000);
assert_eq!(fast_field_reader.min_value(), -4000i64);
let file = directory.open_read(path).unwrap();

// Can't apply gcd
let path = Path::new("test");
docs.pop();
docs.push(doc!(*FIELDI64=>2001i64));
let directory = get_index(&docs, &SCHEMAI64, codec_name.into())?;
let directory = get_index(&docs, &SCHEMAI64, code_type.into())?;
let file2 = directory.open_read(path).unwrap();
assert!(file2.len() > file.len());

@@ -155,14 +209,14 @@ mod tests {

#[test]
fn test_fastfield_gcd_i64() -> crate::Result<()> {
for codec_name in ALL_CODECS {
test_fastfield_gcd_i64_with_codec(codec_name.clone(), 5005)?;
for &code_type in ALL_CODECS {
test_fastfield_gcd_i64_with_codec(code_type, 5500)?;
}
Ok(())
}

fn test_fastfield_gcd_u64_with_codec(
codec_name: FastFieldCodecName,
code_type: FastFieldCodecType,
num_vals: usize,
) -> crate::Result<()> {
let path = Path::new("test");
@@ -171,15 +225,14 @@ mod tests {
let val = i as u64 * 1000u64;
docs.push(doc!(*FIELD=>val));
}
let directory = get_index(&docs, &SCHEMA, codec_name.clone().into())?;
let directory = get_index(&docs, &SCHEMA, code_type.into())?;
let file = directory.open_read(path).unwrap();
// assert_eq!(file.len(), 118);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
assert_eq!(fast_field_reader.get(0), 1000u64);
assert_eq!(fast_field_reader.get(1), 2000u64);
assert_eq!(fast_field_reader.get(2), 3000u64);
assert_eq!(fast_field_reader.get_val(0), 1000u64);
assert_eq!(fast_field_reader.get_val(1), 2000u64);
assert_eq!(fast_field_reader.get_val(2), 3000u64);
assert_eq!(fast_field_reader.max_value(), num_vals as u64 * 1000);
assert_eq!(fast_field_reader.min_value(), 1000u64);
let file = directory.open_read(path).unwrap();
@@ -188,7 +241,7 @@ mod tests {
let path = Path::new("test");
docs.pop();
docs.push(doc!(*FIELDI64=>2001u64));
let directory = get_index(&docs, &SCHEMA, codec_name.into())?;
let directory = get_index(&docs, &SCHEMA, code_type.into())?;
let file2 = directory.open_read(path).unwrap();
assert!(file2.len() > file.len());

@@ -197,8 +250,8 @@ mod tests {

#[test]
fn test_fastfield_gcd_u64() -> crate::Result<()> {
for codec_name in ALL_CODECS {
test_fastfield_gcd_u64_with_codec(codec_name.clone(), 5005)?;
for &code_type in ALL_CODECS {
test_fastfield_gcd_u64_with_codec(code_type, 5500)?;
}
Ok(())
}
@@ -206,19 +259,103 @@ mod tests {
#[test]
pub fn test_fastfield2() {
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get(0), 100);
assert_eq!(test_fastfield.get(1), 200);
assert_eq!(test_fastfield.get(2), 300);
assert_eq!(test_fastfield.get_val(0), 100);
assert_eq!(test_fastfield.get_val(1), 200);
assert_eq!(test_fastfield.get_val(2), 300);
}

#[test]
pub fn test_gcd_date() -> crate::Result<()> {
let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Seconds)?;
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Bitpacked, DatePrecision::Microseconds)?;
assert!(size_prec_sec < size_prec_micro);

let size_prec_sec =
test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Seconds)?;
let size_prec_micro =
test_gcd_date_with_codec(FastFieldCodecType::Linear, DatePrecision::Microseconds)?;
assert!(size_prec_sec < size_prec_micro);

Ok(())
}

fn test_gcd_date_with_codec(
codec_type: FastFieldCodecType,
precision: DatePrecision,
) -> crate::Result<usize> {
let time1 = DateTime::from_timestamp_micros(
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
let time2 = DateTime::from_timestamp_micros(
SystemTime::now()
.checked_sub(Duration::from_micros(4111))
.unwrap()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);

let time3 = DateTime::from_timestamp_micros(
SystemTime::now()
.checked_sub(Duration::from_millis(2000))
.unwrap()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);

let mut schema_builder = Schema::builder();
let date_options = DateOptions::default()
.set_fast(Cardinality::SingleValue)
.set_precision(precision);
let field = schema_builder.add_date_field("field", date_options);
let schema = schema_builder.build();

let docs = vec![doc!(field=>time1), doc!(field=>time2), doc!(field=>time3)];

let directory = get_index(&docs, &schema, codec_type.into())?;
let path = Path::new("test");
let file = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let len = file.len();
let test_fastfield = DynamicFastFieldReader::<DateTime>::open(file)?;

assert_eq!(test_fastfield.get_val(0), time1.truncate(precision));
assert_eq!(test_fastfield.get_val(1), time2.truncate(precision));
assert_eq!(test_fastfield.get_val(2), time3.truncate(precision));
Ok(len)
}

#[test]
fn test_compute_gcd() {
let test_compute_gcd_aux = |large, small, expected| {
let large = NonZeroU64::new(large).unwrap();
let small = NonZeroU64::new(small).unwrap();
let expected = NonZeroU64::new(expected).unwrap();
assert_eq!(compute_gcd(small, large), expected);
assert_eq!(compute_gcd(large, small), expected);
};
test_compute_gcd_aux(1, 4, 1);
test_compute_gcd_aux(2, 4, 2);
test_compute_gcd_aux(10, 25, 5);
test_compute_gcd_aux(25, 25, 25);
}

#[test]
fn find_gcd_test() {
assert_eq!(find_gcd([0].into_iter()), None);
assert_eq!(find_gcd([0, 10].into_iter()), Some(10));
assert_eq!(find_gcd([10, 0].into_iter()), Some(10));
assert_eq!(find_gcd([0, 10].into_iter()), NonZeroU64::new(10));
assert_eq!(find_gcd([10, 0].into_iter()), NonZeroU64::new(10));
assert_eq!(find_gcd([].into_iter()), None);
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), Some(5));
assert_eq!(find_gcd([15, 16, 10].into_iter()), Some(1));
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), Some(5));
assert_eq!(find_gcd([15, 30, 5, 10].into_iter()), NonZeroU64::new(5));
assert_eq!(find_gcd([15, 16, 10].into_iter()), NonZeroU64::new(1));
assert_eq!(find_gcd([0, 5, 5, 5].into_iter()), NonZeroU64::new(5));
assert_eq!(find_gcd([0, 0].into_iter()), None);
}
}

@@ -20,16 +20,18 @@
//!
//! Read access performance is comparable to that of an array lookup.

use fastfield_codecs::FastFieldCodecType;

pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet};
pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub(crate) use self::gcd::{find_gcd, GCDFastFieldCodec, GCD_CODEC_ID, GCD_DEFAULT};
pub(crate) use self::gcd::{find_gcd, GCDReader, GCD_DEFAULT};
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
pub use self::reader::{DynamicFastFieldReader, FastFieldReader};
pub use self::reader::DynamicFastFieldReader;
pub use self::readers::FastFieldReaders;
pub(crate) use self::readers::{type_and_cardinality, FastType};
pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats};
pub use self::serializer::{Column, CompositeFastFieldSerializer, FastFieldStats};
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::schema::{Cardinality, FieldType, Type, Value};
use crate::{DateTime, DocId};
@@ -45,16 +47,10 @@ mod readers;
mod serializer;
mod writer;

#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub(crate) enum FastFieldCodecName {
Bitpacked,
LinearInterpol,
BlockwiseLinearInterpol,
}
pub(crate) const ALL_CODECS: &[FastFieldCodecName; 3] = &[
FastFieldCodecName::Bitpacked,
FastFieldCodecName::LinearInterpol,
FastFieldCodecName::BlockwiseLinearInterpol,
pub(crate) const ALL_CODECS: &[FastFieldCodecType; 3] = &[
FastFieldCodecType::Bitpacked,
FastFieldCodecType::Linear,
FastFieldCodecType::BlockwiseLinear,
];

/// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
@@ -302,9 +298,9 @@ mod tests {
#[test]
pub fn test_fastfield() {
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get(0), 100);
assert_eq!(test_fastfield.get(1), 200);
assert_eq!(test_fastfield.get(2), 300);
assert_eq!(test_fastfield.get_val(0u64), 100);
assert_eq!(test_fastfield.get_val(1u64), 200);
assert_eq!(test_fastfield.get_val(2u64), 300);
}

#[test]
@@ -330,13 +326,13 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 37);
assert_eq!(file.len(), 45);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
assert_eq!(fast_field_reader.get(0), 13u64);
assert_eq!(fast_field_reader.get(1), 14u64);
assert_eq!(fast_field_reader.get(2), 2u64);
assert_eq!(fast_field_reader.get_val(0), 13u64);
assert_eq!(fast_field_reader.get_val(1), 14u64);
assert_eq!(fast_field_reader.get_val(2), 2u64);
Ok(())
}

@@ -361,20 +357,20 @@ mod tests {
serializer.close()?;
}
let file = directory.open_read(path)?;
assert_eq!(file.len(), 62);
assert_eq!(file.len(), 70);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
assert_eq!(fast_field_reader.get(0), 4u64);
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
assert_eq!(fast_field_reader.get(2), 3_052u64);
assert_eq!(fast_field_reader.get(3), 9002u64);
assert_eq!(fast_field_reader.get(4), 15_001u64);
assert_eq!(fast_field_reader.get(5), 777u64);
assert_eq!(fast_field_reader.get(6), 1_002u64);
assert_eq!(fast_field_reader.get(7), 1_501u64);
assert_eq!(fast_field_reader.get(8), 215u64);
assert_eq!(fast_field_reader.get_val(0), 4u64);
assert_eq!(fast_field_reader.get_val(1), 14_082_001u64);
assert_eq!(fast_field_reader.get_val(2), 3_052u64);
assert_eq!(fast_field_reader.get_val(3), 9002u64);
assert_eq!(fast_field_reader.get_val(4), 15_001u64);
assert_eq!(fast_field_reader.get_val(5), 777u64);
assert_eq!(fast_field_reader.get_val(6), 1_002u64);
assert_eq!(fast_field_reader.get_val(7), 1_501u64);
assert_eq!(fast_field_reader.get_val(8), 215u64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -397,13 +393,13 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 35);
|
||||
assert_eq!(file.len(), 43);
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
|
||||
for doc in 0..10_000 {
|
||||
assert_eq!(fast_field_reader.get(doc), 100_000u64);
|
||||
assert_eq!(fast_field_reader.get_val(doc), 100_000u64);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -429,15 +425,15 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 80043);
|
||||
assert_eq!(file.len(), 80051);
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
|
||||
assert_eq!(fast_field_reader.get(0), 0u64);
|
||||
assert_eq!(fast_field_reader.get_val(0), 0u64);
|
||||
for doc in 1..10_001 {
|
||||
assert_eq!(
|
||||
fast_field_reader.get(doc),
|
||||
fast_field_reader.get_val(doc),
|
||||
5_000_000_000_000_000_000u64 + doc as u64 - 1u64
|
||||
);
|
||||
}
|
||||
@@ -469,7 +465,8 @@ mod tests {
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
// assert_eq!(file.len(), 17710 as usize); //bitpacked size
|
||||
assert_eq!(file.len(), 10175_usize); // linear interpol size
|
||||
// assert_eq!(file.len(), 10175_usize); // linear interpol size
|
||||
assert_eq!(file.len(), 75_usize); // linear interpol size after calc improvement
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file)?;
|
||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||
@@ -478,7 +475,7 @@ mod tests {
|
||||
assert_eq!(fast_field_reader.min_value(), -100i64);
|
||||
assert_eq!(fast_field_reader.max_value(), 9_999i64);
|
||||
for (doc, i) in (-100i64..10_000i64).enumerate() {
|
||||
assert_eq!(fast_field_reader.get(doc as u32), i);
|
||||
assert_eq!(fast_field_reader.get_val(doc as u64), i);
|
||||
}
|
||||
let mut buffer = vec![0i64; 100];
|
||||
fast_field_reader.get_range(53, &mut buffer[..]);
|
||||
@@ -514,7 +511,7 @@ mod tests {
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(i64_field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
|
||||
assert_eq!(fast_field_reader.get(0u32), 0i64);
|
||||
assert_eq!(fast_field_reader.get_val(0), 0i64);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -554,7 +551,7 @@ mod tests {
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
|
||||
|
||||
for a in 0..n {
|
||||
assert_eq!(fast_field_reader.get(a as u32), permutation[a as usize]);
|
||||
assert_eq!(fast_field_reader.get_val(a as u64), permutation[a as usize]);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -845,19 +842,19 @@ mod tests {
|
||||
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
|
||||
let mut dates = vec![];
|
||||
{
|
||||
assert_eq!(date_fast_field.get(0u32).into_timestamp_micros(), 1i64);
|
||||
assert_eq!(date_fast_field.get_val(0).into_timestamp_micros(), 1i64);
|
||||
dates_fast_field.get_vals(0u32, &mut dates);
|
||||
assert_eq!(dates.len(), 2);
|
||||
assert_eq!(dates[0].into_timestamp_micros(), 2i64);
|
||||
assert_eq!(dates[1].into_timestamp_micros(), 3i64);
|
||||
}
|
||||
{
|
||||
assert_eq!(date_fast_field.get(1u32).into_timestamp_micros(), 4i64);
|
||||
assert_eq!(date_fast_field.get_val(1).into_timestamp_micros(), 4i64);
|
||||
dates_fast_field.get_vals(1u32, &mut dates);
|
||||
assert!(dates.is_empty());
|
||||
}
|
||||
{
|
||||
assert_eq!(date_fast_field.get(2u32).into_timestamp_micros(), 0i64);
|
||||
assert_eq!(date_fast_field.get_val(2).into_timestamp_micros(), 0i64);
|
||||
dates_fast_field.get_vals(2u32, &mut dates);
|
||||
assert_eq!(dates.len(), 2);
|
||||
assert_eq!(dates[0].into_timestamp_micros(), 5i64);
|
||||
@@ -869,10 +866,10 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_fastfield_bool() {
|
||||
let test_fastfield = DynamicFastFieldReader::<bool>::from(vec![true, false, true, false]);
|
||||
assert_eq!(test_fastfield.get(0), true);
|
||||
assert_eq!(test_fastfield.get(1), false);
|
||||
assert_eq!(test_fastfield.get(2), true);
|
||||
assert_eq!(test_fastfield.get(3), false);
|
||||
assert_eq!(test_fastfield.get_val(0), true);
|
||||
assert_eq!(test_fastfield.get_val(1), false);
|
||||
assert_eq!(test_fastfield.get_val(2), true);
|
||||
assert_eq!(test_fastfield.get_val(3), false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -899,14 +896,14 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 36);
|
||||
assert_eq!(file.len(), 44);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), true);
|
||||
assert_eq!(fast_field_reader.get(1), false);
|
||||
assert_eq!(fast_field_reader.get(2), true);
|
||||
assert_eq!(fast_field_reader.get(3), false);
|
||||
assert_eq!(fast_field_reader.get_val(0), true);
|
||||
assert_eq!(fast_field_reader.get_val(1), false);
|
||||
assert_eq!(fast_field_reader.get_val(2), true);
|
||||
assert_eq!(fast_field_reader.get_val(3), false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -935,13 +932,13 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 48);
|
||||
assert_eq!(file.len(), 56);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
|
||||
for i in 0..25 {
|
||||
assert_eq!(fast_field_reader.get(i * 2), true);
|
||||
assert_eq!(fast_field_reader.get(i * 2 + 1), false);
|
||||
assert_eq!(fast_field_reader.get_val(i * 2), true);
|
||||
assert_eq!(fast_field_reader.get_val(i * 2 + 1), false);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -969,11 +966,11 @@ mod tests {
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(path).unwrap();
|
||||
assert_eq!(file.len(), 35);
|
||||
assert_eq!(file.len(), 43);
|
||||
let composite_file = CompositeFile::open(&file)?;
|
||||
let file = composite_file.open_read(field).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<bool>::open(file)?;
|
||||
assert_eq!(fast_field_reader.get(0), false);
|
||||
assert_eq!(fast_field_reader.get_val(0), false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -981,37 +978,20 @@ mod tests {
|
||||
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench {
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use test::{self, Bencher};
|
||||
|
||||
use super::tests::{generate_permutation, FIELD, SCHEMA};
|
||||
use super::tests::generate_permutation;
|
||||
use super::*;
|
||||
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
|
||||
use crate::fastfield::tests::generate_permutation_gcd;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_linear_veclookup(b: &mut Bencher) {
|
||||
fn bench_intfastfield_jumpy_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let n = test::black_box(7000u32);
|
||||
let mut a = 0u64;
|
||||
for i in (0u32..n / 7).map(|v| v * 7) {
|
||||
a ^= permutation[i as usize];
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_veclookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let n = test::black_box(1000u32);
|
||||
let mut a = 0u64;
|
||||
for _ in 0u32..n {
|
||||
for _ in 0..n {
|
||||
a = permutation[a as usize];
|
||||
}
|
||||
a
|
||||
@@ -1019,102 +999,83 @@ mod bench {
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_linear_fflookup(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
fn bench_intfastfield_jumpy_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
for &x in &permutation {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||
let n = permutation.len();
|
||||
let column = DynamicFastFieldReader::from(permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for _ in 0..n {
|
||||
a = column.get_val(a as u64);
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let n = test::black_box(7000u32);
|
||||
let mut a = 0u64;
|
||||
for i in (0u32..n / 7).map(|val| val * 7) {
|
||||
a ^= fast_field_reader.get(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_fflookup(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
fn bench_intfastfield_stride7_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
for &x in &permutation {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||
let n = permutation.len();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += permutation[i as usize];
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u32;
|
||||
for i in 0u32..permutation.len() as u32 {
|
||||
a = fast_field_reader.get(i) as u32;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_fflookup_gcd(b: &mut Bencher) {
|
||||
let path = Path::new("test");
|
||||
fn bench_intfastfield_stride7_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column = DynamicFastFieldReader::from(permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in (0..n / 7).map(|val| val * 7) {
|
||||
a += column.get_val(i as u64);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
let n = permutation.len();
|
||||
let column = DynamicFastFieldReader::from(permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0u64..n as u64 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_fflookup_gcd(b: &mut Bencher) {
|
||||
let permutation = generate_permutation_gcd();
|
||||
let directory: RamDirectory = RamDirectory::create();
|
||||
{
|
||||
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
|
||||
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
|
||||
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
|
||||
for &x in &permutation {
|
||||
fast_field_writers.add_document(&doc!(*FIELD=>x));
|
||||
let n = permutation.len();
|
||||
let column = DynamicFastFieldReader::from(permutation);
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..n as u64 {
|
||||
a += column.get_val(i);
|
||||
}
|
||||
fast_field_writers
|
||||
.serialize(&mut serializer, &HashMap::new(), None)
|
||||
.unwrap();
|
||||
serializer.close().unwrap();
|
||||
}
|
||||
let file = directory.open_read(&path).unwrap();
|
||||
{
|
||||
let fast_fields_composite = CompositeFile::open(&file).unwrap();
|
||||
let data = fast_fields_composite.open_read(*FIELD).unwrap();
|
||||
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
|
||||
a
|
||||
});
|
||||
}
|
||||
|
||||
b.iter(|| {
|
||||
let mut a = 0u32;
|
||||
for i in 0u32..permutation.len() as u32 {
|
||||
a = fast_field_reader.get(i) as u32;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
#[bench]
|
||||
fn bench_intfastfield_scan_all_vec(b: &mut Bencher) {
|
||||
let permutation = generate_permutation();
|
||||
b.iter(|| {
|
||||
let mut a = 0u64;
|
||||
for i in 0..permutation.len() {
|
||||
a += permutation[i as usize] as u64;
|
||||
}
|
||||
a
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use std::ops::Range;
|
||||
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue, MultiValueLength};
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastValue, MultiValueLength};
|
||||
use crate::DocId;
|
||||
|
||||
/// Reader for a multivalued `u64` fast field.
|
||||
@@ -31,8 +33,9 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
|
||||
/// to the given document are `start..end`.
|
||||
#[inline]
|
||||
fn range(&self, doc: DocId) -> Range<u64> {
|
||||
let start = self.idx_reader.get(doc);
|
||||
let end = self.idx_reader.get(doc + 1);
|
||||
let idx = doc as u64;
|
||||
let start = self.idx_reader.get_val(idx);
|
||||
let end = self.idx_reader.get_val(idx + 1);
|
||||
start..end
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::io;
|
||||
use fnv::FnvHashMap;
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy;
|
||||
use crate::fastfield::serializer::BitpackedSerializerLegacy;
|
||||
use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType, FastValue};
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
use crate::postings::UnorderedTermId;
|
||||
@@ -171,7 +171,7 @@ impl MultiValuedFastFieldWriter {
|
||||
}
|
||||
{
|
||||
// writing the values themselves.
|
||||
let mut value_serializer: BitpackedFastFieldSerializerLegacy<'_, _>;
|
||||
let mut value_serializer: BitpackedSerializerLegacy<'_, _>;
|
||||
if let Some(mapping) = mapping_opt {
|
||||
value_serializer = serializer.new_u64_fast_field_with_idx(
|
||||
self.field,
|
||||
|
||||
@@ -2,63 +2,18 @@ use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::path::Path;
|
||||
|
||||
use fastfield_codecs::bitpacked::{
|
||||
BitpackedFastFieldReader as BitpackedReader, BitpackedFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::linearinterpol::{
|
||||
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::multilinearinterpol::{
|
||||
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
|
||||
};
|
||||
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecSerializer};
|
||||
use common::BinarySerializable;
|
||||
use fastfield_codecs::bitpacked::{BitpackedCodec, BitpackedReader};
|
||||
use fastfield_codecs::blockwise_linear::{BlockwiseLinearCodec, BlockwiseLinearReader};
|
||||
use fastfield_codecs::linear::{LinearCodec, LinearReader};
|
||||
use fastfield_codecs::{Column, FastFieldCodec, FastFieldCodecType};
|
||||
|
||||
use super::{FastValue, GCDFastFieldCodec, GCD_CODEC_ID};
|
||||
use super::gcd::open_gcd_from_bytes;
|
||||
use super::FastValue;
|
||||
use crate::directory::{CompositeFile, Directory, FileSlice, OwnedBytes, RamDirectory, WritePtr};
|
||||
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter, GCDReader};
|
||||
use crate::schema::{Schema, FAST};
|
||||
use crate::DocId;
|
||||
|
||||
/// FastFieldReader is the trait to access fast field data.
|
||||
pub trait FastFieldReader<Item: FastValue>: Clone {
|
||||
/// Return the value associated to the given document.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `doc` is greater than the segment
|
||||
fn get(&self, doc: DocId) -> Item;
|
||||
|
||||
/// Fills an output buffer with the fast field values
|
||||
/// associated with the `DocId` going from
|
||||
/// `start` to `start + output.len()`.
|
||||
///
|
||||
/// Regardless of the type of `Item`, this method works
|
||||
/// - transmuting the output array
|
||||
/// - extracting the `Item`s as if they were `u64`
|
||||
/// - possibly converting the `u64` value to the right type.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `start + output.len()` is greater than
|
||||
/// the segment's `maxdoc`.
|
||||
fn get_range(&self, start: u64, output: &mut [Item]);
|
||||
|
||||
/// Returns the minimum value for this fast field.
|
||||
///
|
||||
/// The min value does not take in account of possible
|
||||
/// deleted document, and should be considered as a lower bound
|
||||
/// of the actual minimum value.
|
||||
fn min_value(&self) -> Item;
|
||||
|
||||
/// Returns the maximum value for this fast field.
|
||||
///
|
||||
/// The max value does not take in account of possible
|
||||
/// deleted document, and should be considered as an upper bound
|
||||
/// of the actual maximum value.
|
||||
fn max_value(&self) -> Item;
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// DynamicFastFieldReader wraps different readers to access
|
||||
@@ -67,145 +22,121 @@ pub enum DynamicFastFieldReader<Item: FastValue> {
|
||||
/// Bitpacked compressed fastfield data.
|
||||
Bitpacked(FastFieldReaderCodecWrapper<Item, BitpackedReader>),
|
||||
/// Linear interpolated values + bitpacked
|
||||
LinearInterpol(FastFieldReaderCodecWrapper<Item, LinearInterpolFastFieldReader>),
|
||||
Linear(FastFieldReaderCodecWrapper<Item, LinearReader>),
|
||||
/// Blockwise linear interpolated values + bitpacked
|
||||
MultiLinearInterpol(FastFieldReaderCodecWrapper<Item, MultiLinearInterpolFastFieldReader>),
|
||||
BlockwiseLinear(FastFieldReaderCodecWrapper<Item, BlockwiseLinearReader>),
|
||||
|
||||
/// GCD and Bitpacked compressed fastfield data.
|
||||
BitpackedGCD(FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<BitpackedReader>>),
|
||||
BitpackedGCD(FastFieldReaderCodecWrapper<Item, GCDReader<BitpackedReader>>),
|
||||
/// GCD and Linear interpolated values + bitpacked
|
||||
LinearInterpolGCD(
|
||||
FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<LinearInterpolFastFieldReader>>,
|
||||
),
|
||||
LinearGCD(FastFieldReaderCodecWrapper<Item, GCDReader<LinearReader>>),
|
||||
/// GCD and Blockwise linear interpolated values + bitpacked
|
||||
MultiLinearInterpolGCD(
|
||||
FastFieldReaderCodecWrapper<Item, GCDFastFieldCodec<MultiLinearInterpolFastFieldReader>>,
|
||||
),
|
||||
BlockwiseLinearGCD(FastFieldReaderCodecWrapper<Item, GCDReader<BlockwiseLinearReader>>),
|
||||
}
|
||||
|
||||
impl<Item: FastValue> DynamicFastFieldReader<Item> {
|
||||
/// Returns correct the reader wrapped in the `DynamicFastFieldReader` enum for the data.
|
||||
pub fn open_from_id(
|
||||
mut bytes: OwnedBytes,
|
||||
codec_id: u8,
|
||||
codec_type: FastFieldCodecType,
|
||||
) -> crate::Result<DynamicFastFieldReader<Item>> {
|
||||
let reader = match codec_id {
|
||||
BitpackedFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::Bitpacked(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
BitpackedReader,
|
||||
>::open_from_bytes(bytes)?)
|
||||
let reader = match codec_type {
|
||||
FastFieldCodecType::Bitpacked => {
|
||||
DynamicFastFieldReader::Bitpacked(BitpackedCodec::open_from_bytes(bytes)?.into())
|
||||
}
|
||||
LinearInterpolFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::LinearInterpol(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
LinearInterpolFastFieldReader,
|
||||
>::open_from_bytes(bytes)?)
|
||||
FastFieldCodecType::Linear => {
|
||||
DynamicFastFieldReader::Linear(LinearCodec::open_from_bytes(bytes)?.into())
|
||||
}
|
||||
MultiLinearInterpolFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::MultiLinearInterpol(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
MultiLinearInterpolFastFieldReader,
|
||||
>::open_from_bytes(
|
||||
bytes
|
||||
)?)
|
||||
}
|
||||
_ if codec_id == GCD_CODEC_ID => {
|
||||
let codec_id = bytes.read_u8();
|
||||
|
||||
match codec_id {
|
||||
BitpackedFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::BitpackedGCD(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
GCDFastFieldCodec<BitpackedReader>,
|
||||
>::open_from_bytes(
|
||||
bytes
|
||||
)?)
|
||||
}
|
||||
LinearInterpolFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::LinearInterpolGCD(FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
GCDFastFieldCodec<LinearInterpolFastFieldReader>,
|
||||
>::open_from_bytes(
|
||||
bytes
|
||||
)?)
|
||||
}
|
||||
MultiLinearInterpolFastFieldSerializer::ID => {
|
||||
DynamicFastFieldReader::MultiLinearInterpolGCD(
|
||||
FastFieldReaderCodecWrapper::<
|
||||
Item,
|
||||
GCDFastFieldCodec<MultiLinearInterpolFastFieldReader>,
|
||||
>::open_from_bytes(bytes)?,
|
||||
FastFieldCodecType::BlockwiseLinear => DynamicFastFieldReader::BlockwiseLinear(
|
||||
BlockwiseLinearCodec::open_from_bytes(bytes)?.into(),
|
||||
),
|
||||
FastFieldCodecType::Gcd => {
|
||||
let codec_type = FastFieldCodecType::deserialize(&mut bytes)?;
|
||||
match codec_type {
|
||||
FastFieldCodecType::Bitpacked => DynamicFastFieldReader::BitpackedGCD(
|
||||
open_gcd_from_bytes::<BitpackedCodec>(bytes)?.into(),
|
||||
),
|
||||
FastFieldCodecType::Linear => DynamicFastFieldReader::LinearGCD(
|
||||
open_gcd_from_bytes::<LinearCodec>(bytes)?.into(),
|
||||
),
|
||||
FastFieldCodecType::BlockwiseLinear => {
|
||||
DynamicFastFieldReader::BlockwiseLinearGCD(
|
||||
open_gcd_from_bytes::<BlockwiseLinearCodec>(bytes)?.into(),
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
panic!(
|
||||
"unknown fastfield codec id {:?}. Data corrupted or using old tantivy \
|
||||
version.",
|
||||
codec_id
|
||||
FastFieldCodecType::Gcd => {
|
||||
return Err(DataCorruption::comment_only(
|
||||
"Gcd codec wrapped into another gcd codec. This combination is not \
|
||||
allowed.",
|
||||
)
|
||||
.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
panic!(
|
||||
"unknown fastfield codec id {:?}. Data corrupted or using old tantivy version.",
|
||||
codec_id
|
||||
)
|
||||
}
|
||||
};
|
||||
Ok(reader)
|
||||
}
|
||||
|
||||
/// Returns correct the reader wrapped in the `DynamicFastFieldReader` enum for the data.
|
||||
pub fn open(file: FileSlice) -> crate::Result<DynamicFastFieldReader<Item>> {
|
||||
let mut bytes = file.read_bytes()?;
|
||||
let codec_id = bytes.read_u8();
|
||||
|
||||
Self::open_from_id(bytes, codec_id)
|
||||
let codec_type = FastFieldCodecType::deserialize(&mut bytes)?;
|
||||
Self::open_from_id(bytes, codec_type)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
impl<Item: FastValue> Column<Item> for DynamicFastFieldReader<Item> {
|
||||
#[inline]
|
||||
fn get(&self, doc: DocId) -> Item {
|
||||
fn get_val(&self, idx: u64) -> Item {
|
||||
match self {
|
||||
Self::Bitpacked(reader) => reader.get(doc),
|
||||
Self::LinearInterpol(reader) => reader.get(doc),
|
||||
Self::MultiLinearInterpol(reader) => reader.get(doc),
|
||||
Self::BitpackedGCD(reader) => reader.get(doc),
|
||||
Self::LinearInterpolGCD(reader) => reader.get(doc),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.get(doc),
|
||||
Self::Bitpacked(reader) => reader.get_val(idx),
|
||||
Self::Linear(reader) => reader.get_val(idx),
|
||||
Self::BlockwiseLinear(reader) => reader.get_val(idx),
|
||||
Self::BitpackedGCD(reader) => reader.get_val(idx),
|
||||
Self::LinearGCD(reader) => reader.get_val(idx),
|
||||
Self::BlockwiseLinearGCD(reader) => reader.get_val(idx),
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
fn get_range(&self, start: u64, output: &mut [Item]) {
|
||||
match self {
|
||||
Self::Bitpacked(reader) => reader.get_range(start, output),
|
||||
Self::LinearInterpol(reader) => reader.get_range(start, output),
|
||||
Self::MultiLinearInterpol(reader) => reader.get_range(start, output),
|
||||
Self::Linear(reader) => reader.get_range(start, output),
|
||||
Self::BlockwiseLinear(reader) => reader.get_range(start, output),
|
||||
Self::BitpackedGCD(reader) => reader.get_range(start, output),
|
||||
Self::LinearInterpolGCD(reader) => reader.get_range(start, output),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.get_range(start, output),
|
||||
Self::LinearGCD(reader) => reader.get_range(start, output),
|
||||
Self::BlockwiseLinearGCD(reader) => reader.get_range(start, output),
|
||||
}
|
||||
}
|
||||
fn min_value(&self) -> Item {
|
||||
match self {
|
||||
Self::Bitpacked(reader) => reader.min_value(),
|
||||
Self::LinearInterpol(reader) => reader.min_value(),
|
||||
Self::MultiLinearInterpol(reader) => reader.min_value(),
|
||||
Self::Linear(reader) => reader.min_value(),
|
||||
Self::BlockwiseLinear(reader) => reader.min_value(),
|
||||
Self::BitpackedGCD(reader) => reader.min_value(),
|
||||
Self::LinearInterpolGCD(reader) => reader.min_value(),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.min_value(),
|
||||
Self::LinearGCD(reader) => reader.min_value(),
|
||||
Self::BlockwiseLinearGCD(reader) => reader.min_value(),
|
||||
}
|
||||
}
|
||||
fn max_value(&self) -> Item {
|
||||
match self {
|
||||
Self::Bitpacked(reader) => reader.max_value(),
|
||||
Self::LinearInterpol(reader) => reader.max_value(),
|
||||
Self::MultiLinearInterpol(reader) => reader.max_value(),
|
||||
Self::Linear(reader) => reader.max_value(),
|
||||
Self::BlockwiseLinear(reader) => reader.max_value(),
|
||||
Self::BitpackedGCD(reader) => reader.max_value(),
|
||||
Self::LinearInterpolGCD(reader) => reader.max_value(),
|
||||
Self::MultiLinearInterpolGCD(reader) => reader.max_value(),
|
||||
Self::LinearGCD(reader) => reader.max_value(),
|
||||
Self::BlockwiseLinearGCD(reader) => reader.max_value(),
|
||||
}
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
match self {
|
||||
Self::Bitpacked(reader) => reader.num_vals(),
|
||||
Self::Linear(reader) => reader.num_vals(),
|
||||
Self::BlockwiseLinear(reader) => reader.num_vals(),
|
||||
Self::BitpackedGCD(reader) => reader.num_vals(),
|
||||
Self::LinearGCD(reader) => reader.num_vals(),
|
||||
Self::BlockwiseLinearGCD(reader) => reader.num_vals(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -216,35 +147,24 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
|
||||
#[derive(Clone)]
|
||||
pub struct FastFieldReaderCodecWrapper<Item: FastValue, CodecReader> {
|
||||
reader: CodecReader,
|
||||
bytes: OwnedBytes,
|
||||
_phantom: PhantomData<Item>,
|
||||
}
|
||||
|
||||
impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item, C> {
|
||||
/// Opens a fast field given a file.
|
||||
pub fn open(file: FileSlice) -> crate::Result<Self> {
|
||||
let mut bytes = file.read_bytes()?;
|
||||
let codec_id = bytes.read_u8();
|
||||
assert_eq!(
|
||||
BitpackedFastFieldSerializer::ID,
|
||||
codec_id,
|
||||
"Tried to open fast field as bitpacked encoded (id=1), but got serializer with \
|
||||
different id"
|
||||
);
|
||||
Self::open_from_bytes(bytes)
|
||||
}
|
||||
/// Opens a fast field given the bytes.
|
||||
pub fn open_from_bytes(bytes: OwnedBytes) -> crate::Result<Self> {
|
||||
let reader = C::open_from_bytes(bytes.as_slice())?;
|
||||
Ok(FastFieldReaderCodecWrapper {
|
||||
impl<Item: FastValue, CodecReader> From<CodecReader>
|
||||
for FastFieldReaderCodecWrapper<Item, CodecReader>
|
||||
{
|
||||
fn from(reader: CodecReader) -> Self {
|
||||
FastFieldReaderCodecWrapper {
|
||||
reader,
|
||||
bytes,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Item: FastValue, D: Column> FastFieldReaderCodecWrapper<Item, D> {
|
||||
#[inline]
|
||||
pub(crate) fn get_u64(&self, doc: u64) -> Item {
|
||||
let data = self.reader.get_u64(doc, self.bytes.as_slice());
|
||||
pub(crate) fn get_u64(&self, idx: u64) -> Item {
|
||||
let data = self.reader.get_val(idx);
|
||||
Item::from_u64(data)
|
||||
}
|
||||
|
||||
@@ -267,9 +187,7 @@ impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item,
|
||||
}
|
||||
}
|
||||
|
||||
impl<Item: FastValue, C: FastFieldCodecReader + Clone> FastFieldReader<Item>
|
||||
for FastFieldReaderCodecWrapper<Item, C>
|
||||
{
|
||||
impl<Item: FastValue, C: Column + Clone> Column<Item> for FastFieldReaderCodecWrapper<Item, C> {
|
||||
/// Return the value associated to the given document.
|
||||
///
|
||||
/// This accessor should return as fast as possible.
|
||||
@@ -278,8 +196,8 @@ impl<Item: FastValue, C: FastFieldCodecReader + Clone> FastFieldReader<Item>
|
||||
///
|
||||
/// May panic if `doc` is greater than the segment
|
||||
// `maxdoc`.
|
||||
fn get(&self, doc: DocId) -> Item {
|
||||
self.get_u64(u64::from(doc))
|
||||
fn get_val(&self, idx: u64) -> Item {
|
||||
self.get_u64(idx)
|
||||
}
|
||||
|
||||
/// Fills an output buffer with the fast field values
|
||||
@@ -316,6 +234,10 @@ impl<Item: FastValue, C: FastFieldCodecReader + Clone> FastFieldReader<Item>
|
||||
fn max_value(&self) -> Item {
|
||||
Item::from_u64(self.reader.max_value())
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.reader.num_vals()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Item: FastValue> From<Vec<Item>> for DynamicFastFieldReader<Item> {
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
use std::io::{self, Write};
|
||||
use std::num::NonZeroU64;
|
||||
|
||||
use common::{BinarySerializable, CountingWriter};
|
||||
pub use fastfield_codecs::bitpacked::{
|
||||
BitpackedFastFieldSerializer, BitpackedFastFieldSerializerLegacy,
|
||||
};
|
||||
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
|
||||
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
|
||||
pub use fastfield_codecs::{FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
|
||||
use fastdivide::DividerU64;
|
||||
pub use fastfield_codecs::bitpacked::{BitpackedCodec, BitpackedSerializerLegacy};
|
||||
use fastfield_codecs::blockwise_linear::BlockwiseLinearCodec;
|
||||
use fastfield_codecs::linear::LinearCodec;
|
||||
use fastfield_codecs::FastFieldCodecType;
|
||||
pub use fastfield_codecs::{Column, FastFieldCodec, FastFieldStats};
|
||||
|
||||
use super::{find_gcd, FastFieldCodecName, ALL_CODECS, GCD_DEFAULT};
|
||||
use super::{find_gcd, ALL_CODECS, GCD_DEFAULT};
|
||||
use crate::directory::{CompositeWrite, WritePtr};
|
||||
use crate::fastfield::gcd::write_gcd_header;
|
||||
use crate::fastfield::GCD_CODEC_ID;
|
||||
use crate::schema::Field;
|
||||
|
||||
/// `CompositeFastFieldSerializer` is in charge of serializing
|
||||
@@ -41,7 +41,7 @@ pub struct CompositeFastFieldSerializer {
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FastFieldCodecEnableCheck {
|
||||
enabled_codecs: Vec<FastFieldCodecName>,
|
||||
enabled_codecs: Vec<FastFieldCodecType>,
|
||||
}
|
||||
impl FastFieldCodecEnableCheck {
|
||||
fn allow_all() -> Self {
|
||||
@@ -49,31 +49,28 @@ impl FastFieldCodecEnableCheck {
|
||||
enabled_codecs: ALL_CODECS.to_vec(),
|
||||
}
|
||||
}
|
||||
fn is_enabled(&self, codec_name: FastFieldCodecName) -> bool {
|
||||
self.enabled_codecs.contains(&codec_name)
|
||||
fn is_enabled(&self, code_type: FastFieldCodecType) -> bool {
|
||||
self.enabled_codecs.contains(&code_type)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FastFieldCodecName> for FastFieldCodecEnableCheck {
|
||||
fn from(codec_name: FastFieldCodecName) -> Self {
|
||||
impl From<FastFieldCodecType> for FastFieldCodecEnableCheck {
|
||||
fn from(code_type: FastFieldCodecType) -> Self {
|
||||
FastFieldCodecEnableCheck {
|
||||
enabled_codecs: vec![codec_name],
|
||||
enabled_codecs: vec![code_type],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use this, when this is merged and stabilized explicit_generic_args_with_impl_trait
|
||||
// https://github.com/rust-lang/rust/pull/86176
|
||||
fn codec_estimation<T: FastFieldCodecSerializer, A: FastFieldDataAccess>(
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: &A,
|
||||
estimations: &mut Vec<(f32, &str, u8)>,
|
||||
fn codec_estimation<C: FastFieldCodec>(
|
||||
fastfield_accessor: &impl Column,
|
||||
estimations: &mut Vec<(f32, FastFieldCodecType)>,
|
||||
) {
|
||||
if !T::is_applicable(fastfield_accessor, stats.clone()) {
|
||||
return;
|
||||
if let Some(ratio) = C::estimate(fastfield_accessor) {
|
||||
estimations.push((ratio, C::CODEC_TYPE));
|
||||
}
|
||||
let (ratio, name, id) = (T::estimate(fastfield_accessor, stats), T::NAME, T::ID);
|
||||
estimations.push((ratio, name, id));
|
||||
}
|
||||
|
||||
impl CompositeFastFieldSerializer {
|
||||
@@ -97,99 +94,100 @@ impl CompositeFastFieldSerializer {
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn create_auto_detect_u64_fast_field<F, I>(
|
||||
pub fn create_auto_detect_u64_fast_field(
|
||||
&mut self,
|
||||
field: Field,
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: impl FastFieldDataAccess,
|
||||
iter_gen: F,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
F: Fn() -> I,
|
||||
I: Iterator<Item = u64>,
|
||||
{
|
||||
self.create_auto_detect_u64_fast_field_with_idx(
|
||||
field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen,
|
||||
0,
|
||||
)
|
||||
fastfield_accessor: impl Column,
|
||||
) -> io::Result<()> {
|
||||
self.create_auto_detect_u64_fast_field_with_idx(field, fastfield_accessor, 0)
|
||||
}
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn write_header<W: Write>(field_write: &mut W, codec_id: u8) -> io::Result<()> {
|
||||
codec_id.serialize(field_write)?;
|
||||
|
||||
pub fn write_header<W: Write>(
|
||||
field_write: &mut W,
|
||||
codec_type: FastFieldCodecType,
|
||||
) -> io::Result<()> {
|
||||
codec_type.to_code().serialize(field_write)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serialize data into a new u64 fast field. The best compression codec will be chosen
|
||||
/// automatically.
|
||||
pub fn create_auto_detect_u64_fast_field_with_idx<F, I>(
|
||||
pub fn create_auto_detect_u64_fast_field_with_idx(
|
||||
&mut self,
|
||||
field: Field,
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: impl FastFieldDataAccess,
|
||||
iter_gen: F,
|
||||
fastfield_accessor: impl Column,
|
||||
idx: usize,
|
||||
) -> io::Result<()>
|
||||
where
|
||||
F: Fn() -> I,
|
||||
I: Iterator<Item = u64>,
|
||||
{
|
||||
) -> io::Result<()> {
|
||||
let min_value = fastfield_accessor.min_value();
|
||||
let field_write = self.composite_write.for_field_with_idx(field, idx);
|
||||
let gcd = find_gcd(iter_gen().map(|val| val - stats.min_value)).unwrap_or(GCD_DEFAULT);
|
||||
let gcd = find_gcd(fastfield_accessor.iter().map(|val| val - min_value))
|
||||
.map(NonZeroU64::get)
|
||||
.unwrap_or(GCD_DEFAULT);
|
||||
|
||||
if gcd == 1 {
|
||||
return Self::create_auto_detect_u64_fast_field_with_idx_gcd(
|
||||
self.codec_enable_checker.clone(),
|
||||
field,
|
||||
field_write,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen(),
|
||||
iter_gen(),
|
||||
);
|
||||
}
|
||||
|
||||
Self::write_header(field_write, GCD_CODEC_ID)?;
|
||||
struct GCDWrappedFFAccess<T: FastFieldDataAccess> {
|
||||
Self::write_header(field_write, FastFieldCodecType::Gcd)?;
|
||||
struct GCDWrappedFFAccess<T: Column> {
|
||||
fastfield_accessor: T,
|
||||
min_value: u64,
|
||||
gcd: u64,
|
||||
base_value: u64,
|
||||
max_value: u64,
|
||||
num_vals: u64,
|
||||
gcd: DividerU64,
|
||||
}
|
||||
impl<T: FastFieldDataAccess> FastFieldDataAccess for GCDWrappedFFAccess<T> {
|
||||
|
||||
impl<T: Column> Column for GCDWrappedFFAccess<T> {
|
||||
fn get_val(&self, position: u64) -> u64 {
|
||||
(self.fastfield_accessor.get_val(position) - self.min_value) / self.gcd
|
||||
self.gcd
|
||||
.divide(self.fastfield_accessor.get_val(position) - self.base_value)
|
||||
}
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(
|
||||
self.fastfield_accessor
|
||||
.iter()
|
||||
.map(|val| self.gcd.divide(val - self.base_value)),
|
||||
)
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
let num_vals = fastfield_accessor.num_vals();
|
||||
let base_value = fastfield_accessor.min_value();
|
||||
let max_value = (fastfield_accessor.max_value() - fastfield_accessor.min_value()) / gcd;
|
||||
|
||||
let fastfield_accessor = GCDWrappedFFAccess {
|
||||
fastfield_accessor,
|
||||
min_value: stats.min_value,
|
||||
gcd,
|
||||
base_value,
|
||||
max_value,
|
||||
num_vals,
|
||||
gcd: DividerU64::divide_by(gcd),
|
||||
};
|
||||
|
||||
let min_value = stats.min_value;
|
||||
let stats = FastFieldStats {
|
||||
min_value: 0,
|
||||
max_value: (stats.max_value - stats.min_value) / gcd,
|
||||
num_vals: stats.num_vals,
|
||||
};
|
||||
let iter1 = iter_gen().map(|val| (val - min_value) / gcd);
|
||||
let iter2 = iter_gen().map(|val| (val - min_value) / gcd);
|
||||
Self::create_auto_detect_u64_fast_field_with_idx_gcd(
|
||||
self.codec_enable_checker.clone(),
|
||||
field,
|
||||
field_write,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
write_gcd_header(field_write, min_value, gcd)?;
|
||||
write_gcd_header(field_write, base_value, gcd, num_vals)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -199,38 +197,23 @@ impl CompositeFastFieldSerializer {
|
||||
codec_enable_checker: FastFieldCodecEnableCheck,
|
||||
field: Field,
|
||||
field_write: &mut CountingWriter<W>,
|
||||
stats: FastFieldStats,
|
||||
fastfield_accessor: impl FastFieldDataAccess,
|
||||
iter1: impl Iterator<Item = u64>,
|
||||
iter2: impl Iterator<Item = u64>,
|
||||
fastfield_accessor: impl Column,
|
||||
) -> io::Result<()> {
|
||||
let mut estimations = vec![];
|
||||
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecName::Bitpacked) {
|
||||
codec_estimation::<BitpackedFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecType::Bitpacked) {
|
||||
codec_estimation::<BitpackedCodec>(&fastfield_accessor, &mut estimations);
|
||||
}
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecName::LinearInterpol) {
|
||||
codec_estimation::<LinearInterpolFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecType::Linear) {
|
||||
codec_estimation::<LinearCodec>(&fastfield_accessor, &mut estimations);
|
||||
}
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecName::BlockwiseLinearInterpol) {
|
||||
codec_estimation::<MultiLinearInterpolFastFieldSerializer, _>(
|
||||
stats.clone(),
|
||||
&fastfield_accessor,
|
||||
&mut estimations,
|
||||
);
|
||||
if codec_enable_checker.is_enabled(FastFieldCodecType::BlockwiseLinear) {
|
||||
codec_estimation::<BlockwiseLinearCodec>(&fastfield_accessor, &mut estimations);
|
||||
}
|
||||
if let Some(broken_estimation) = estimations.iter().find(|estimation| estimation.0.is_nan())
|
||||
{
|
||||
warn!(
|
||||
"broken estimation for fast field codec {}",
|
||||
"broken estimation for fast field codec {:?}",
|
||||
broken_estimation.1
|
||||
);
|
||||
}
|
||||
@@ -238,43 +221,25 @@ impl CompositeFastFieldSerializer {
|
||||
// codecs
|
||||
estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
|
||||
estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
|
||||
let (_ratio, name, id) = estimations[0];
|
||||
debug!(
|
||||
"choosing fast field codec {} for field_id {:?}",
|
||||
name, field
|
||||
); // todo print actual field name
|
||||
let (_ratio, codec_type) = estimations[0];
|
||||
debug!("choosing fast field codec {codec_type:?} for field_id {field:?}"); // todo print actual field name
|
||||
|
||||
Self::write_header(field_write, id)?;
|
||||
match name {
|
||||
BitpackedFastFieldSerializer::NAME => {
|
||||
BitpackedFastFieldSerializer::serialize(
|
||||
field_write,
|
||||
&fastfield_accessor,
|
||||
stats,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
Self::write_header(field_write, codec_type)?;
|
||||
match codec_type {
|
||||
FastFieldCodecType::Bitpacked => {
|
||||
BitpackedCodec::serialize(field_write, &fastfield_accessor)?;
|
||||
}
|
||||
LinearInterpolFastFieldSerializer::NAME => {
|
||||
LinearInterpolFastFieldSerializer::serialize(
|
||||
field_write,
|
||||
&fastfield_accessor,
|
||||
stats,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
FastFieldCodecType::Linear => {
|
||||
LinearCodec::serialize(field_write, &fastfield_accessor)?;
|
||||
}
|
||||
MultiLinearInterpolFastFieldSerializer::NAME => {
|
||||
MultiLinearInterpolFastFieldSerializer::serialize(
|
||||
field_write,
|
||||
&fastfield_accessor,
|
||||
stats,
|
||||
iter1,
|
||||
iter2,
|
||||
)?;
|
||||
FastFieldCodecType::BlockwiseLinear => {
|
||||
BlockwiseLinearCodec::serialize(field_write, &fastfield_accessor)?;
|
||||
}
|
||||
_ => {
|
||||
panic!("unknown fastfield serializer {}", name)
|
||||
FastFieldCodecType::Gcd => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"GCD codec not supported.",
|
||||
));
|
||||
}
|
||||
}
|
||||
field_write.flush()?;
|
||||
@@ -288,7 +253,7 @@ impl CompositeFastFieldSerializer {
|
||||
field: Field,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
) -> io::Result<BitpackedSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
|
||||
}
|
||||
|
||||
@@ -298,7 +263,7 @@ impl CompositeFastFieldSerializer {
|
||||
field: Field,
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
) -> io::Result<BitpackedSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
|
||||
}
|
||||
|
||||
@@ -309,12 +274,11 @@ impl CompositeFastFieldSerializer {
|
||||
min_value: u64,
|
||||
max_value: u64,
|
||||
idx: usize,
|
||||
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
) -> io::Result<BitpackedSerializerLegacy<'_, CountingWriter<WritePtr>>> {
|
||||
let field_write = self.composite_write.for_field_with_idx(field, idx);
|
||||
// Prepend codec id to field data for compatibility with DynamicFastFieldReader.
|
||||
let id = BitpackedFastFieldSerializer::ID;
|
||||
id.serialize(field_write)?;
|
||||
BitpackedFastFieldSerializerLegacy::open(field_write, min_value, max_value)
|
||||
FastFieldCodecType::Bitpacked.serialize(field_write)?;
|
||||
BitpackedSerializerLegacy::open(field_write, min_value, max_value)
|
||||
}
|
||||
|
||||
/// Start serializing a new [u8] fast field
|
||||
|
||||
@@ -2,12 +2,13 @@ use std::collections::HashMap;
|
||||
use std::io;
|
||||
|
||||
use common;
|
||||
use fastfield_codecs::Column;
|
||||
use fnv::FnvHashMap;
|
||||
use tantivy_bitpacker::BlockedBitpacker;
|
||||
|
||||
use super::multivalued::MultiValuedFastFieldWriter;
|
||||
use super::serializer::FastFieldStats;
|
||||
use super::{FastFieldDataAccess, FastFieldType, FastValue};
|
||||
use super::{FastFieldType, FastValue};
|
||||
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
use crate::postings::UnorderedTermId;
|
||||
@@ -359,38 +360,20 @@ impl IntFastFieldWriter {
|
||||
(self.val_min, self.val_max)
|
||||
};
|
||||
|
||||
let fastfield_accessor = WriterFastFieldAccessProvider {
|
||||
doc_id_map,
|
||||
vals: &self.vals,
|
||||
};
|
||||
let stats = FastFieldStats {
|
||||
min_value: min,
|
||||
max_value: max,
|
||||
num_vals: self.val_count as u64,
|
||||
};
|
||||
|
||||
if let Some(doc_id_map) = doc_id_map {
|
||||
let iter_gen = || {
|
||||
doc_id_map
|
||||
.iter_old_doc_ids()
|
||||
.map(|doc_id| self.vals.get(doc_id as usize))
|
||||
};
|
||||
serializer.create_auto_detect_u64_fast_field(
|
||||
self.field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen,
|
||||
)?;
|
||||
} else {
|
||||
let iter_gen = || self.vals.iter();
|
||||
|
||||
serializer.create_auto_detect_u64_fast_field(
|
||||
self.field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen,
|
||||
)?;
|
||||
let fastfield_accessor = WriterFastFieldAccessProvider {
|
||||
doc_id_map,
|
||||
vals: &self.vals,
|
||||
stats,
|
||||
};
|
||||
|
||||
serializer.create_auto_detect_u64_fast_field(self.field, fastfield_accessor)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -399,8 +382,9 @@ impl IntFastFieldWriter {
|
||||
struct WriterFastFieldAccessProvider<'map, 'bitp> {
|
||||
doc_id_map: Option<&'map DocIdMapping>,
|
||||
vals: &'bitp BlockedBitpacker,
|
||||
stats: FastFieldStats,
|
||||
}
|
||||
impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'bitp> {
|
||||
impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
|
||||
/// Return the value associated to the given doc.
|
||||
///
|
||||
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
|
||||
@@ -419,4 +403,28 @@ impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'b
|
||||
self.vals.get(doc as usize)
|
||||
}
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
if let Some(doc_id_map) = self.doc_id_map {
|
||||
Box::new(
|
||||
doc_id_map
|
||||
.iter_old_doc_ids()
|
||||
.map(|doc_id| self.vals.get(doc_id as usize)),
|
||||
)
|
||||
} else {
|
||||
Box::new(self.vals.iter())
|
||||
}
|
||||
}
|
||||
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.stats.num_vals
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,35 +2,42 @@
|
||||
//! to get mappings from old doc_id to new doc_id and vice versa, after sorting
|
||||
|
||||
use std::cmp::Reverse;
|
||||
use std::ops::Index;
|
||||
|
||||
use super::SegmentWriter;
|
||||
use crate::schema::{Field, Schema};
|
||||
use crate::{DocId, IndexSortByField, Order, SegmentOrdinal, TantivyError};
|
||||
use crate::{DocAddress, DocId, IndexSortByField, Order, TantivyError};
|
||||
|
||||
/// Struct to provide mapping from new doc_id to old doc_id and segment.
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct SegmentDocIdMapping {
|
||||
new_doc_id_to_old_and_segment: Vec<(DocId, SegmentOrdinal)>,
|
||||
new_doc_id_to_old_doc_addr: Vec<DocAddress>,
|
||||
is_trivial: bool,
|
||||
}
|
||||
|
||||
impl SegmentDocIdMapping {
|
||||
pub(crate) fn new(
|
||||
new_doc_id_to_old_and_segment: Vec<(DocId, SegmentOrdinal)>,
|
||||
is_trivial: bool,
|
||||
) -> Self {
|
||||
pub(crate) fn new(new_doc_id_to_old_and_segment: Vec<DocAddress>, is_trivial: bool) -> Self {
|
||||
Self {
|
||||
new_doc_id_to_old_and_segment,
|
||||
new_doc_id_to_old_doc_addr: new_doc_id_to_old_and_segment,
|
||||
is_trivial,
|
||||
}
|
||||
}
|
||||
pub(crate) fn iter(&self) -> impl Iterator<Item = &(DocId, SegmentOrdinal)> {
|
||||
self.new_doc_id_to_old_and_segment.iter()
|
||||
|
||||
/// Returns an iterator over the old document addresses, ordered by the new document ids.
|
||||
///
|
||||
/// In the returned `DocAddress`, the `segment_ord` is the ordinal of targetted segment
|
||||
/// in the list of merged segments.
|
||||
pub(crate) fn iter_old_doc_addrs(&self) -> impl Iterator<Item = DocAddress> + '_ {
|
||||
self.new_doc_id_to_old_doc_addr.iter().copied()
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.new_doc_id_to_old_and_segment.len()
|
||||
self.new_doc_id_to_old_doc_addr.len()
|
||||
}
|
||||
|
||||
pub(crate) fn get_old_doc_addr(&self, new_doc_id: DocId) -> DocAddress {
|
||||
self.new_doc_id_to_old_doc_addr[new_doc_id as usize]
|
||||
}
|
||||
|
||||
/// This flags means the segments are simply stacked in the order of their ordinal.
|
||||
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
|
||||
///
|
||||
@@ -39,21 +46,6 @@ impl SegmentDocIdMapping {
|
||||
self.is_trivial
|
||||
}
|
||||
}
|
||||
impl Index<usize> for SegmentDocIdMapping {
|
||||
type Output = (DocId, SegmentOrdinal);
|
||||
|
||||
fn index(&self, idx: usize) -> &Self::Output {
|
||||
&self.new_doc_id_to_old_and_segment[idx]
|
||||
}
|
||||
}
|
||||
impl IntoIterator for SegmentDocIdMapping {
|
||||
type Item = (DocId, SegmentOrdinal);
|
||||
type IntoIter = std::vec::IntoIter<Self::Item>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.new_doc_id_to_old_and_segment.into_iter()
|
||||
}
|
||||
}
|
||||
|
||||
/// Struct to provide mapping from old doc_id to new doc_id and vice versa within a segment.
|
||||
pub struct DocIdMapping {
|
||||
@@ -151,8 +143,9 @@ pub(crate) fn get_doc_id_mapping_from_field(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests_indexsorting {
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use crate::collector::TopDocs;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::indexer::doc_id_mapping::DocIdMapping;
|
||||
use crate::query::QueryParser;
|
||||
use crate::schema::{Schema, *};
|
||||
@@ -472,9 +465,9 @@ mod tests_indexsorting {
|
||||
let my_number = index.schema().get_field("my_number").unwrap();
|
||||
|
||||
let fast_field = fast_fields.u64(my_number).unwrap();
|
||||
assert_eq!(fast_field.get(0u32), 10u64);
|
||||
assert_eq!(fast_field.get(1u32), 20u64);
|
||||
assert_eq!(fast_field.get(2u32), 30u64);
|
||||
assert_eq!(fast_field.get_val(0), 10u64);
|
||||
assert_eq!(fast_field.get_val(1), 20u64);
|
||||
assert_eq!(fast_field.get_val(2), 30u64);
|
||||
|
||||
let multi_numbers = index.schema().get_field("multi_numbers").unwrap();
|
||||
let multifield = fast_fields.u64s(multi_numbers).unwrap();
|
||||
|
||||
@@ -174,9 +174,7 @@ fn index_documents(
|
||||
segment_updater: &mut SegmentUpdater,
|
||||
mut delete_cursor: DeleteCursor,
|
||||
) -> crate::Result<()> {
|
||||
let schema = segment.schema();
|
||||
|
||||
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), schema)?;
|
||||
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone())?;
|
||||
for document_group in grouped_document_iterator {
|
||||
for doc in document_group {
|
||||
segment_writer.add_document(doc)?;
|
||||
@@ -777,6 +775,7 @@ impl Drop for IndexWriter {
|
||||
mod tests {
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use proptest::prelude::*;
|
||||
use proptest::prop_oneof;
|
||||
use proptest::strategy::Strategy;
|
||||
@@ -785,7 +784,6 @@ mod tests {
|
||||
use crate::collector::TopDocs;
|
||||
use crate::directory::error::LockError;
|
||||
use crate::error::*;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::indexer::NoMergePolicy;
|
||||
use crate::query::{QueryParser, TermQuery};
|
||||
use crate::schema::{
|
||||
@@ -1327,7 +1325,7 @@ mod tests {
|
||||
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
|
||||
let in_order_alive_ids: Vec<u64> = segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(|doc| fast_field_reader.get(doc))
|
||||
.map(|doc| fast_field_reader.get_val(doc as u64))
|
||||
.collect();
|
||||
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 1, 0]);
|
||||
Ok(())
|
||||
@@ -1493,7 +1491,7 @@ mod tests {
|
||||
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
|
||||
segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(move |doc| ff_reader.get(doc))
|
||||
.map(move |doc| ff_reader.get_val(doc as u64))
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -1504,7 +1502,7 @@ mod tests {
|
||||
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
|
||||
segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(move |doc| ff_reader.get(doc))
|
||||
.map(move |doc| ff_reader.get_val(doc as u64))
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -1622,7 +1620,7 @@ mod tests {
|
||||
facet_reader
|
||||
.facet_from_ord(facet_ords[0], &mut facet)
|
||||
.unwrap();
|
||||
let id = ff_reader.get(doc_id);
|
||||
let id = ff_reader.get_val(doc_id as u64);
|
||||
let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));
|
||||
|
||||
assert_eq!(facet, facet_expected);
|
||||
|
||||
@@ -4,14 +4,13 @@ use std::sync::Arc;
|
||||
|
||||
use itertools::Itertools;
|
||||
use measure_time::debug_time;
|
||||
use tantivy_bitpacker::minmax;
|
||||
|
||||
use crate::core::{Segment, SegmentReader};
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::error::DataCorruption;
|
||||
use crate::fastfield::{
|
||||
AliveBitSet, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldDataAccess,
|
||||
FastFieldReader, FastFieldStats, MultiValueLength, MultiValuedFastFieldReader,
|
||||
AliveBitSet, Column, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldStats,
|
||||
MultiValueLength, MultiValuedFastFieldReader,
|
||||
};
|
||||
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
|
||||
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
|
||||
@@ -21,8 +20,8 @@ use crate::schema::{Cardinality, Field, FieldType, Schema};
|
||||
use crate::store::StoreWriter;
|
||||
use crate::termdict::{TermMerger, TermOrdinal};
|
||||
use crate::{
|
||||
DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentComponent,
|
||||
SegmentOrdinal,
|
||||
DocAddress, DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order,
|
||||
SegmentComponent, SegmentOrdinal,
|
||||
};
|
||||
|
||||
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
|
||||
@@ -88,7 +87,7 @@ pub struct IndexMerger {
|
||||
}
|
||||
|
||||
fn compute_min_max_val(
|
||||
u64_reader: &impl FastFieldReader<u64>,
|
||||
u64_reader: &impl Column<u64>,
|
||||
segment_reader: &SegmentReader,
|
||||
) -> Option<(u64, u64)> {
|
||||
if segment_reader.max_doc() == 0 {
|
||||
@@ -102,11 +101,11 @@ fn compute_min_max_val(
|
||||
}
|
||||
// some deleted documents,
|
||||
// we need to recompute the max / min
|
||||
minmax(
|
||||
segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(|doc_id| u64_reader.get(doc_id)),
|
||||
)
|
||||
segment_reader
|
||||
.doc_ids_alive()
|
||||
.map(|doc_id| u64_reader.get_val(doc_id as u64))
|
||||
.minmax()
|
||||
.into_option()
|
||||
}
|
||||
|
||||
struct TermOrdinalMapping {
|
||||
@@ -134,7 +133,7 @@ impl TermOrdinalMapping {
|
||||
fn max_term_ord(&self) -> TermOrdinal {
|
||||
self.per_segment_new_term_ordinals
|
||||
.iter()
|
||||
.flat_map(|term_ordinals| term_ordinals.iter().cloned().max())
|
||||
.flat_map(|term_ordinals| term_ordinals.iter().max().cloned())
|
||||
.max()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
@@ -260,9 +259,9 @@ impl IndexMerger {
|
||||
.iter()
|
||||
.map(|reader| reader.get_fieldnorms_reader(field))
|
||||
.collect::<Result<_, _>>()?;
|
||||
for (doc_id, reader_ordinal) in doc_id_mapping.iter() {
|
||||
let fieldnorms_reader = &fieldnorms_readers[*reader_ordinal as usize];
|
||||
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(*doc_id);
|
||||
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
|
||||
let fieldnorms_reader = &fieldnorms_readers[old_doc_addr.segment_ord as usize];
|
||||
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(old_doc_addr.doc_id);
|
||||
fieldnorms_data.push(fieldnorm_id);
|
||||
}
|
||||
|
||||
@@ -374,29 +373,46 @@ impl IndexMerger {
|
||||
struct SortedDocIdFieldAccessProvider<'a> {
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
fast_field_readers: &'a Vec<DynamicFastFieldReader<u64>>,
|
||||
stats: FastFieldStats,
|
||||
}
|
||||
impl<'a> FastFieldDataAccess for SortedDocIdFieldAccessProvider<'a> {
|
||||
impl<'a> Column for SortedDocIdFieldAccessProvider<'a> {
|
||||
fn get_val(&self, doc: u64) -> u64 {
|
||||
let (doc_id, reader_ordinal) = self.doc_id_mapping[doc as usize];
|
||||
self.fast_field_readers[reader_ordinal as usize].get(doc_id)
|
||||
let DocAddress {
|
||||
doc_id,
|
||||
segment_ord,
|
||||
} = self.doc_id_mapping.get_old_doc_addr(doc as u32);
|
||||
self.fast_field_readers[segment_ord as usize].get_val(doc_id as u64)
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(
|
||||
self.doc_id_mapping
|
||||
.iter_old_doc_addrs()
|
||||
.map(|old_doc_addr| {
|
||||
let fast_field_reader =
|
||||
&self.fast_field_readers[old_doc_addr.segment_ord as usize];
|
||||
fast_field_reader.get_val(old_doc_addr.doc_id as u64)
|
||||
}),
|
||||
)
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.stats.num_vals
|
||||
}
|
||||
}
|
||||
let fastfield_accessor = SortedDocIdFieldAccessProvider {
|
||||
doc_id_mapping,
|
||||
fast_field_readers: &fast_field_readers,
|
||||
};
|
||||
let iter_gen = || {
|
||||
doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
|
||||
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
|
||||
fast_field_reader.get(*doc_id)
|
||||
})
|
||||
};
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field(
|
||||
field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen,
|
||||
)?;
|
||||
};
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -412,7 +428,7 @@ impl IndexMerger {
|
||||
|
||||
let everything_is_in_order = reader_ordinal_and_field_accessors
|
||||
.into_iter()
|
||||
.map(|reader| reader.1)
|
||||
.map(|(_, col)| Arc::new(col))
|
||||
.tuple_windows()
|
||||
.all(|(field_accessor1, field_accessor2)| {
|
||||
if sort_by_field.order.is_asc() {
|
||||
@@ -427,7 +443,7 @@ impl IndexMerger {
|
||||
pub(crate) fn get_sort_field_accessor(
|
||||
reader: &SegmentReader,
|
||||
sort_by_field: &IndexSortByField,
|
||||
) -> crate::Result<impl FastFieldReader<u64>> {
|
||||
) -> crate::Result<impl Column> {
|
||||
let field_id = expect_field_id_for_sort_field(reader.schema(), sort_by_field)?; // for now expect fastfield, but not strictly required
|
||||
let value_accessor = reader.fast_fields().u64_lenient(field_id)?;
|
||||
Ok(value_accessor)
|
||||
@@ -436,7 +452,7 @@ impl IndexMerger {
|
||||
pub(crate) fn get_reader_with_sort_field_accessor(
|
||||
&self,
|
||||
sort_by_field: &IndexSortByField,
|
||||
) -> crate::Result<Vec<(SegmentOrdinal, impl FastFieldReader<u64> + Clone)>> {
|
||||
) -> crate::Result<Vec<(SegmentOrdinal, impl Column)>> {
|
||||
let reader_ordinal_and_field_accessors = self
|
||||
.readers
|
||||
.iter()
|
||||
@@ -469,15 +485,11 @@ impl IndexMerger {
|
||||
let doc_id_reader_pair =
|
||||
reader_ordinal_and_field_accessors
|
||||
.iter()
|
||||
.map(|reader_and_field_accessor| {
|
||||
let reader = &self.readers[reader_and_field_accessor.0 as usize];
|
||||
reader.doc_ids_alive().map(move |doc_id| {
|
||||
(
|
||||
doc_id,
|
||||
reader_and_field_accessor.0,
|
||||
&reader_and_field_accessor.1,
|
||||
)
|
||||
})
|
||||
.map(|(reader_ord, ff_reader)| {
|
||||
let reader = &self.readers[*reader_ord as usize];
|
||||
reader
|
||||
.doc_ids_alive()
|
||||
.map(move |doc_id| (doc_id, reader_ord, ff_reader))
|
||||
});
|
||||
|
||||
let total_num_new_docs = self
|
||||
@@ -486,22 +498,25 @@ impl IndexMerger {
|
||||
.map(|reader| reader.num_docs() as usize)
|
||||
.sum();
|
||||
|
||||
let mut sorted_doc_ids = Vec::with_capacity(total_num_new_docs);
|
||||
let mut sorted_doc_ids: Vec<DocAddress> = Vec::with_capacity(total_num_new_docs);
|
||||
|
||||
// create iterator tuple of (old doc_id, reader) in order of the new doc_ids
|
||||
sorted_doc_ids.extend(
|
||||
doc_id_reader_pair
|
||||
.into_iter()
|
||||
.kmerge_by(|a, b| {
|
||||
let val1 = a.2.get(a.0);
|
||||
let val2 = b.2.get(b.0);
|
||||
let val1 = a.2.get_val(a.0 as u64);
|
||||
let val2 = b.2.get_val(b.0 as u64);
|
||||
if sort_by_field.order == Order::Asc {
|
||||
val1 < val2
|
||||
} else {
|
||||
val1 > val2
|
||||
}
|
||||
})
|
||||
.map(|(doc_id, reader_with_id, _)| (doc_id, reader_with_id)),
|
||||
.map(|(doc_id, &segment_ord, _)| DocAddress {
|
||||
doc_id,
|
||||
segment_ord,
|
||||
}),
|
||||
);
|
||||
Ok(SegmentDocIdMapping::new(sorted_doc_ids, false))
|
||||
}
|
||||
@@ -550,20 +565,44 @@ impl IndexMerger {
|
||||
|
||||
let mut offsets = Vec::with_capacity(doc_id_mapping.len());
|
||||
let mut offset = 0;
|
||||
for (doc_id, reader) in doc_id_mapping.iter() {
|
||||
let reader = &reader_and_field_accessors[*reader as usize].1;
|
||||
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
|
||||
let reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
|
||||
offsets.push(offset);
|
||||
offset += reader.get_len(*doc_id) as u64;
|
||||
offset += reader.get_len(old_doc_addr.doc_id) as u64;
|
||||
}
|
||||
offsets.push(offset);
|
||||
|
||||
let iter_gen = || offsets.iter().cloned();
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field(
|
||||
field,
|
||||
#[derive(Clone)]
|
||||
struct FieldIndexAccessProvider<'a> {
|
||||
offsets: &'a [u64],
|
||||
stats: FastFieldStats,
|
||||
}
|
||||
impl<'a> Column for FieldIndexAccessProvider<'a> {
|
||||
fn get_val(&self, doc: u64) -> u64 {
|
||||
self.offsets[doc as usize]
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(self.offsets.iter().cloned())
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.stats.num_vals
|
||||
}
|
||||
}
|
||||
let fastfield_accessor = FieldIndexAccessProvider {
|
||||
offsets: &offsets,
|
||||
stats,
|
||||
&offsets[..],
|
||||
iter_gen,
|
||||
)?;
|
||||
};
|
||||
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
|
||||
Ok(offsets)
|
||||
}
|
||||
/// Returns the fastfield index (index for the data, not the data).
|
||||
@@ -631,12 +670,12 @@ impl IndexMerger {
|
||||
fast_field_serializer.new_u64_fast_field_with_idx(field, 0u64, max_term_ord, 1)?;
|
||||
let mut vals = Vec::with_capacity(100);
|
||||
|
||||
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
|
||||
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
|
||||
let term_ordinal_mapping: &[TermOrdinal] =
|
||||
term_ordinal_mappings.get_segment(*reader_ordinal as usize);
|
||||
term_ordinal_mappings.get_segment(old_doc_addr.segment_ord as usize);
|
||||
|
||||
let ff_reader = &fast_field_reader[*reader_ordinal as usize];
|
||||
ff_reader.get_vals(*old_doc_id, &mut vals);
|
||||
let ff_reader = &fast_field_reader[old_doc_addr.segment_ord as usize];
|
||||
ff_reader.get_vals(old_doc_addr.doc_id, &mut vals);
|
||||
for &prev_term_ord in &vals {
|
||||
let new_term_ord = term_ordinal_mapping[prev_term_ord as usize];
|
||||
serialize_vals.add_val(new_term_ord)?;
|
||||
@@ -657,16 +696,17 @@ impl IndexMerger {
|
||||
.map(|reader| reader.num_docs() as usize)
|
||||
.sum();
|
||||
|
||||
let mut mapping = Vec::with_capacity(total_num_new_docs);
|
||||
let mut mapping: Vec<DocAddress> = Vec::with_capacity(total_num_new_docs);
|
||||
|
||||
mapping.extend(
|
||||
self.readers
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(reader_ordinal, reader)| {
|
||||
reader
|
||||
.doc_ids_alive()
|
||||
.map(move |doc_id| (doc_id, reader_ordinal as SegmentOrdinal))
|
||||
.flat_map(|(segment_ord, reader)| {
|
||||
reader.doc_ids_alive().map(move |doc_id| DocAddress {
|
||||
segment_ord: segment_ord as u32,
|
||||
doc_id,
|
||||
})
|
||||
}),
|
||||
);
|
||||
Ok(SegmentDocIdMapping::new(mapping, true))
|
||||
@@ -735,49 +775,68 @@ impl IndexMerger {
|
||||
doc_id_mapping: &'a SegmentDocIdMapping,
|
||||
fast_field_readers: &'a Vec<MultiValuedFastFieldReader<u64>>,
|
||||
offsets: Vec<u64>,
|
||||
stats: FastFieldStats,
|
||||
}
|
||||
impl<'a> FastFieldDataAccess for SortedDocIdMultiValueAccessProvider<'a> {
|
||||
impl<'a> Column for SortedDocIdMultiValueAccessProvider<'a> {
|
||||
fn get_val(&self, pos: u64) -> u64 {
|
||||
// use the offsets index to find the doc_id which will contain the position.
|
||||
// the offsets are strictly increasing so we can do a simple search on it.
|
||||
let new_doc_id = self
|
||||
.offsets
|
||||
.iter()
|
||||
.position(|&offset| offset > pos)
|
||||
.expect("pos is out of bounds")
|
||||
- 1;
|
||||
let new_doc_id: DocId =
|
||||
self.offsets
|
||||
.iter()
|
||||
.position(|&offset| offset > pos)
|
||||
.expect("pos is out of bounds") as DocId
|
||||
- 1u32;
|
||||
|
||||
// now we need to find the position of `pos` in the multivalued bucket
|
||||
let num_pos_covered_until_now = self.offsets[new_doc_id];
|
||||
let num_pos_covered_until_now = self.offsets[new_doc_id as usize];
|
||||
let pos_in_values = pos - num_pos_covered_until_now;
|
||||
|
||||
let (old_doc_id, reader_ordinal) = self.doc_id_mapping[new_doc_id as usize];
|
||||
let num_vals = self.fast_field_readers[reader_ordinal as usize].get_len(old_doc_id);
|
||||
let old_doc_addr = self.doc_id_mapping.get_old_doc_addr(new_doc_id);
|
||||
let num_vals = self.fast_field_readers[old_doc_addr.segment_ord as usize]
|
||||
.get_len(old_doc_addr.doc_id);
|
||||
assert!(num_vals >= pos_in_values);
|
||||
let mut vals = vec![];
|
||||
self.fast_field_readers[reader_ordinal as usize].get_vals(old_doc_id, &mut vals);
|
||||
let mut vals = Vec::new();
|
||||
self.fast_field_readers[old_doc_addr.segment_ord as usize]
|
||||
.get_vals(old_doc_addr.doc_id, &mut vals);
|
||||
|
||||
vals[pos_in_values as usize]
|
||||
}
|
||||
|
||||
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
|
||||
Box::new(
|
||||
self.doc_id_mapping
|
||||
.iter_old_doc_addrs()
|
||||
.flat_map(|old_doc_addr| {
|
||||
let ff_reader =
|
||||
&self.fast_field_readers[old_doc_addr.segment_ord as usize];
|
||||
let mut vals = Vec::new();
|
||||
ff_reader.get_vals(old_doc_addr.doc_id, &mut vals);
|
||||
vals.into_iter()
|
||||
}),
|
||||
)
|
||||
}
|
||||
fn min_value(&self) -> u64 {
|
||||
self.stats.min_value
|
||||
}
|
||||
|
||||
fn max_value(&self) -> u64 {
|
||||
self.stats.max_value
|
||||
}
|
||||
|
||||
fn num_vals(&self) -> u64 {
|
||||
self.stats.num_vals
|
||||
}
|
||||
}
|
||||
let fastfield_accessor = SortedDocIdMultiValueAccessProvider {
|
||||
doc_id_mapping,
|
||||
fast_field_readers: &ff_readers,
|
||||
offsets,
|
||||
};
|
||||
let iter_gen = || {
|
||||
doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
|
||||
let ff_reader = &ff_readers[*reader_ordinal as usize];
|
||||
let mut vals = vec![];
|
||||
ff_reader.get_vals(*doc_id, &mut vals);
|
||||
vals.into_iter()
|
||||
})
|
||||
stats,
|
||||
};
|
||||
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
|
||||
field,
|
||||
stats,
|
||||
fastfield_accessor,
|
||||
iter_gen,
|
||||
1,
|
||||
)?;
|
||||
|
||||
@@ -810,9 +869,9 @@ impl IndexMerger {
|
||||
)?;
|
||||
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1);
|
||||
|
||||
for (doc_id, reader_ordinal) in doc_id_mapping.iter() {
|
||||
let bytes_reader = &reader_and_field_accessors[*reader_ordinal as usize].1;
|
||||
let val = bytes_reader.get_bytes(*doc_id);
|
||||
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
|
||||
let bytes_reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
|
||||
let val = bytes_reader.get_bytes(old_doc_addr.doc_id);
|
||||
serialize_vals.write_all(val)?;
|
||||
}
|
||||
|
||||
@@ -868,9 +927,9 @@ impl IndexMerger {
|
||||
segment_local_map
|
||||
})
|
||||
.collect();
|
||||
for (new_doc_id, (old_doc_id, segment_ord)) in doc_id_mapping.iter().enumerate() {
|
||||
let segment_map = &mut merged_doc_id_map[*segment_ord as usize];
|
||||
segment_map[*old_doc_id as usize] = Some(new_doc_id as DocId);
|
||||
for (new_doc_id, old_doc_addr) in doc_id_mapping.iter_old_doc_addrs().enumerate() {
|
||||
let segment_map = &mut merged_doc_id_map[old_doc_addr.segment_ord as usize];
|
||||
segment_map[old_doc_addr.doc_id as usize] = Some(new_doc_id as DocId);
|
||||
}
|
||||
|
||||
// Note that the total number of tokens is not exact.
|
||||
@@ -1045,15 +1104,15 @@ impl IndexMerger {
|
||||
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
|
||||
.collect();
|
||||
|
||||
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
|
||||
let doc_bytes_it = &mut document_iterators[*reader_ordinal as usize];
|
||||
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
|
||||
let doc_bytes_it = &mut document_iterators[old_doc_addr.segment_ord as usize];
|
||||
if let Some(doc_bytes_res) = doc_bytes_it.next() {
|
||||
let doc_bytes = doc_bytes_res?;
|
||||
store_writer.store_bytes(&doc_bytes)?;
|
||||
} else {
|
||||
return Err(DataCorruption::comment_only(&format!(
|
||||
"unexpected missing document in docstore on merge, doc id {:?}",
|
||||
old_doc_id
|
||||
"unexpected missing document in docstore on merge, doc address \
|
||||
{old_doc_addr:?}",
|
||||
))
|
||||
.into());
|
||||
}
|
||||
@@ -1140,6 +1199,7 @@ impl IndexMerger {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use byteorder::{BigEndian, ReadBytesExt};
|
||||
use fastfield_codecs::Column;
|
||||
use schema::FAST;
|
||||
|
||||
use crate::collector::tests::{
|
||||
@@ -1147,7 +1207,6 @@ mod tests {
|
||||
};
|
||||
use crate::collector::{Count, FacetCollector};
|
||||
use crate::core::Index;
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::query::{AllQuery, BooleanQuery, Scorer, TermQuery};
|
||||
use crate::schema::{
|
||||
Cardinality, Document, Facet, FacetOptions, IndexRecordOption, NumericOptions, Term,
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use fastfield_codecs::Column;
|
||||
|
||||
use crate::collector::TopDocs;
|
||||
use crate::core::Index;
|
||||
use crate::fastfield::{AliveBitSet, FastFieldReader, MultiValuedFastFieldReader};
|
||||
use crate::fastfield::{AliveBitSet, MultiValuedFastFieldReader};
|
||||
use crate::query::QueryParser;
|
||||
use crate::schema::{
|
||||
self, BytesOptions, Cardinality, Facet, FacetOptions, IndexRecordOption, NumericOptions,
|
||||
@@ -186,17 +188,17 @@ mod tests {
|
||||
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let fast_field = fast_fields.u64(int_field).unwrap();
|
||||
assert_eq!(fast_field.get(5u32), 1u64);
|
||||
assert_eq!(fast_field.get(4u32), 2u64);
|
||||
assert_eq!(fast_field.get(3u32), 3u64);
|
||||
assert_eq!(fast_field.get_val(5), 1u64);
|
||||
assert_eq!(fast_field.get_val(4), 2u64);
|
||||
assert_eq!(fast_field.get_val(3), 3u64);
|
||||
if force_disjunct_segment_sort_values {
|
||||
assert_eq!(fast_field.get(2u32), 20u64);
|
||||
assert_eq!(fast_field.get(1u32), 100u64);
|
||||
assert_eq!(fast_field.get_val(2u64), 20u64);
|
||||
assert_eq!(fast_field.get_val(1u64), 100u64);
|
||||
} else {
|
||||
assert_eq!(fast_field.get(2u32), 10u64);
|
||||
assert_eq!(fast_field.get(1u32), 20u64);
|
||||
assert_eq!(fast_field.get_val(2u64), 10u64);
|
||||
assert_eq!(fast_field.get_val(1u64), 20u64);
|
||||
}
|
||||
assert_eq!(fast_field.get(0u32), 1_000u64);
|
||||
assert_eq!(fast_field.get_val(0u64), 1_000u64);
|
||||
|
||||
// test new field norm mapping
|
||||
{
|
||||
@@ -373,12 +375,12 @@ mod tests {
|
||||
|
||||
let fast_fields = segment_reader.fast_fields();
|
||||
let fast_field = fast_fields.u64(int_field).unwrap();
|
||||
assert_eq!(fast_field.get(0u32), 1u64);
|
||||
assert_eq!(fast_field.get(1u32), 2u64);
|
||||
assert_eq!(fast_field.get(2u32), 3u64);
|
||||
assert_eq!(fast_field.get(3u32), 10u64);
|
||||
assert_eq!(fast_field.get(4u32), 20u64);
|
||||
assert_eq!(fast_field.get(5u32), 1_000u64);
|
||||
assert_eq!(fast_field.get_val(0), 1u64);
|
||||
assert_eq!(fast_field.get_val(1), 2u64);
|
||||
assert_eq!(fast_field.get_val(2), 3u64);
|
||||
assert_eq!(fast_field.get_val(3), 10u64);
|
||||
assert_eq!(fast_field.get_val(4), 20u64);
|
||||
assert_eq!(fast_field.get_val(5), 1_000u64);
|
||||
|
||||
let get_vals = |fast_field: &MultiValuedFastFieldReader<u64>, doc_id: u32| -> Vec<u64> {
|
||||
let mut vals = vec![];
|
||||
@@ -478,13 +480,13 @@ mod tests {
|
||||
#[cfg(all(test, feature = "unstable"))]
|
||||
mod bench_sorted_index_merge {
|
||||
|
||||
use fastfield_codecs::Column;
|
||||
use test::{self, Bencher};
|
||||
|
||||
use crate::core::Index;
|
||||
// use cratedoc_id, readerdoc_id_mappinglet vals = reader.fate::schema;
|
||||
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
|
||||
use crate::fastfield::DynamicFastFieldReader;
|
||||
use crate::indexer::merger::IndexMerger;
|
||||
use crate::schema::{Cardinality, Document, NumericOptions, Schema};
|
||||
use crate::schema::{Cardinality, NumericOptions, Schema};
|
||||
use crate::{IndexSettings, IndexSortByField, IndexWriter, Order};
|
||||
fn create_index(sort_by_field: Option<IndexSortByField>) -> Index {
|
||||
let mut schema_builder = Schema::builder();
|
||||
@@ -503,9 +505,7 @@ mod bench_sorted_index_merge {
|
||||
{
|
||||
let mut index_writer = index.writer_for_tests().unwrap();
|
||||
let index_doc = |index_writer: &mut IndexWriter, val: u64| {
|
||||
let mut doc = Document::default();
|
||||
doc.add_u64(int_field, val);
|
||||
index_writer.add_document(doc).unwrap();
|
||||
index_writer.add_document(doc!(int_field=>val)).unwrap();
|
||||
};
|
||||
// 3 segments with 10_000 values in the fast fields
|
||||
for _ in 0..3 {
|
||||
@@ -518,6 +518,7 @@ mod bench_sorted_index_merge {
|
||||
}
|
||||
index
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn create_sorted_index_walk_overkmerge_on_merge_fastfield(
|
||||
b: &mut Bencher,
|
||||
@@ -533,19 +534,19 @@ mod bench_sorted_index_merge {
|
||||
IndexMerger::open(index.schema(), index.settings().clone(), &segments[..])?;
|
||||
let doc_id_mapping = merger.generate_doc_id_mapping(&sort_by_field).unwrap();
|
||||
b.iter(|| {
|
||||
let sorted_doc_ids = doc_id_mapping.iter().map(|(doc_id, ordinal)| {
|
||||
let reader = &merger.readers[*ordinal as usize];
|
||||
let sorted_doc_ids = doc_id_mapping.iter_old_doc_addrs().map(|doc_addr| {
|
||||
let reader = &merger.readers[doc_addr.segment_ord as usize];
|
||||
let u64_reader: DynamicFastFieldReader<u64> =
|
||||
reader.fast_fields().typed_fast_field_reader(field).expect(
|
||||
"Failed to find a reader for single fast field. This is a tantivy bug and \
|
||||
it should never happen.",
|
||||
);
|
||||
(doc_id, reader, u64_reader)
|
||||
(doc_addr.doc_id, reader, u64_reader)
|
||||
});
|
||||
// add values in order of the new doc_ids
|
||||
let mut val = 0;
|
||||
for (doc_id, _reader, field_reader) in sorted_doc_ids {
|
||||
val = field_reader.get(*doc_id);
|
||||
val = field_reader.get_val(doc_id as u64);
|
||||
}
|
||||
|
||||
val
|
||||
|
||||
@@ -25,39 +25,10 @@ use crate::indexer::{
|
||||
DefaultMergePolicy, MergeCandidate, MergeOperation, MergePolicy, SegmentEntry,
|
||||
SegmentSerializer,
|
||||
};
|
||||
use crate::schema::Schema;
|
||||
use crate::{FutureResult, Opstamp};
|
||||
|
||||
const NUM_MERGE_THREADS: usize = 4;
|
||||
|
||||
/// Save the index meta file.
|
||||
/// This operation is atomic :
|
||||
/// Either
|
||||
/// - it fails, in which case an error is returned,
|
||||
/// and the `meta.json` remains untouched,
|
||||
/// - it succeeds, and `meta.json` is written
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
pub fn save_new_metas(
|
||||
schema: Schema,
|
||||
index_settings: IndexSettings,
|
||||
directory: &dyn Directory,
|
||||
) -> crate::Result<()> {
|
||||
save_metas(
|
||||
&IndexMeta {
|
||||
index_settings,
|
||||
segments: Vec::new(),
|
||||
schema,
|
||||
opstamp: 0u64,
|
||||
payload: None,
|
||||
},
|
||||
directory,
|
||||
)?;
|
||||
directory.sync_directory()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Save the index meta file.
|
||||
/// This operation is atomic:
|
||||
/// Either
|
||||
@@ -67,7 +38,7 @@ pub fn save_new_metas(
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
|
||||
pub(crate) fn save_metas(metas: &IndexMeta, directory: &dyn Directory) -> crate::Result<()> {
|
||||
info!("save metas");
|
||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||
// Just adding a new line at the end of the buffer.
|
||||
|
||||
@@ -80,8 +80,8 @@ impl SegmentWriter {
|
||||
pub fn for_segment(
|
||||
memory_budget_in_bytes: usize,
|
||||
segment: Segment,
|
||||
schema: Schema,
|
||||
) -> crate::Result<SegmentWriter> {
|
||||
let schema = segment.schema();
|
||||
let tokenizer_manager = segment.index().tokenizers().clone();
|
||||
let table_size = compute_initial_table_size(memory_budget_in_bytes)?;
|
||||
let segment_serializer = SegmentSerializer::for_segment(segment, false)?;
|
||||
|
||||
11
src/lib.rs
11
src/lib.rs
@@ -11,6 +11,7 @@
|
||||
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
|
||||
#![warn(missing_docs)]
|
||||
#![allow(clippy::len_without_is_empty)]
|
||||
#![allow(clippy::derive_partial_eq_without_eq)]
|
||||
|
||||
//! # `tantivy`
|
||||
//!
|
||||
@@ -300,7 +301,7 @@ pub use self::docset::{DocSet, TERMINATED};
|
||||
pub use crate::core::{
|
||||
Executor, Index, IndexBuilder, IndexMeta, IndexSettings, IndexSortByField, InvertedIndexReader,
|
||||
Order, Searcher, SearcherGeneration, Segment, SegmentComponent, SegmentId, SegmentMeta,
|
||||
SegmentReader,
|
||||
SegmentReader, SingleSegmentIndexWriter,
|
||||
};
|
||||
pub use crate::directory::Directory;
|
||||
pub use crate::indexer::demuxer::*;
|
||||
@@ -420,6 +421,7 @@ pub struct DocAddress {
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use common::{BinarySerializable, FixedSize};
|
||||
use fastfield_codecs::Column;
|
||||
use rand::distributions::{Bernoulli, Uniform};
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
@@ -428,7 +430,6 @@ pub mod tests {
|
||||
use crate::collector::tests::TEST_COLLECTOR_WITH_SCORE;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::docset::{DocSet, TERMINATED};
|
||||
use crate::fastfield::FastFieldReader;
|
||||
use crate::merge_policy::NoMergePolicy;
|
||||
use crate::query::BooleanQuery;
|
||||
use crate::schema::*;
|
||||
@@ -1035,21 +1036,21 @@ pub mod tests {
|
||||
let fast_field_reader_opt = segment_reader.fast_fields().u64(fast_field_unsigned);
|
||||
assert!(fast_field_reader_opt.is_ok());
|
||||
let fast_field_reader = fast_field_reader_opt.unwrap();
|
||||
assert_eq!(fast_field_reader.get(0), 4u64)
|
||||
assert_eq!(fast_field_reader.get_val(0), 4u64)
|
||||
}
|
||||
|
||||
{
|
||||
let fast_field_reader_res = segment_reader.fast_fields().i64(fast_field_signed);
|
||||
assert!(fast_field_reader_res.is_ok());
|
||||
let fast_field_reader = fast_field_reader_res.unwrap();
|
||||
assert_eq!(fast_field_reader.get(0), 4i64)
|
||||
assert_eq!(fast_field_reader.get_val(0), 4i64)
|
||||
}
|
||||
|
||||
{
|
||||
let fast_field_reader_res = segment_reader.fast_fields().f64(fast_field_float);
|
||||
assert!(fast_field_reader_res.is_ok());
|
||||
let fast_field_reader = fast_field_reader_res.unwrap();
|
||||
assert_eq!(fast_field_reader.get(0), 4f64)
|
||||
assert_eq!(fast_field_reader.get_val(0), 4f64)
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ pub mod tests {
|
||||
|
||||
{
|
||||
let mut segment_writer =
|
||||
SegmentWriter::for_segment(3_000_000, segment.clone(), schema).unwrap();
|
||||
SegmentWriter::for_segment(3_000_000, segment.clone()).unwrap();
|
||||
{
|
||||
// checking that position works if the field has two values
|
||||
let op = AddOperation {
|
||||
|
||||
@@ -116,7 +116,7 @@ pub(crate) struct IndexingPosition {
|
||||
/// and building a `Segment` in anonymous memory.
|
||||
///
|
||||
/// `PostingsWriter` writes in a `MemoryArena`.
|
||||
pub(crate) trait PostingsWriter {
|
||||
pub(crate) trait PostingsWriter: Send + Sync {
|
||||
/// Record that a document contains a term at a given position.
|
||||
///
|
||||
/// * doc - the document id
|
||||
|
||||
@@ -56,7 +56,7 @@ impl<'a> Iterator for VInt32Reader<'a> {
|
||||
/// * the document id
|
||||
/// * the term frequency
|
||||
/// * the term positions
|
||||
pub(crate) trait Recorder: Copy + Default + 'static {
|
||||
pub(crate) trait Recorder: Copy + Default + Send + Sync + 'static {
|
||||
/// Returns the current document
|
||||
fn current_doc(&self) -> u32;
|
||||
/// Starts recording information about a new document
|
||||
|
||||
@@ -371,7 +371,7 @@ mod tests {
|
||||
fn compute_checkpoints_manual(term_scorers: Vec<TermScorer>, n: usize) -> Vec<(DocId, Score)> {
|
||||
let mut heap: BinaryHeap<Float> = BinaryHeap::with_capacity(n);
|
||||
let mut checkpoints: Vec<(DocId, Score)> = Vec::new();
|
||||
let mut scorer: Union<TermScorer, SumCombiner> = Union::from(term_scorers);
|
||||
let mut scorer = Union::build(term_scorers, SumCombiner::default);
|
||||
|
||||
let mut limit = Score::MIN;
|
||||
loop {
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use super::boolean_weight::BooleanWeight;
|
||||
use crate::query::{Occur, Query, TermQuery, Weight};
|
||||
use crate::query::{Occur, Query, SumWithCoordsCombiner, TermQuery, Weight};
|
||||
use crate::schema::{IndexRecordOption, Term};
|
||||
use crate::Searcher;
|
||||
|
||||
@@ -153,12 +151,16 @@ impl Query for BooleanQuery {
|
||||
Ok((*occur, subquery.weight(searcher, scoring_enabled)?))
|
||||
})
|
||||
.collect::<crate::Result<_>>()?;
|
||||
Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled)))
|
||||
Ok(Box::new(BooleanWeight::new(
|
||||
sub_weights,
|
||||
scoring_enabled,
|
||||
Box::new(SumWithCoordsCombiner::default),
|
||||
)))
|
||||
}
|
||||
|
||||
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
|
||||
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
|
||||
for (_occur, subquery) in &self.subqueries {
|
||||
subquery.query_terms(terms);
|
||||
subquery.query_terms(visitor);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::collections::HashMap;
|
||||
use crate::core::SegmentReader;
|
||||
use crate::postings::FreqReadingOption;
|
||||
use crate::query::explanation::does_not_match;
|
||||
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
|
||||
use crate::query::score_combiner::{DoNothingCombiner, ScoreCombiner};
|
||||
use crate::query::term_query::TermScorer;
|
||||
use crate::query::weight::{for_each_pruning_scorer, for_each_scorer};
|
||||
use crate::query::{
|
||||
@@ -17,8 +17,13 @@ enum SpecializedScorer {
|
||||
Other(Box<dyn Scorer>),
|
||||
}
|
||||
|
||||
fn scorer_union<TScoreCombiner>(scorers: Vec<Box<dyn Scorer>>) -> SpecializedScorer
|
||||
where TScoreCombiner: ScoreCombiner {
|
||||
fn scorer_union<TScoreCombiner>(
|
||||
scorers: Vec<Box<dyn Scorer>>,
|
||||
score_combiner_fn: impl Fn() -> TScoreCombiner,
|
||||
) -> SpecializedScorer
|
||||
where
|
||||
TScoreCombiner: ScoreCombiner,
|
||||
{
|
||||
assert!(!scorers.is_empty());
|
||||
if scorers.len() == 1 {
|
||||
return SpecializedScorer::Other(scorers.into_iter().next().unwrap()); //< we checked the size beforehand
|
||||
@@ -38,35 +43,45 @@ where TScoreCombiner: ScoreCombiner {
|
||||
// Block wand is only available if we read frequencies.
|
||||
return SpecializedScorer::TermUnion(scorers);
|
||||
} else {
|
||||
return SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(
|
||||
return SpecializedScorer::Other(Box::new(Union::build(
|
||||
scorers,
|
||||
score_combiner_fn,
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
SpecializedScorer::Other(Box::new(Union::<_, TScoreCombiner>::from(scorers)))
|
||||
SpecializedScorer::Other(Box::new(Union::build(scorers, score_combiner_fn)))
|
||||
}
|
||||
|
||||
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(scorer: SpecializedScorer) -> Box<dyn Scorer> {
|
||||
fn into_box_scorer<TScoreCombiner: ScoreCombiner>(
|
||||
scorer: SpecializedScorer,
|
||||
score_combiner_fn: impl Fn() -> TScoreCombiner,
|
||||
) -> Box<dyn Scorer> {
|
||||
match scorer {
|
||||
SpecializedScorer::TermUnion(term_scorers) => {
|
||||
let union_scorer = Union::<TermScorer, TScoreCombiner>::from(term_scorers);
|
||||
let union_scorer = Union::build(term_scorers, score_combiner_fn);
|
||||
Box::new(union_scorer)
|
||||
}
|
||||
SpecializedScorer::Other(scorer) => scorer,
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BooleanWeight {
|
||||
pub struct BooleanWeight<TScoreCombiner: ScoreCombiner> {
|
||||
weights: Vec<(Occur, Box<dyn Weight>)>,
|
||||
scoring_enabled: bool,
|
||||
score_combiner_fn: Box<dyn Fn() -> TScoreCombiner + Sync + Send>,
|
||||
}
|
||||
|
||||
impl BooleanWeight {
|
||||
pub fn new(weights: Vec<(Occur, Box<dyn Weight>)>, scoring_enabled: bool) -> BooleanWeight {
|
||||
impl<TScoreCombiner: ScoreCombiner> BooleanWeight<TScoreCombiner> {
|
||||
pub fn new(
|
||||
weights: Vec<(Occur, Box<dyn Weight>)>,
|
||||
scoring_enabled: bool,
|
||||
score_combiner_fn: Box<dyn Fn() -> TScoreCombiner + Sync + Send + 'static>,
|
||||
) -> BooleanWeight<TScoreCombiner> {
|
||||
BooleanWeight {
|
||||
weights,
|
||||
scoring_enabled,
|
||||
score_combiner_fn,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,21 +101,23 @@ impl BooleanWeight {
|
||||
Ok(per_occur_scorers)
|
||||
}
|
||||
|
||||
fn complex_scorer<TScoreCombiner: ScoreCombiner>(
|
||||
fn complex_scorer<TComplexScoreCombiner: ScoreCombiner>(
|
||||
&self,
|
||||
reader: &SegmentReader,
|
||||
boost: Score,
|
||||
score_combiner_fn: impl Fn() -> TComplexScoreCombiner,
|
||||
) -> crate::Result<SpecializedScorer> {
|
||||
let mut per_occur_scorers = self.per_occur_scorers(reader, boost)?;
|
||||
|
||||
let should_scorer_opt: Option<SpecializedScorer> = per_occur_scorers
|
||||
.remove(&Occur::Should)
|
||||
.map(scorer_union::<TScoreCombiner>);
|
||||
|
||||
.map(|scorers| scorer_union(scorers, &score_combiner_fn));
|
||||
let exclude_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
|
||||
.remove(&Occur::MustNot)
|
||||
.map(scorer_union::<DoNothingCombiner>)
|
||||
.map(into_box_scorer::<DoNothingCombiner>);
|
||||
.map(|scorers| scorer_union(scorers, DoNothingCombiner::default))
|
||||
.map(|specialized_scorer| {
|
||||
into_box_scorer(specialized_scorer, DoNothingCombiner::default)
|
||||
});
|
||||
|
||||
let must_scorer_opt: Option<Box<dyn Scorer>> = per_occur_scorers
|
||||
.remove(&Occur::Must)
|
||||
@@ -112,10 +129,10 @@ impl BooleanWeight {
|
||||
SpecializedScorer::Other(Box::new(RequiredOptionalScorer::<
|
||||
Box<dyn Scorer>,
|
||||
Box<dyn Scorer>,
|
||||
TScoreCombiner,
|
||||
TComplexScoreCombiner,
|
||||
>::new(
|
||||
must_scorer,
|
||||
into_box_scorer::<TScoreCombiner>(should_scorer),
|
||||
into_box_scorer(should_scorer, &score_combiner_fn),
|
||||
)))
|
||||
} else {
|
||||
SpecializedScorer::Other(must_scorer)
|
||||
@@ -129,8 +146,7 @@ impl BooleanWeight {
|
||||
};
|
||||
|
||||
if let Some(exclude_scorer) = exclude_scorer_opt {
|
||||
let positive_scorer_boxed: Box<dyn Scorer> =
|
||||
into_box_scorer::<TScoreCombiner>(positive_scorer);
|
||||
let positive_scorer_boxed = into_box_scorer(positive_scorer, &score_combiner_fn);
|
||||
Ok(SpecializedScorer::Other(Box::new(Exclude::new(
|
||||
positive_scorer_boxed,
|
||||
exclude_scorer,
|
||||
@@ -141,7 +157,7 @@ impl BooleanWeight {
|
||||
}
|
||||
}
|
||||
|
||||
impl Weight for BooleanWeight {
|
||||
impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombiner> {
|
||||
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
|
||||
if self.weights.is_empty() {
|
||||
Ok(Box::new(EmptyScorer))
|
||||
@@ -153,13 +169,15 @@ impl Weight for BooleanWeight {
|
||||
weight.scorer(reader, boost)
|
||||
}
|
||||
} else if self.scoring_enabled {
|
||||
self.complex_scorer::<SumWithCoordsCombiner>(reader, boost)
|
||||
self.complex_scorer(reader, boost, &self.score_combiner_fn)
|
||||
.map(|specialized_scorer| {
|
||||
into_box_scorer::<SumWithCoordsCombiner>(specialized_scorer)
|
||||
into_box_scorer(specialized_scorer, &self.score_combiner_fn)
|
||||
})
|
||||
} else {
|
||||
self.complex_scorer::<DoNothingCombiner>(reader, boost)
|
||||
.map(into_box_scorer::<DoNothingCombiner>)
|
||||
self.complex_scorer(reader, boost, &DoNothingCombiner::default)
|
||||
.map(|specialized_scorer| {
|
||||
into_box_scorer(specialized_scorer, &DoNothingCombiner::default)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,11 +206,10 @@ impl Weight for BooleanWeight {
|
||||
reader: &SegmentReader,
|
||||
callback: &mut dyn FnMut(DocId, Score),
|
||||
) -> crate::Result<()> {
|
||||
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
|
||||
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
|
||||
match scorer {
|
||||
SpecializedScorer::TermUnion(term_scorers) => {
|
||||
let mut union_scorer =
|
||||
Union::<TermScorer, SumWithCoordsCombiner>::from(term_scorers);
|
||||
let mut union_scorer = Union::build(term_scorers, &self.score_combiner_fn);
|
||||
for_each_scorer(&mut union_scorer, callback);
|
||||
}
|
||||
SpecializedScorer::Other(mut scorer) => {
|
||||
@@ -218,7 +235,7 @@ impl Weight for BooleanWeight {
|
||||
reader: &SegmentReader,
|
||||
callback: &mut dyn FnMut(DocId, Score) -> Score,
|
||||
) -> crate::Result<()> {
|
||||
let scorer = self.complex_scorer::<SumWithCoordsCombiner>(reader, 1.0)?;
|
||||
let scorer = self.complex_scorer(reader, 1.0, &self.score_combiner_fn)?;
|
||||
match scorer {
|
||||
SpecializedScorer::TermUnion(term_scorers) => {
|
||||
super::block_wand(term_scorers, threshold, callback);
|
||||
|
||||
@@ -4,6 +4,7 @@ mod boolean_weight;
|
||||
|
||||
pub(crate) use self::block_wand::{block_wand, block_wand_single_scorer};
|
||||
pub use self::boolean_query::BooleanQuery;
|
||||
pub(crate) use self::boolean_weight::BooleanWeight;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt;
|
||||
|
||||
use crate::fastfield::AliveBitSet;
|
||||
@@ -49,8 +48,8 @@ impl Query for BoostQuery {
|
||||
Ok(boosted_weight)
|
||||
}
|
||||
|
||||
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
|
||||
self.query.query_terms(terms)
|
||||
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
|
||||
self.query.query_terms(visitor)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
176
src/query/const_score_query.rs
Normal file
176
src/query/const_score_query.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
use std::fmt;
|
||||
|
||||
use crate::query::{Explanation, Query, Scorer, Weight};
|
||||
use crate::{DocId, DocSet, Score, Searcher, SegmentReader, TantivyError, Term};
|
||||
|
||||
/// `ConstScoreQuery` is a wrapper over a query to provide a constant score.
|
||||
/// It can avoid unnecessary score computation on the wrapped query.
|
||||
///
|
||||
/// The document set matched by the `ConstScoreQuery` is strictly the same as the underlying query.
|
||||
/// The configured score is used for each document.
|
||||
pub struct ConstScoreQuery {
|
||||
query: Box<dyn Query>,
|
||||
score: Score,
|
||||
}
|
||||
|
||||
impl ConstScoreQuery {
|
||||
/// Builds a const score query.
|
||||
pub fn new(query: Box<dyn Query>, score: Score) -> ConstScoreQuery {
|
||||
ConstScoreQuery { query, score }
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for ConstScoreQuery {
|
||||
fn clone(&self) -> Self {
|
||||
ConstScoreQuery {
|
||||
query: self.query.box_clone(),
|
||||
score: self.score,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for ConstScoreQuery {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Const(score={}, query={:?})", self.score, self.query)
|
||||
}
|
||||
}
|
||||
|
||||
impl Query for ConstScoreQuery {
|
||||
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
|
||||
let inner_weight = self.query.weight(searcher, scoring_enabled)?;
|
||||
Ok(if scoring_enabled {
|
||||
Box::new(ConstWeight::new(inner_weight, self.score))
|
||||
} else {
|
||||
inner_weight
|
||||
})
|
||||
}
|
||||
|
||||
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
|
||||
self.query.query_terms(visitor);
|
||||
}
|
||||
}
|
||||
|
||||
struct ConstWeight {
|
||||
weight: Box<dyn Weight>,
|
||||
score: Score,
|
||||
}
|
||||
|
||||
impl ConstWeight {
|
||||
pub fn new(weight: Box<dyn Weight>, score: Score) -> Self {
|
||||
ConstWeight { weight, score }
|
||||
}
|
||||
}
|
||||
|
||||
impl Weight for ConstWeight {
|
||||
fn scorer(&self, reader: &SegmentReader, boost: Score) -> crate::Result<Box<dyn Scorer>> {
|
||||
let inner_scorer = self.weight.scorer(reader, boost)?;
|
||||
Ok(Box::new(ConstScorer::new(inner_scorer, boost * self.score)))
|
||||
}
|
||||
|
||||
fn explain(&self, reader: &SegmentReader, doc: u32) -> crate::Result<Explanation> {
|
||||
let mut scorer = self.scorer(reader, 1.0)?;
|
||||
if scorer.seek(doc) != doc {
|
||||
return Err(TantivyError::InvalidArgument(format!(
|
||||
"Document #({}) does not match",
|
||||
doc
|
||||
)));
|
||||
}
|
||||
let mut explanation = Explanation::new("Const", self.score);
|
||||
let underlying_explanation = self.weight.explain(reader, doc)?;
|
||||
explanation.add_detail(underlying_explanation);
|
||||
Ok(explanation)
|
||||
}
|
||||
|
||||
fn count(&self, reader: &SegmentReader) -> crate::Result<u32> {
|
||||
self.weight.count(reader)
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps a `DocSet` and simply returns a constant `Scorer`.
|
||||
/// The `ConstScorer` is useful if you have a `DocSet` where
|
||||
/// you needed a scorer.
|
||||
///
|
||||
/// The `ConstScorer`'s constant score can be set
|
||||
/// by calling `.set_score(...)`.
|
||||
pub struct ConstScorer<TDocSet: DocSet> {
|
||||
docset: TDocSet,
|
||||
score: Score,
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet> ConstScorer<TDocSet> {
|
||||
/// Creates a new `ConstScorer`.
|
||||
pub fn new(docset: TDocSet, score: Score) -> ConstScorer<TDocSet> {
|
||||
ConstScorer { docset, score }
|
||||
}
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
|
||||
fn from(docset: TDocSet) -> Self {
|
||||
ConstScorer::new(docset, 1.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
|
||||
fn advance(&mut self) -> DocId {
|
||||
self.docset.advance()
|
||||
}
|
||||
|
||||
fn seek(&mut self, target: DocId) -> DocId {
|
||||
self.docset.seek(target)
|
||||
}
|
||||
|
||||
fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
|
||||
self.docset.fill_buffer(buffer)
|
||||
}
|
||||
|
||||
fn doc(&self) -> DocId {
|
||||
self.docset.doc()
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> u32 {
|
||||
self.docset.size_hint()
|
||||
}
|
||||
}
|
||||
|
||||
impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
|
||||
fn score(&mut self) -> Score {
|
||||
self.score
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::ConstScoreQuery;
|
||||
use crate::query::{AllQuery, Query};
|
||||
use crate::schema::Schema;
|
||||
use crate::{DocAddress, Document, Index};
|
||||
|
||||
#[test]
|
||||
fn test_const_score_query_explain() -> crate::Result<()> {
|
||||
let schema = Schema::builder().build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_for_tests()?;
|
||||
index_writer.add_document(Document::new())?;
|
||||
index_writer.commit()?;
|
||||
let reader = index.reader()?;
|
||||
let searcher = reader.searcher();
|
||||
let query = ConstScoreQuery::new(Box::new(AllQuery), 0.42);
|
||||
let explanation = query.explain(&searcher, DocAddress::new(0, 0u32)).unwrap();
|
||||
assert_eq!(
|
||||
explanation.to_pretty_json(),
|
||||
r#"{
|
||||
"value": 0.42,
|
||||
"description": "Const",
|
||||
"details": [
|
||||
{
|
||||
"value": 1.0,
|
||||
"description": "AllQuery",
|
||||
"context": []
|
||||
}
|
||||
],
|
||||
"context": []
|
||||
}"#
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
131
src/query/disjunction_max_query.rs
Normal file
131
src/query/disjunction_max_query.rs
Normal file
@@ -0,0 +1,131 @@
|
||||
use tantivy_query_grammar::Occur;
|
||||
|
||||
use crate::query::{BooleanWeight, DisjunctionMaxCombiner, Query, Weight};
|
||||
use crate::{Score, Searcher, Term};
|
||||
|
||||
/// The disjunction max query кeturns documents matching one or more wrapped queries,
|
||||
/// called query clauses or clauses.
|
||||
///
|
||||
/// If a returned document matches multiple query clauses,
|
||||
/// the `DisjunctionMaxQuery` assigns the document the highest relevance score from any matching
|
||||
/// clause, plus a tie breaking increment for any additional matching subqueries.
|
||||
///
|
||||
/// ```rust
|
||||
/// use tantivy::collector::TopDocs;
|
||||
/// use tantivy::doc;
|
||||
/// use tantivy::query::{DisjunctionMaxQuery, Query, QueryClone, TermQuery};
|
||||
/// use tantivy::schema::{IndexRecordOption, Schema, TEXT};
|
||||
/// use tantivy::Term;
|
||||
/// use tantivy::Index;
|
||||
///
|
||||
/// fn main() -> tantivy::Result<()> {
|
||||
/// let mut schema_builder = Schema::builder();
|
||||
/// let title = schema_builder.add_text_field("title", TEXT);
|
||||
/// let body = schema_builder.add_text_field("body", TEXT);
|
||||
/// let schema = schema_builder.build();
|
||||
/// let index = Index::create_in_ram(schema);
|
||||
/// {
|
||||
/// let mut index_writer = index.writer(3_000_000)?;
|
||||
/// index_writer.add_document(doc!(
|
||||
/// title => "The Name of Girl",
|
||||
/// ))?;
|
||||
/// index_writer.add_document(doc!(
|
||||
/// title => "The Diary of Muadib",
|
||||
/// ))?;
|
||||
/// index_writer.add_document(doc!(
|
||||
/// title => "The Diary of Girl",
|
||||
/// ))?;
|
||||
/// index_writer.commit()?;
|
||||
/// }
|
||||
///
|
||||
/// let reader = index.reader()?;
|
||||
/// let searcher = reader.searcher();
|
||||
///
|
||||
/// // Make TermQuery's for "girl" and "diary" in the title
|
||||
/// let girl_term_query: Box<dyn Query> = Box::new(TermQuery::new(
|
||||
/// Term::from_field_text(title, "girl"),
|
||||
/// IndexRecordOption::Basic,
|
||||
/// ));
|
||||
/// let diary_term_query: Box<dyn Query> = Box::new(TermQuery::new(
|
||||
/// Term::from_field_text(title, "diary"),
|
||||
/// IndexRecordOption::Basic,
|
||||
/// ));
|
||||
///
|
||||
/// // TermQuery "diary" and "girl" should be present and only one should be accounted in score
|
||||
/// let queries1 = vec![diary_term_query.box_clone(), girl_term_query.box_clone()];
|
||||
/// let diary_and_girl = DisjunctionMaxQuery::new(queries1);
|
||||
/// let documents = searcher.search(&diary_and_girl, &TopDocs::with_limit(3))?;
|
||||
/// assert_eq!(documents[0].0, documents[1].0);
|
||||
/// assert_eq!(documents[1].0, documents[2].0);
|
||||
///
|
||||
/// // TermQuery "diary" and "girl" should be present
|
||||
/// // and one should be accounted with multiplier 0.7
|
||||
/// let queries2 = vec![diary_term_query.box_clone(), girl_term_query.box_clone()];
|
||||
/// let tie_breaker = 0.7;
|
||||
/// let diary_and_girl_with_tie_breaker = DisjunctionMaxQuery::with_tie_breaker(queries2, tie_breaker);
|
||||
/// let documents = searcher.search(&diary_and_girl_with_tie_breaker, &TopDocs::with_limit(3))?;
|
||||
/// assert_eq!(documents[1].0, documents[2].0);
|
||||
/// // For this test all terms brings the same score. So we can do easy math and assume that
|
||||
/// // `DisjunctionMaxQuery` with tie breakers score should be equal
|
||||
/// // to term1 score + `tie_breaker` * term2 score or (1.0 + tie_breaker) * term score
|
||||
/// assert!(f32::abs(documents[0].0 - documents[1].0 * (1.0 + tie_breaker)) < 0.001);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct DisjunctionMaxQuery {
|
||||
disjuncts: Vec<Box<dyn Query>>,
|
||||
tie_breaker: Score,
|
||||
}
|
||||
|
||||
impl Clone for DisjunctionMaxQuery {
|
||||
fn clone(&self) -> Self {
|
||||
DisjunctionMaxQuery::with_tie_breaker(
|
||||
self.disjuncts
|
||||
.iter()
|
||||
.map(|disjunct| disjunct.box_clone())
|
||||
.collect::<Vec<_>>(),
|
||||
self.tie_breaker,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Query for DisjunctionMaxQuery {
|
||||
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
|
||||
let disjuncts = self
|
||||
.disjuncts
|
||||
.iter()
|
||||
.map(|disjunct| Ok((Occur::Should, disjunct.weight(searcher, scoring_enabled)?)))
|
||||
.collect::<crate::Result<_>>()?;
|
||||
let tie_breaker = self.tie_breaker;
|
||||
Ok(Box::new(BooleanWeight::new(
|
||||
disjuncts,
|
||||
scoring_enabled,
|
||||
Box::new(move || DisjunctionMaxCombiner::with_tie_breaker(tie_breaker)),
|
||||
)))
|
||||
}
|
||||
|
||||
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
|
||||
for disjunct in &self.disjuncts {
|
||||
disjunct.query_terms(visitor);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DisjunctionMaxQuery {
|
||||
/// Creates a new `DisjunctionMaxQuery` with tie breaker.
|
||||
pub fn with_tie_breaker(
|
||||
disjuncts: Vec<Box<dyn Query>>,
|
||||
tie_breaker: Score,
|
||||
) -> DisjunctionMaxQuery {
|
||||
DisjunctionMaxQuery {
|
||||
disjuncts,
|
||||
tie_breaker,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new `DisjunctionMaxQuery` with no tie breaker.
|
||||
pub fn new(disjuncts: Vec<Box<dyn Query>>) -> DisjunctionMaxQuery {
|
||||
DisjunctionMaxQuery::with_tie_breaker(disjuncts, 0.0)
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,8 @@ mod bitset;
|
||||
mod bm25;
|
||||
mod boolean_query;
|
||||
mod boost_query;
|
||||
mod const_score_query;
|
||||
mod disjunction_max_query;
|
||||
mod empty_query;
|
||||
mod exclude;
|
||||
mod explanation;
|
||||
@@ -34,7 +36,10 @@ pub use self::automaton_weight::AutomatonWeight;
|
||||
pub use self::bitset::BitSetDocSet;
|
||||
pub(crate) use self::bm25::Bm25Weight;
|
||||
pub use self::boolean_query::BooleanQuery;
|
||||
pub(crate) use self::boolean_query::BooleanWeight;
|
||||
pub use self::boost_query::BoostQuery;
|
||||
pub use self::const_score_query::{ConstScoreQuery, ConstScorer};
|
||||
pub use self::disjunction_max_query::DisjunctionMaxQuery;
|
||||
pub use self::empty_query::{EmptyQuery, EmptyScorer, EmptyWeight};
|
||||
pub use self::exclude::Exclude;
|
||||
pub use self::explanation::Explanation;
|
||||
@@ -49,7 +54,10 @@ pub use self::query_parser::{QueryParser, QueryParserError};
|
||||
pub use self::range_query::RangeQuery;
|
||||
pub use self::regex_query::RegexQuery;
|
||||
pub use self::reqopt_scorer::RequiredOptionalScorer;
|
||||
pub use self::scorer::{ConstScorer, Scorer};
|
||||
pub use self::score_combiner::{
|
||||
DisjunctionMaxCombiner, ScoreCombiner, SumCombiner, SumWithCoordsCombiner,
|
||||
};
|
||||
pub use self::scorer::Scorer;
|
||||
pub use self::term_query::TermQuery;
|
||||
pub use self::union::Union;
|
||||
#[cfg(test)]
|
||||
@@ -58,8 +66,6 @@ pub use self::weight::Weight;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use crate::query::QueryParser;
|
||||
use crate::schema::{Schema, TEXT};
|
||||
use crate::{Index, Term};
|
||||
@@ -74,49 +80,34 @@ mod tests {
|
||||
let term_a = Term::from_field_text(text_field, "a");
|
||||
let term_b = Term::from_field_text(text_field, "b");
|
||||
{
|
||||
let mut terms: BTreeMap<Term, bool> = Default::default();
|
||||
query_parser
|
||||
.parse_query("a")
|
||||
.unwrap()
|
||||
.query_terms(&mut terms);
|
||||
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
|
||||
assert_eq!(vec![(&term_a, &false)], terms);
|
||||
let query = query_parser.parse_query("a").unwrap();
|
||||
let mut terms = Vec::new();
|
||||
query.query_terms(&mut |term, pos| terms.push((term, pos)));
|
||||
assert_eq!(vec![(&term_a, false)], terms);
|
||||
}
|
||||
{
|
||||
let mut terms: BTreeMap<Term, bool> = Default::default();
|
||||
query_parser
|
||||
.parse_query("a b")
|
||||
.unwrap()
|
||||
.query_terms(&mut terms);
|
||||
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
|
||||
assert_eq!(vec![(&term_a, &false), (&term_b, &false)], terms);
|
||||
let query = query_parser.parse_query("a b").unwrap();
|
||||
let mut terms = Vec::new();
|
||||
query.query_terms(&mut |term, pos| terms.push((term, pos)));
|
||||
assert_eq!(vec![(&term_a, false), (&term_b, false)], terms);
|
||||
}
|
||||
{
|
||||
let mut terms: BTreeMap<Term, bool> = Default::default();
|
||||
query_parser
|
||||
.parse_query("\"a b\"")
|
||||
.unwrap()
|
||||
.query_terms(&mut terms);
|
||||
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
|
||||
assert_eq!(vec![(&term_a, &true), (&term_b, &true)], terms);
|
||||
let query = query_parser.parse_query("\"a b\"").unwrap();
|
||||
let mut terms = Vec::new();
|
||||
query.query_terms(&mut |term, pos| terms.push((term, pos)));
|
||||
assert_eq!(vec![(&term_a, true), (&term_b, true)], terms);
|
||||
}
|
||||
{
|
||||
let mut terms: BTreeMap<Term, bool> = Default::default();
|
||||
query_parser
|
||||
.parse_query("a a a a a")
|
||||
.unwrap()
|
||||
.query_terms(&mut terms);
|
||||
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
|
||||
assert_eq!(vec![(&term_a, &false)], terms);
|
||||
let query = query_parser.parse_query("a a a a a").unwrap();
|
||||
let mut terms = Vec::new();
|
||||
query.query_terms(&mut |term, pos| terms.push((term, pos)));
|
||||
assert_eq!(vec![(&term_a, false); 5], terms);
|
||||
}
|
||||
{
|
||||
let mut terms: BTreeMap<Term, bool> = Default::default();
|
||||
query_parser
|
||||
.parse_query("a -b")
|
||||
.unwrap()
|
||||
.query_terms(&mut terms);
|
||||
let terms: Vec<(&Term, &bool)> = terms.iter().collect();
|
||||
assert_eq!(vec![(&term_a, &false), (&term_b, &false)], terms);
|
||||
let query = query_parser.parse_query("a -b").unwrap();
|
||||
let mut terms = Vec::new();
|
||||
query.query_terms(&mut |term, pos| terms.push((term, pos)));
|
||||
assert_eq!(vec![(&term_a, false), (&term_b, false)], terms);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use super::PhraseWeight;
|
||||
use crate::core::searcher::Searcher;
|
||||
use crate::query::bm25::Bm25Weight;
|
||||
@@ -129,9 +127,9 @@ impl Query for PhraseQuery {
|
||||
Ok(Box::new(phrase_weight))
|
||||
}
|
||||
|
||||
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
|
||||
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
|
||||
for (_, term) in &self.phrase_terms {
|
||||
terms.insert(term.clone(), true);
|
||||
visitor(term, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt;
|
||||
|
||||
use downcast_rs::impl_downcast;
|
||||
@@ -67,12 +66,15 @@ pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Extract all of the terms associated to the query and insert them in the
|
||||
/// term set given in arguments.
|
||||
/// Extract all of the terms associated to the query and pass them to the
|
||||
/// given closure.
|
||||
///
|
||||
/// Each term is associated with a boolean indicating whether
|
||||
/// Positions are required or not.
|
||||
fn query_terms(&self, _term_set: &mut BTreeMap<Term, bool>) {}
|
||||
/// positions are required or not.
|
||||
///
|
||||
/// Note that there can be multiple instances of any given term
|
||||
/// in a query and deduplication must be handled by the visitor.
|
||||
fn query_terms<'a>(&'a self, _visitor: &mut dyn FnMut(&'a Term, bool)) {}
|
||||
}
|
||||
|
||||
/// Implements `box_clone`.
|
||||
@@ -98,8 +100,8 @@ impl Query for Box<dyn Query> {
|
||||
self.as_ref().count(searcher)
|
||||
}
|
||||
|
||||
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
|
||||
self.as_ref().query_terms(terms);
|
||||
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
|
||||
self.as_ref().query_terms(visitor);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ pub struct QueryParser {
fn all_negative(ast: &LogicalAst) -> bool {
match ast {
LogicalAst::Leaf(_) => false,
LogicalAst::Boost(ref child_ast, _) => all_negative(&*child_ast),
LogicalAst::Boost(ref child_ast, _) => all_negative(child_ast),
LogicalAst::Clause(children) => children
.iter()
.all(|(ref occur, child)| (*occur == Occur::MustNot) || all_negative(child)),

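For context, a hedged illustration of what `all_negative` computes (example queries hypothetical, not from this diff):

// "-a -b" parses to Clause([MustNot(a), MustNot(b)]) -> all_negative == true
// "a -b"  parses to Clause([Should(a), MustNot(b)])  -> all_negative == false
// A purely negative query cannot match any document on its own, which is
// why the parser needs to detect this case.
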
@@ -77,3 +77,40 @@ impl ScoreCombiner for SumWithCoordsCombiner {
self.score
}
}

/// Take max score of different scorers
/// and optionally sum it with other matches multiplied by `tie_breaker`
#[derive(Default, Clone, Copy)]
pub struct DisjunctionMaxCombiner {
max: Score,
sum: Score,
tie_breaker: Score,
}

impl DisjunctionMaxCombiner {
/// Creates `DisjunctionMaxCombiner` with tie breaker
pub fn with_tie_breaker(tie_breaker: Score) -> DisjunctionMaxCombiner {
DisjunctionMaxCombiner {
max: 0.0,
sum: 0.0,
tie_breaker,
}
}
}

impl ScoreCombiner for DisjunctionMaxCombiner {
fn update<TScorer: Scorer>(&mut self, scorer: &mut TScorer) {
let score = scorer.score();
self.max = Score::max(score, self.max);
self.sum += score;
}

fn clear(&mut self) {
self.max = 0.0;
self.sum = 0.0;
}

fn score(&self) -> Score {
self.max + (self.sum - self.max) * self.tie_breaker
}
}

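A worked check of the combiner's formula, score = max + (sum - max) * tie_breaker. The helper below is hypothetical and only mirrors `score()` for the arithmetic:

// Standalone rendition of DisjunctionMaxCombiner::score() (Score is an f32).
fn dismax_score(max: f32, sum: f32, tie_breaker: f32) -> f32 {
    max + (sum - max) * tie_breaker
}

// dismax_score(2.0, 3.0, 0.5) == 2.5  (best sub-score plus half the rest)
// dismax_score(2.0, 3.0, 0.0) == 2.0  (pure max, other matches ignored)
// dismax_score(2.0, 3.0, 1.0) == 3.0  (plain sum of all sub-scores)
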
@@ -3,7 +3,7 @@ use std::ops::DerefMut;
use downcast_rs::impl_downcast;

use crate::docset::DocSet;
use crate::{DocId, Score};
use crate::Score;

/// Scored set of documents matching a query within a specific segment.
///
@@ -22,55 +22,3 @@ impl Scorer for Box<dyn Scorer> {
self.deref_mut().score()
}
}

/// Wraps a `DocSet` and simply returns a constant `Scorer`.
/// The `ConstScorer` is useful if you have a `DocSet` where
/// you needed a scorer.
///
/// The `ConstScorer`'s constant score can be set
/// by calling `.set_score(...)`.
pub struct ConstScorer<TDocSet: DocSet> {
docset: TDocSet,
score: Score,
}

impl<TDocSet: DocSet> ConstScorer<TDocSet> {
/// Creates a new `ConstScorer`.
pub fn new(docset: TDocSet, score: Score) -> ConstScorer<TDocSet> {
ConstScorer { docset, score }
}
}

impl<TDocSet: DocSet> From<TDocSet> for ConstScorer<TDocSet> {
fn from(docset: TDocSet) -> Self {
ConstScorer::new(docset, 1.0)
}
}

impl<TDocSet: DocSet> DocSet for ConstScorer<TDocSet> {
fn advance(&mut self) -> DocId {
self.docset.advance()
}

fn seek(&mut self, target: DocId) -> DocId {
self.docset.seek(target)
}

fn fill_buffer(&mut self, buffer: &mut [DocId]) -> usize {
self.docset.fill_buffer(buffer)
}

fn doc(&self) -> DocId {
self.docset.doc()
}

fn size_hint(&self) -> u32 {
self.docset.size_hint()
}
}

impl<TDocSet: DocSet + 'static> Scorer for ConstScorer<TDocSet> {
fn score(&mut self) -> Score {
self.score
}
}

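A minimal usage sketch for `ConstScorer`, assuming the crate-internal `VecDocSet` test helper seen elsewhere in this diff and tantivy's convention that a freshly built `DocSet` is already positioned on its first document:

let mut scorer = ConstScorer::new(VecDocSet::from(vec![1u32, 3, 7]), 1.5);
assert_eq!(scorer.doc(), 1);     // positioned on the first document
assert_eq!(scorer.score(), 1.5); // the same constant for every document
scorer.advance();
assert_eq!(scorer.doc(), 3);
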
@@ -1,4 +1,3 @@
use std::collections::BTreeMap;
use std::fmt;

use super::term_weight::TermWeight;
@@ -121,7 +120,7 @@ impl Query for TermQuery {
self.specialized_weight(searcher, scoring_enabled)?,
))
}
fn query_terms(&self, terms: &mut BTreeMap<Term, bool>) {
terms.insert(self.term.clone(), false);
fn query_terms<'a>(&'a self, visitor: &mut dyn FnMut(&'a Term, bool)) {
visitor(&self.term, false);
}
}

@@ -36,34 +36,6 @@ pub struct Union<TScorer, TScoreCombiner = DoNothingCombiner> {
score: Score,
}

impl<TScorer, TScoreCombiner> From<Vec<TScorer>> for Union<TScorer, TScoreCombiner>
where
TScoreCombiner: ScoreCombiner,
TScorer: Scorer,
{
fn from(docsets: Vec<TScorer>) -> Union<TScorer, TScoreCombiner> {
let non_empty_docsets: Vec<TScorer> = docsets
.into_iter()
.filter(|docset| docset.doc() != TERMINATED)
.collect();
let mut union = Union {
docsets: non_empty_docsets,
bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
scores: Box::new([TScoreCombiner::default(); HORIZON as usize]),
cursor: HORIZON_NUM_TINYBITSETS,
offset: 0,
doc: 0,
score: 0.0,
};
if union.refill() {
union.advance();
} else {
union.doc = TERMINATED;
}
union
}
}

fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
scorers: &mut Vec<TScorer>,
bitsets: &mut [TinySet; HORIZON_NUM_TINYBITSETS],
@@ -90,6 +62,31 @@ fn refill<TScorer: Scorer, TScoreCombiner: ScoreCombiner>(
}

impl<TScorer: Scorer, TScoreCombiner: ScoreCombiner> Union<TScorer, TScoreCombiner> {
pub(crate) fn build(
docsets: Vec<TScorer>,
score_combiner_fn: impl FnOnce() -> TScoreCombiner,
) -> Union<TScorer, TScoreCombiner> {
let non_empty_docsets: Vec<TScorer> = docsets
.into_iter()
.filter(|docset| docset.doc() != TERMINATED)
.collect();
let mut union = Union {
docsets: non_empty_docsets,
bitsets: Box::new([TinySet::empty(); HORIZON_NUM_TINYBITSETS]),
scores: Box::new([score_combiner_fn(); HORIZON as usize]),
cursor: HORIZON_NUM_TINYBITSETS,
offset: 0,
doc: 0,
score: 0.0,
};
if union.refill() {
union.advance();
} else {
union.doc = TERMINATED;
}
union
}

fn refill(&mut self) -> bool {
if let Some(min_doc) = self.docsets.iter().map(DocSet::doc).min() {
self.offset = min_doc;
@@ -179,7 +176,6 @@ where

// The target is outside of the buffered horizon.
// advance all docsets to a doc >= to the target.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::clippy::collapsible_if))]
unordered_drain_filter(&mut self.docsets, |docset| {
if docset.doc() < target {
docset.seek(target);
@@ -266,12 +262,13 @@ mod tests {
let union_vals: Vec<u32> = val_set.into_iter().collect();
let mut union_expected = VecDocSet::from(union_vals);
let make_union = || {
Union::from(
Union::build(
vals.iter()
.cloned()
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<ConstScorer<VecDocSet>>>(),
DoNothingCombiner::default,
)
};
let mut union: Union<_, DoNothingCombiner> = make_union();
@@ -312,13 +309,14 @@ mod tests {
btree_set.extend(docs.iter().cloned());
}
let docset_factory = || {
let res: Box<dyn DocSet> = Box::new(Union::<_, DoNothingCombiner>::from(
let res: Box<dyn DocSet> = Box::new(Union::build(
docs_list
.iter()
.cloned()
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<_>>(),
DoNothingCombiner::default,
));
res
};
@@ -346,10 +344,13 @@ mod tests {

#[test]
fn test_union_skip_corner_case3() {
let mut docset = Union::<_, DoNothingCombiner>::from(vec![
ConstScorer::from(VecDocSet::from(vec![0u32, 5u32])),
ConstScorer::from(VecDocSet::from(vec![1u32, 4u32])),
]);
let mut docset = Union::build(
vec![
ConstScorer::from(VecDocSet::from(vec![0u32, 5u32])),
ConstScorer::from(VecDocSet::from(vec![1u32, 4u32])),
],
DoNothingCombiner::default,
);
assert_eq!(docset.doc(), 0u32);
assert_eq!(docset.seek(0u32), 0u32);
assert_eq!(docset.seek(0u32), 0u32);
@@ -405,12 +406,13 @@ mod bench {
tests::sample_with_seed(100_000, 0.2, 1),
];
bench.iter(|| {
let mut v = Union::<_, DoNothingCombiner>::from(
let mut v = Union::build(
union_docset
.iter()
.map(|doc_ids| VecDocSet::from(doc_ids.clone()))
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<_>>(),
DoNothingCombiner::default,
);
while v.doc() != TERMINATED {
v.advance();
@@ -425,12 +427,13 @@ mod bench {
tests::sample_with_seed(100_000, 0.001, 2),
];
bench.iter(|| {
let mut v = Union::<_, DoNothingCombiner>::from(
let mut v = Union::build(
union_docset
.iter()
.map(|doc_ids| VecDocSet::from(doc_ids.clone()))
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<_>>(),
DoNothingCombiner::default,
);
while v.doc() != TERMINATED {
v.advance();

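The `FnOnce` parameter is what the removed `From<Vec<_>>` impl could not express: a combiner that needs constructor arguments. A hedged sketch of the crate-internal call (`scorers` is hypothetical):

// Build a union scored by the DisjunctionMaxCombiner introduced above,
// instead of the zero-state DoNothingCombiner used in the tests.
let union = Union::build(scorers, || DisjunctionMaxCombiner::with_tie_breaker(0.5));
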
@@ -164,21 +164,18 @@ impl InnerIndexReader {
doc_store_cache_size: usize,
index: Index,
warming_state: WarmingState,
// The searcher_generation_inventory is not used as source, but as target to track the
// loaded segments.
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
let searcher_generation_counter: Arc<AtomicU64> = Default::default();
let segment_readers = Self::open_segment_readers(&index)?;
let searcher_generation = Self::create_new_searcher_generation(
&segment_readers,
&searcher_generation_counter,
&searcher_generation_inventory,
);

let searcher = Self::create_searcher(
&index,
doc_store_cache_size,
&warming_state,
searcher_generation,
&searcher_generation_counter,
&searcher_generation_inventory,
)?;
Ok(InnerIndexReader {
doc_store_cache_size,
@@ -204,12 +201,12 @@ impl InnerIndexReader {
Ok(segment_readers)
}

fn create_new_searcher_generation(
fn track_segment_readers_in_inventory(
segment_readers: &[SegmentReader],
searcher_generation_counter: &Arc<AtomicU64>,
searcher_generation_inventory: &Inventory<SearcherGeneration>,
) -> TrackedObject<SearcherGeneration> {
let generation_id = searcher_generation_counter.fetch_add(1, atomic::Ordering::Relaxed);
let generation_id = searcher_generation_counter.fetch_add(1, atomic::Ordering::AcqRel);
let searcher_generation =
SearcherGeneration::from_segment_readers(segment_readers, generation_id);
searcher_generation_inventory.track(searcher_generation)
@@ -219,9 +216,16 @@ impl InnerIndexReader {
index: &Index,
doc_store_cache_size: usize,
warming_state: &WarmingState,
searcher_generation: TrackedObject<SearcherGeneration>,
searcher_generation_counter: &Arc<AtomicU64>,
searcher_generation_inventory: &Inventory<SearcherGeneration>,
) -> crate::Result<Arc<SearcherInner>> {
let segment_readers = Self::open_segment_readers(index)?;
let searcher_generation = Self::track_segment_readers_in_inventory(
&segment_readers,
searcher_generation_counter,
searcher_generation_inventory,
);

let schema = index.schema();
let searcher = Arc::new(SearcherInner::new(
schema,
@@ -236,17 +240,12 @@ impl InnerIndexReader {
}

fn reload(&self) -> crate::Result<()> {
let segment_readers = Self::open_segment_readers(&self.index)?;
let searcher_generation = Self::create_new_searcher_generation(
&segment_readers,
&self.searcher_generation_counter,
&self.searcher_generation_inventory,
);
let searcher = Self::create_searcher(
&self.index,
self.doc_store_cache_size,
&self.warming_state,
searcher_generation,
&self.searcher_generation_counter,
&self.searcher_generation_inventory,
)?;

self.searcher.store(searcher);

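A minimal sketch (names hypothetical) of the generation-counter pattern above. `AcqRel` additionally orders the increment with respect to surrounding atomic operations, where `Relaxed` only guarantees that the increment itself is atomic:

use std::sync::atomic::{AtomicU64, Ordering};

fn next_generation_id(counter: &AtomicU64) -> u64 {
    // fetch_add returns the previous value, so ids are unique and increasing.
    counter.fetch_add(1, Ordering::AcqRel)
}
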
@@ -7,6 +7,7 @@ use std::string::FromUtf8Error;
use common::BinarySerializable;
use once_cell::sync::Lazy;
use regex::Regex;
use serde::de::Error as _;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

const SLASH_BYTE: u8 = b'/';
@@ -230,7 +231,9 @@ impl Serialize for Facet {
impl<'de> Deserialize<'de> for Facet {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de> {
<&'de str as Deserialize<'de>>::deserialize(deserializer).map(Facet::from)
<Cow<'de, str> as Deserialize<'de>>::deserialize(deserializer).and_then(|path| {
Facet::from_text(&*path).map_err(|err| D::Error::custom(err.to_string()))
})
}
}

@@ -327,4 +330,25 @@ mod tests {
assert!(Facet::from("/").is_prefix_of(&Facet::from("/foobar")));
assert!(!Facet::from("/").is_prefix_of(&Facet::from("/")));
}

#[test]
fn deserialize_from_borrowed_string() {
let facet = serde_json::from_str::<Facet>(r#""/foo/bar""#).unwrap();
assert_eq!(facet, Facet::from_path(["foo", "bar"]));
}

#[test]
fn deserialize_from_owned_string() {
let facet = serde_json::from_str::<Facet>(r#""/foo/\u263A""#).unwrap();
assert_eq!(facet, Facet::from_path(["foo", "☺"]));
}

#[test]
fn deserialize_from_invalid_string() {
let error = serde_json::from_str::<Facet>(r#""foo/bar""#).unwrap_err();
assert_eq!(
error.to_string(),
"Failed to parse the facet string: 'foo/bar'"
);
}
}

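The move from `&'de str` to `Cow<'de, str>` matters because a JSON string containing escapes (like the `\u263A` in the test above) cannot be deserialized into a borrowed `&str`. A generic sketch of the pattern, with a hypothetical helper that is not tantivy API:

use std::borrow::Cow;

use serde::{Deserialize, Deserializer};

// Accepts both plain and escaped JSON strings, unlike a bare &str target.
fn deserialize_path<'de, D>(deserializer: D) -> Result<String, D::Error>
where D: Deserializer<'de> {
    let path = <Cow<'de, str>>::deserialize(deserializer)?;
    Ok(path.into_owned())
}
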
@@ -1,5 +1,5 @@
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::ops::Range;

use htmlescape::encode_minimal;
@@ -7,7 +7,7 @@ use htmlescape::encode_minimal;
use crate::query::Query;
use crate::schema::{Field, Value};
use crate::tokenizer::{TextAnalyzer, Token};
use crate::{Document, Score, Searcher};
use crate::{Document, Score, Searcher, Term};

const DEFAULT_MAX_NUM_CHARS: usize = 150;

@@ -79,7 +79,7 @@ impl Snippet {
let mut html = String::new();
let mut start_from: usize = 0;

for item in self.highlighted.iter() {
for item in collapse_overlapped_ranges(&self.highlighted) {
html.push_str(&encode_minimal(&self.fragment[start_from..item.start]));
html.push_str(HIGHLIGHTEN_PREFIX);
html.push_str(&encode_minimal(&self.fragment[item.clone()]));
@@ -186,6 +186,53 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
}
}

/// Returns ranges that are collapsed into non-overlapped ranges.
///
/// ## Examples
/// - [0..1, 2..3] -> [0..1, 2..3] # no overlap
/// - [0..1, 1..2] -> [0..1, 1..2] # no overlap
/// - [0..2, 1..2] -> [0..2] # collapsed
/// - [0..2, 1..3] -> [0..3] # collapsed
/// - [0..3, 1..2] -> [0..3] # second range's end is also inside of the first range
///
/// Note: This function assumes `ranges` is sorted by `Range.start` in ascending order.
fn collapse_overlapped_ranges(ranges: &[Range<usize>]) -> Vec<Range<usize>> {
debug_assert!(is_sorted(ranges.iter().map(|range| range.start)));

let mut result = Vec::new();
let mut ranges_it = ranges.iter();

let mut current = match ranges_it.next() {
Some(range) => range.clone(),
None => return result,
};

for range in ranges {
if current.end > range.start {
current = current.start..std::cmp::max(current.end, range.end);
} else {
result.push(current);
current = range.clone();
}
}

result.push(current);
result
}

fn is_sorted(mut it: impl Iterator<Item = usize>) -> bool {
if let Some(first) = it.next() {
let mut prev = first;
for item in it {
if item < prev {
return false;
}
prev = item;
}
}
true
}

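A worked trace of `collapse_overlapped_ranges` on a hypothetical input, following the loop above (the first iteration compares `current` with itself and is a no-op):

// current = 0..2
// vs 1..3: 2 > 1, overlap   -> current = 0..3
// vs 3..4: 3 > 3 is false   -> push 0..3, current = 3..4
// vs 5..6: 4 > 5 is false   -> push 3..4, current = 5..6
// final push                -> [0..3, 3..4, 5..6]
assert_eq!(
    collapse_overlapped_ranges(&[0..2, 1..3, 3..4, 5..6]),
    vec![0..3, 3..4, 5..6]
);
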
/// `SnippetGenerator`
///
/// # Example
@@ -255,19 +302,20 @@ impl SnippetGenerator {
query: &dyn Query,
field: Field,
) -> crate::Result<SnippetGenerator> {
let mut terms = BTreeMap::new();
query.query_terms(&mut terms);
let mut terms_text: BTreeMap<String, Score> = Default::default();
for (term, _) in terms {
if term.field() != field {
continue;
let mut terms: BTreeSet<&Term> = BTreeSet::new();
query.query_terms(&mut |term, _| {
if term.field() == field {
terms.insert(term);
}
});
let mut terms_text: BTreeMap<String, Score> = Default::default();
for term in terms {
let term_str = if let Some(term_str) = term.as_str() {
term_str
} else {
continue;
};
let doc_freq = searcher.doc_freq(&term)?;
let doc_freq = searcher.doc_freq(term)?;
if doc_freq > 0 {
let score = 1.0 / (1.0 + doc_freq as Score);
terms_text.insert(term_str.to_string(), score);
@@ -319,10 +367,10 @@ mod tests {

use maplit::btreemap;

use super::{search_fragments, select_best_fragment_combination};
use super::{collapse_overlapped_ranges, search_fragments, select_best_fragment_combination};
use crate::query::QueryParser;
use crate::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions, TEXT};
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::{NgramTokenizer, SimpleTokenizer};
use crate::{Index, SnippetGenerator};

const TEST_TEXT: &str = r#"Rust is a systems programming language sponsored by
@@ -587,4 +635,44 @@ Survey in 2016, 2017, and 2018."#;
}
Ok(())
}

#[test]
fn test_collapse_overlapped_ranges() {
assert_eq!(&collapse_overlapped_ranges(&[0..1, 2..3,]), &[0..1, 2..3]);
assert_eq!(
collapse_overlapped_ranges(&vec![0..1, 1..2,]),
vec![0..1, 1..2]
);
assert_eq!(collapse_overlapped_ranges(&[0..2, 1..2,]), vec![0..2]);
assert_eq!(collapse_overlapped_ranges(&[0..2, 1..3,]), vec![0..3]);
assert_eq!(collapse_overlapped_ranges(&[0..3, 1..2,]), vec![0..3]);
}

#[test]
fn test_snippet_with_overlapped_highlighted_ranges() {
let text = "abc";

let mut terms = BTreeMap::new();
terms.insert(String::from("ab"), 0.9);
terms.insert(String::from("bc"), 1.0);

let fragments = search_fragments(
&From::from(NgramTokenizer::all_ngrams(2, 2)),
text,
&terms,
3,
);

assert_eq!(fragments.len(), 1);
{
let first = &fragments[0];
assert_eq!(first.score, 1.9);
assert_eq!(first.start_offset, 0);
assert_eq!(first.stop_offset, 3);
}

let snippet = select_best_fragment_combination(&fragments[..], text);
assert_eq!(snippet.fragment, "abc");
assert_eq!(snippet.to_html(), "<b>abc</b>");
}
}
