Compare commits

..

88 Commits

Author SHA1 Message Date
Pascal Seitz
0b1add0ec6 move rand to optional dependencies
closes #1117
2021-07-21 17:49:24 +01:00
François Massot
1db76dd9cf Merge pull request #1113 from shikhar/patch-1
stale comments in segment_reader.rs
2021-07-20 23:02:20 +02:00
François Massot
467a9517db Merge pull request #1114 from shikhar/patch-2
FilterCollector doc fix
2021-07-20 21:02:28 +02:00
Shikhar Bhushan
b361315a67 FilterCollector doc fix
Other types supported since https://github.com/tantivy-search/tantivy/pull/953/files
2021-07-15 22:55:47 -04:00
Shikhar Bhushan
4e3771bffc stale comments in segment_reader.rs 2021-07-15 22:47:32 -04:00
PSeitz
8176b0335a Merge pull request #1108 from PSeitz/pwnedbytes
move ownedbytes to own crate
2021-07-05 16:07:56 +02:00
Pascal Seitz
811ac98f36 more inlines 2021-07-05 15:49:42 +02:00
François Massot
f4b2e71800 Handle field names with any characters with a known set of special (#1109)
* Handle field names with any characters with a known set of special characters and an escape one

* Update field name validation rule to check only if it has at least one character and does not start with `-`

Closes #1087.
2021-07-05 22:31:36 +09:00
PSeitz
c431cfcf12 extend proptests, fix race condition (#1107)
* extend proptests, fix race condition
* cargo fmt
2021-07-05 18:28:56 +09:00
PSeitz
92f20bc5a2 use nightly image in coverage 2021-07-03 09:38:44 +02:00
PSeitz
57f931da3c Create coverage.yml 2021-07-03 09:35:07 +02:00
Pascal Seitz
9b662e6d03 move ownedbytes to own crate
fixes #1106
2021-07-02 16:51:59 +02:00
PSeitz
18377d949c Merge pull request #1105 from PSeitz/clippy
Fix clippy warnings
2021-07-02 10:01:19 +02:00
Pascal Seitz
e6427b2588 cleanup 2021-07-02 09:21:22 +02:00
Pascal Seitz
0062fe705d cargo fmt 2021-07-01 18:17:08 +02:00
Pascal Seitz
9b3e508753 fix clippy 2021-07-01 18:06:09 +02:00
Pascal Seitz
a1ac63ee1c fix clippy 2021-07-01 18:06:03 +02:00
Pascal Seitz
e496ae0470 clippy fixes 2021-07-01 17:43:50 +02:00
Pascal Seitz
1e4df54ab3 fix clippy 2021-07-01 17:41:53 +02:00
Pascal Seitz
2de249af74 clippy fixes 2021-07-01 17:37:37 +02:00
Pascal Seitz
10f056fbb4 apply clippy fixes 2021-07-01 17:08:44 +02:00
PSeitz
074b09d0c0 Merge pull request #1102 from PSeitz/proptests
extend proptests for sorting and merge
2021-07-01 16:23:53 +02:00
Pascal Seitz
86d0727659 add facet test
closes #1100
2021-07-01 15:36:17 +02:00
Pascal Seitz
be3e1b8718 cargo fmt 2021-07-01 14:02:09 +02:00
Pascal Seitz
8fdf59bdac add search test for proptest 2021-07-01 14:01:30 +02:00
Pascal Seitz
ebebce2102 cargo fmt 2021-07-01 10:47:20 +02:00
Pascal Seitz
8044ec38da test docstore in proptest 2021-07-01 10:15:42 +02:00
Pascal Seitz
7413f87265 use set instead of vec in proptest 2021-07-01 08:28:51 +02:00
PSeitz
aea2e77665 Merge pull request #1097 from PSeitz/multifastfield
Use dynamic fastfield codecs for multivalues fixes #1093
2021-06-30 14:38:26 +02:00
Pascal Seitz
a15845f9fd add merge case to proptest, test multivalue fastfields
#1100
2021-06-30 13:13:33 +02:00
Pascal Seitz
94ac44df4f proptest with optional sorting 2021-06-30 12:06:03 +02:00
Pascal Seitz
f80d804a57 add random commits in proptest 2021-06-30 11:18:07 +02:00
Pascal Seitz
3b5c1d7817 use measure_time 0.7 2021-06-30 11:08:02 +02:00
Pascal Seitz
24274edf81 remove trait impl for &Vec 2021-06-30 09:50:47 +02:00
Paul Masurel
d58497529b Fixed CHANGELOG to include 0.15.2. 2021-06-30 16:34:47 +09:00
Pascal Seitz
130495abab cleanup 2021-06-30 08:57:55 +02:00
Pascal Seitz
9b743d60fb make docid mapping non optional
make docid mapping non optional
add trivial flag for docid mapping
add time measurements
2021-06-30 08:57:55 +02:00
Pascal Seitz
5c9e2ef036 wrap docidmapping in struct 2021-06-30 08:57:55 +02:00
Pascal Seitz
8526434b63 add dynamic fastfield case
add dynamic fastfield for single fast field unsorted
fix scary documentation bug
add num_len instead of len
2021-06-30 08:57:55 +02:00
Pascal Seitz
6ba302c481 Use dynamic fastfield codecs for multivalues fixes #1093
Use dynamic fastfield codecs for multivalues fixes (only sorting case covered)
Rename get to get_val due to conflict with Vec
use u64 precision instead of u32 for get_range, to allow use of the existing fast field interface (actually not sure if it would be better to have a different interface)
2021-06-30 08:57:55 +02:00
Paul Masurel
de92f094aa Closes #1101 fix delete documents with sort by field
Closes #1101

* fix delete documents with sort by field

Co-authored-by: Andre-Philippe Paquet <appaquet@gmail.com>
2021-06-30 15:51:32 +09:00
Evance Soumaoro
c82cee66de exposing min/max value interface on MultiValuedFastField Reader (#1096) 2021-06-23 17:38:50 +09:00
Paul Masurel
6eed05b1ce Revert "Exposing min/max value interface on MultiValuedFastField Reader (#1094)" (#1095)
This reverts commit bb488305c9.
2021-06-23 10:25:11 +09:00
Evance Soumaoro
bb488305c9 Exposing min/max value interface on MultiValuedFastField Reader (#1094)
Exposing min/max value interface on MultiValuedFastField Reader
2021-06-23 08:51:36 +09:00
PSeitz
f05e84f964 add FieldEntry constructor, closes #1086 (#1090) 2021-06-17 10:15:48 +09:00
PSeitz
65546ed22b Merge pull request #1088 from appaquet/fix/store-reader-iterator-take2
Fix corruption in store reader iterator, take 2
2021-06-16 14:44:00 +02:00
Andre-Philippe Paquet
57ae5b27dc fix store reader iterator, take 2 2021-06-16 07:51:39 -04:00
PSeitz
f9531ec3c9 Merge pull request #1085 from PSeitz/fastfieldcompression
use concrete return type, fixes #1084
2021-06-16 13:07:12 +02:00
Pascal Seitz
5b54a32563 use concrete return type, fixes #1084 2021-06-16 12:03:11 +02:00
PSeitz
cd049e28bc Merge pull request #1082 from PSeitz/fastfieldcompression
use dynamic fast field codec for offset index
2021-06-16 11:59:00 +02:00
PSeitz
646e41bec4 Merge pull request #1083 from PSeitz/termdict_block_layout
Move counting writer to common
2021-06-16 08:57:55 +02:00
Pascal Seitz
36528c5e83 move counting writer to common
move counting writer to common
reuse counting writer in fastfield codec
2021-06-16 08:14:04 +02:00
Pascal Seitz
cd169dee23 use dynamic fast field codec for offset index 2021-06-15 13:34:42 +02:00
PSeitz
b5cc60f80b Merge pull request #1080 from PSeitz/more_tests
test all features in github actions
2021-06-15 10:51:57 +02:00
Pascal Seitz
060b83159a use nightly for tests 2021-06-15 10:08:49 +02:00
Pascal Seitz
a40ff35453 test all features 2021-06-15 09:31:39 +02:00
PSeitz
268e6bfe6e update fast field codec readme 2021-06-15 09:19:39 +02:00
PSeitz
f902440b8b Merge pull request #1072 from PSeitz/fastfieldcompression
Enable support for multiple fastfield codecs, add linear interpolation
2021-06-15 09:07:17 +02:00
Pascal Seitz
77a0902605 replace unwrap, use vec in bench 2021-06-14 17:01:46 +02:00
Pascal Seitz
c889ae10e4 add is_applicable to fast field codecs 2021-06-14 16:16:25 +02:00
Pascal Seitz
0a534c6ee0 rename create to serialize 2021-06-14 15:40:07 +02:00
Pascal Seitz
167d88b449 fix tests behind unstable feature flag 2021-06-14 15:31:12 +02:00
Pascal Seitz
1071ed84f2 fix cond compilation 2021-06-14 14:05:04 +02:00
Pascal Seitz
abb5624af2 add contributing guidelines, add codec comparer binary
add contributing guidelines
add codec comparer binary to test codec compressions with different test data sets
2021-06-14 13:56:40 +02:00
Pascal Seitz
1d41b96d32 rename, add codec_tester 2021-06-14 13:56:40 +02:00
Pascal Seitz
ef4665945f rename file 2021-06-14 13:56:40 +02:00
Pascal Seitz
294cd5fd0b streamline traits and tests 2021-06-14 13:56:40 +02:00
Pascal Seitz
f4d271177c add inline, add readme 2021-06-14 13:56:40 +02:00
Pascal Seitz
451538fecf add serialize for bool 2021-06-14 13:56:40 +02:00
Pascal Seitz
e78e0fec59 add multilinearinterpolation
add multilinearinterpolation, which compresses blocks of size 512
add checks for linear interpolation
2021-06-14 13:56:40 +02:00
Pascal Seitz
2e639cebf8 fix bitpacker bug, reset internal value 2021-06-14 13:56:40 +02:00
Pascal Seitz
e296da7ade add debug and failsafes 2021-06-14 13:56:40 +02:00
Pascal Seitz
3b3e26c4b8 use f64 precision for slope calculation 2021-06-14 13:56:40 +02:00
Pascal Seitz
6a4883ac69 use uniform distribution sampling 2021-06-14 13:56:40 +02:00
Pascal Seitz
0ba05df545 add f32::MAX to disable a compressor 2021-06-14 13:56:40 +02:00
Pascal Seitz
aa3c4d4029 use f32 precision, add inline 2021-06-14 13:56:40 +02:00
Pascal Seitz
60df629725 cargo.toml license desc and author 2021-06-14 13:56:40 +02:00
Pascal Seitz
2570b005ac fix estimation test 2021-06-14 13:56:40 +02:00
Pascal Seitz
d5212cd19d fix clippy 2021-06-14 13:56:40 +02:00
Pascal Seitz
2193d85622 fix clippy and common crate tests 2021-06-14 13:56:40 +02:00
Pascal Seitz
dfdbfe9eff add benchmark for fast field codecs
test tests::bench_fastfield_bitpack_create        ... bench:      57,628 ns/iter (+/- 23,486)
test tests::bench_fastfield_bitpack_get           ... bench:      43,323 ns/iter (+/- 4,286)
test tests::bench_fastfield_linearinterpol_create ... bench:     223,625 ns/iter (+/- 33,563)
test tests::bench_fastfield_linearinterpol_get    ... bench:      82,839 ns/iter (+/- 9,575)
2021-06-14 13:56:40 +02:00
Pascal Seitz
b999e836b2 replace BitpackedFastFieldReader, delete FastFieldSerializer trait 2021-06-14 13:56:40 +02:00
Pascal Seitz
be2dd41e69 add interface to create and read codecs
add CodecReader as common interface in fastfield codec crate
add LinearInterpolation to DynamicFastFieldReader
calc estimation and choose best codec
cleanup
2021-06-14 13:56:40 +02:00
Pascal Seitz
483fdb79cc add linear interpolation estimation
add estimation tests
add codec test data in tests
2021-06-14 13:56:40 +02:00
Pascal Seitz
aefd0fc907 refactor, add fastfield metadata to footer
change api to fastfield reader in codec crate
add fastfield metadata to footer
remove old code
merge codec files
2021-06-14 13:56:40 +02:00
Pascal Seitz
3298d6cb71 move common to common crate, create fastfield_codecs crate
move common to common crate
create fastfield_codecs crate
add bitpacker to fast field codecs
add linear interpolation to fast field codecs
add tests
2021-06-14 13:56:40 +02:00
Pascal Seitz
c02c78ea73 implement linear interpol serializer 2021-06-14 13:56:40 +02:00
Pascal Seitz
6bf4fee1ba support multiple codecs
support multiple codecs
prepend codec id to all fast fields
add new api to create fastfields with access to all data
use new fastfield creation api in initial creation and merge
remove unused collect of data in doc_id_mapping
2021-06-14 13:56:40 +02:00
105 changed files with 3473 additions and 1384 deletions

.github/workflows/coverage.yml (new file, 27 lines)

@@ -0,0 +1,27 @@
name: coverage
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
test:
name: coverage
runs-on: ubuntu-latest
container:
image: xd009642/tarpaulin:develop-nightly
options: --security-opt seccomp=unconfined
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Generate code coverage
run: |
cargo +nightly tarpaulin --verbose --all-features --workspace --timeout 120 --out Xml
- name: Upload to codecov.io
uses: codecov/codecov-action@v1
with:
# token: ${{secrets.CODECOV_TOKEN}} # not required for public repos
fail_ci_if_error: true


@@ -18,7 +18,13 @@ jobs:
- uses: actions/checkout@v2
- name: Build
run: cargo build --verbose --workspace
- name: Install latest nightly to test also against unstable feature flag
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
components: rustfmt
- name: Run tests
run: cargo test --verbose --workspace
run: cargo test --all-features --verbose --workspace
- name: Check Formatting
run: cargo fmt --all -- --check


@@ -1,3 +1,8 @@
Tantivy 0.15.3
=========================
- Major bugfix. Deleting documents was broken when the index was sorted by a field. (@appaquet, @fulmicoton) #1101
Tantivy 0.15.2
========================
- Major bugfix. DocStore still panics when a deleted doc is at the beginning of a block. (@appaquet) #1088


@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.15.2"
version = "0.16.0-dev"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -35,6 +35,9 @@ crossbeam = "0.8"
futures = { version = "0.3.15", features = ["thread-pool"] }
tantivy-query-grammar = { version="0.15.0", path="./query-grammar" }
tantivy-bitpacker = { version="0.1", path="./bitpacker" }
common = { version="0.1", path="./common" }
fastfield_codecs = { version="0.1", path="./fastfield_codecs", default-features = false }
ownedbytes = { version="0.1", path="./ownedbytes" }
stable_deref_trait = "1.2"
rust-stemmers = "1.2"
downcast-rs = "1.2"
@@ -51,6 +54,7 @@ rayon = "1.5"
lru = "0.6.5"
fastdivide = "0.3"
itertools = "0.10.0"
measure_time = "0.7.0"
[target.'cfg(windows)'.dependencies]
winapi = "0.3.9"
@@ -88,7 +92,7 @@ unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
[workspace]
members = ["query-grammar", "bitpacker"]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }


@@ -1,7 +1,7 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tantivy::tokenizer::TokenizerManager;
const ALICE_TXT: &'static str = include_str!("alice.txt");
const ALICE_TXT: &str = include_str!("alice.txt");
pub fn criterion_benchmark(c: &mut Criterion) {
let tokenizer_manager = TokenizerManager::default();


@@ -49,6 +49,7 @@ impl BitPacker {
let bytes = self.mini_buffer.to_le_bytes();
output.write_all(&bytes[..num_bytes])?;
self.mini_buffer_written = 0;
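// also reset the buffered bits themselves; without this, stale bits could leak
// into values written after a flush (the bug fixed in 2e639cebf8)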
self.mini_buffer = 0;
}
Ok(())
}
@@ -61,7 +62,7 @@ impl BitPacker {
}
}
#[derive(Clone)]
#[derive(Clone, Debug, Default)]
pub struct BitUnpacker {
num_bits: u64,
mask: u64,

common/Cargo.toml (new file, 12 lines)

@@ -0,0 +1,12 @@
[package]
name = "common"
version = "0.1.0"
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2018"
description = "common traits and utility functions used by multiple tantivy subcrates"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
byteorder = "1.4.3"

common/src/lib.rs (new file, 9 lines)

@@ -0,0 +1,9 @@
pub use byteorder::LittleEndian as Endianness;
mod serialize;
mod vint;
mod writer;
pub use serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use vint::{read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt};
pub use writer::{AntiCallToken, CountingWriter, TerminatingWrite};


@@ -1,5 +1,5 @@
use crate::common::Endianness;
use crate::common::VInt;
use crate::Endianness;
use crate::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io;
@@ -160,6 +160,28 @@ impl FixedSize for u8 {
const SIZE_IN_BYTES: usize = 1;
}
impl BinarySerializable for bool {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let val = if *self { 1 } else { 0 };
writer.write_u8(val)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<bool> {
let val = reader.read_u8()?;
match val {
0 => Ok(false),
1 => Ok(true),
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
"invalid bool value on deserialization, data corrupted",
)),
}
}
}
impl FixedSize for bool {
const SIZE_IN_BYTES: usize = 1;
}
impl BinarySerializable for String {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
let data: &[u8] = self.as_bytes();
@@ -180,9 +202,9 @@ impl BinarySerializable for String {
#[cfg(test)]
pub mod test {
use super::VInt;
use super::*;
use crate::common::VInt;
use crate::serialize::BinarySerializable;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();

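For context, the new `bool` support round-trips as below (a minimal usage sketch, assuming the `common` crate is available as a dependency):

```rust
use common::{BinarySerializable, FixedSize};

fn main() -> std::io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();
    true.serialize(&mut buf)?;
    false.serialize(&mut buf)?;
    assert_eq!(buf.len(), 2 * bool::SIZE_IN_BYTES);
    // &[u8] implements Read, so we can deserialize straight from the buffer.
    let mut cursor: &[u8] = &buf;
    assert!(bool::deserialize(&mut cursor)?);
    assert!(!bool::deserialize(&mut cursor)?);
    Ok(())
}
```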

@@ -106,7 +106,7 @@ pub fn read_u32_vint_no_advance(data: &[u8]) -> (u32, usize) {
pub fn write_u32_vint<W: io::Write>(val: u32, writer: &mut W) -> io::Result<()> {
let mut buf = [0u8; 8];
let data = serialize_vint_u32(val, &mut buf);
writer.write_all(&data)
writer.write_all(data)
}
impl VInt {
@@ -175,14 +175,14 @@ impl BinarySerializable for VInt {
mod tests {
use super::serialize_vint_u32;
use super::BinarySerializable;
use super::VInt;
use crate::common::BinarySerializable;
fn aux_test_vint(val: u64) {
let mut v = [14u8; 10];
let num_bytes = VInt(val).serialize_into(&mut v);
for i in num_bytes..10 {
assert_eq!(v[i], 14u8);
for el in &v[num_bytes..10] {
assert_eq!(el, &14u8);
}
assert!(num_bytes > 0);
if num_bytes < 10 {


@@ -1,7 +1,4 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;
use std::io::{self, BufWriter, Write};
pub struct CountingWriter<W> {
underlying: W,
@@ -16,41 +13,87 @@ impl<W: Write> CountingWriter<W> {
}
}
#[inline]
pub fn written_bytes(&self) -> u64 {
self.written_bytes
}
/// Returns the underlying write object.
/// Note that this method does not trigger any flushing.
#[inline]
pub fn finish(self) -> W {
self.underlying
}
}
impl<W: Write> Write for CountingWriter<W> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let written_size = self.underlying.write(buf)?;
self.written_bytes += written_size as u64;
Ok(written_size)
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.underlying.write_all(buf)?;
self.written_bytes += buf.len() as u64;
Ok(())
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
self.underlying.flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
#[inline]
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.underlying.terminate_ref(token)
}
}
/// Struct used to prevent calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());
/// Trait used to indicate when no more writes need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicates that the writer will no longer be used. Internally calls terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[cfg(test)]
mod test {

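With `CountingWriter` now exported from `common`, typical usage looks like this (a minimal sketch):

```rust
use common::CountingWriter;
use std::io::Write;

fn main() -> std::io::Result<()> {
    // Wrap any Write impl; the wrapper counts the bytes pushed through it.
    let mut writer = CountingWriter::wrap(Vec::new());
    writer.write_all(b"hello ")?;
    writer.write_all(b"world")?;
    assert_eq!(writer.written_bytes(), 11);
    // finish() returns the underlying writer without flushing it.
    let inner = writer.finish();
    assert_eq!(inner, b"hello world");
    Ok(())
}
```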

@@ -139,7 +139,7 @@ fn main() -> tantivy::Result<()> {
//
// Lets index a bunch of fake documents for the sake of
// this example.
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!(


@@ -12,7 +12,7 @@ fn main() -> tantivy::Result<()> {
let ingredient = schema_builder.add_facet_field("ingredient", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(30_000_000)?;
@@ -51,7 +51,7 @@ fn main() -> tantivy::Result<()> {
let query = BooleanQuery::new_multiterms_query(
facets
.iter()
.map(|key| Term::from_facet(ingredient, &key))
.map(|key| Term::from_facet(ingredient, key))
.collect(),
);
let top_docs_by_custom_score =


@@ -22,7 +22,7 @@ fn main() -> tantivy::Result<()> {
let title = schema_builder.add_text_field("title", TEXT | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 50_000_000)?;
index_writer.add_document(doc!(title => "The Old Man and the Sea"));


@@ -82,7 +82,7 @@ fn main() -> tantivy::Result<()> {
}]
}"#;
let short_man_doc = schema.parse_document(&short_man_json)?;
let short_man_doc = schema.parse_document(short_man_json)?;
index_writer.add_document(short_man_doc);


@@ -25,7 +25,7 @@ fn main() -> tantivy::Result<()> {
let schema = schema_builder.build();
// # Indexing documents
let index = Index::create_in_dir(&index_path, schema.clone())?;
let index = Index::create_in_dir(&index_path, schema)?;
let mut index_writer = index.writer(50_000_000)?;


@@ -1,4 +1,3 @@
use tantivy;
use tantivy::schema::*;
// # Document from json
@@ -22,7 +21,7 @@ fn main() -> tantivy::Result<()> {
}"#;
// We can parse our document
let _mice_and_men_doc = schema.parse_document(&mice_and_men_doc_json)?;
let _mice_and_men_doc = schema.parse_document(mice_and_men_doc_json)?;
// Multi-valued field are allowed, they are
// expressed in JSON by an array.
@@ -31,7 +30,7 @@ fn main() -> tantivy::Result<()> {
"title": ["Frankenstein", "The Modern Prometheus"],
"year": 1818
}"#;
let _frankenstein_doc = schema.parse_document(&frankenstein_json)?;
let _frankenstein_doc = schema.parse_document(frankenstein_json)?;
// Note that the schema is saved in your index directory.
//


@@ -0,0 +1,24 @@
[package]
name = "fastfield_codecs"
version = "0.1.0"
authors = ["Pascal Seitz <pascal@quickwit.io>"]
license = "MIT"
edition = "2018"
description = "Fast field codecs used by tantivy"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
common = { path = "../common/" }
tantivy-bitpacker = { path = "../bitpacker/" }
prettytable-rs = {version="0.8.0", optional= true}
rand = {version="0.8.3", optional= true}
[dev-dependencies]
more-asserts = "0.2.1"
rand = "0.8.3"
[features]
bin = ["prettytable-rs", "rand"]
default = ["bin"]


@@ -0,0 +1,68 @@
# Fast Field Codecs
This crate contains various fast field codecs, used to compress/decompress fast field data in tantivy.
## Contributing
Contributing is pretty straightforward. Since bitpacking is the simplest codec, it is a good reference.
A codec needs to implement two traits (see the sketch below):
- A reader implementing `FastFieldCodecReader` to read the codec.
- A serializer implementing `FastFieldCodecSerializer` for compression estimation and the codec name + id.
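For illustration, a minimal (hypothetical) codec that stores each value as 8 raw little-endian bytes could implement the two traits as follows; this is a sketch written against the trait definitions in `src/lib.rs`, not part of the crate:

```rust
use fastfield_codecs::{
    FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats,
};
use std::convert::TryInto;
use std::io::{self, Write};

struct RawReader {
    min_value: u64,
    max_value: u64,
}

impl FastFieldCodecReader for RawReader {
    fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
        // The last 16 bytes hold the min/max footer written by the serializer.
        let footer = &bytes[bytes.len() - 16..];
        Ok(RawReader {
            min_value: u64::from_le_bytes(footer[..8].try_into().unwrap()),
            max_value: u64::from_le_bytes(footer[8..].try_into().unwrap()),
        })
    }
    fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
        let start = doc as usize * 8;
        u64::from_le_bytes(data[start..start + 8].try_into().unwrap())
    }
    fn min_value(&self) -> u64 {
        self.min_value
    }
    fn max_value(&self) -> u64 {
        self.max_value
    }
}

struct RawSerializer;

impl FastFieldCodecSerializer for RawSerializer {
    const NAME: &'static str = "Raw";
    const ID: u8 = 255; // hypothetical id; must not clash with existing codecs

    fn is_applicable(_: &impl FastFieldDataAccess, _: FastFieldStats) -> bool {
        true // raw storage always works
    }
    fn estimate(_: &impl FastFieldDataAccess, _: FastFieldStats) -> f32 {
        1.0 // never better than uncompressed 64-bit data
    }
    fn serialize(
        write: &mut impl Write,
        _accessor: &impl FastFieldDataAccess,
        stats: FastFieldStats,
        data_iter: impl Iterator<Item = u64>,
        _data_iter1: impl Iterator<Item = u64>,
    ) -> io::Result<()> {
        for val in data_iter {
            write.write_all(&val.to_le_bytes())?;
        }
        write.write_all(&stats.min_value.to_le_bytes())?;
        write.write_all(&stats.max_value.to_le_bytes())?;
        Ok(())
    }
}
```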
### Tests
Once the traits are implemented, test and benchmark integration is straightforward (see `test_with_codec_data_sets` and `bench.rs`).
Make sure to add the codec to `main.rs`, which compares compression ratio and estimation across different data sets. You can run it with:
```
cargo run --features bin
```
### TODO
- Add real world data sets in comparison
- Add codec to cover sparse data sets
### Codec Comparison
```
+----------------------------------+-------------------+------------------------+
| | Compression Ratio | Compression Estimation |
+----------------------------------+-------------------+------------------------+
| Autoincrement | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.000039572664 | 0.000004396963 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.1477348 | 0.17275847 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.28126493 | 0.28125 |
+----------------------------------+-------------------+------------------------+
| Monotonically increasing concave | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.25003937 | 0.26562938 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.190665 | 0.1883836 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.31251436 | 0.3125 |
+----------------------------------+-------------------+------------------------+
| Monotonically increasing convex | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.25003937 | 0.28125438 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.18676 | 0.2040086 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.31251436 | 0.3125 |
+----------------------------------+-------------------+------------------------+
| Almost monotonically increasing | | |
+----------------------------------+-------------------+------------------------+
| LinearInterpol | 0.14066513 | 0.1562544 |
+----------------------------------+-------------------+------------------------+
| MultiLinearInterpol | 0.16335973 | 0.17275847 |
+----------------------------------+-------------------+------------------------+
| Bitpacked | 0.28126493 | 0.28125 |
+----------------------------------+-------------------+------------------------+
```


@@ -0,0 +1,108 @@
#![feature(test)]
extern crate test;
#[cfg(test)]
mod tests {
use fastfield_codecs::{
bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer},
linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer},
multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
},
*,
};
fn get_data() -> Vec<u64> {
let mut data: Vec<_> = (100..55000_u64)
.map(|num| num + rand::random::<u8>() as u64)
.collect();
data.push(99_000);
data.insert(1000, 2000);
data.insert(2000, 100);
data.insert(3000, 4100);
data.insert(4000, 100);
data.insert(5000, 800);
data
}
fn value_iter() -> impl Iterator<Item = u64> {
0..20_000
}
fn bench_get<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
b: &mut Bencher,
data: &[u64],
) {
let mut bytes = vec![];
S::serialize(
&mut bytes,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let reader = R::open_from_bytes(&bytes).unwrap();
b.iter(|| {
for pos in value_iter() {
reader.get_u64(pos as u64, &bytes);
}
});
}
fn bench_create<S: FastFieldCodecSerializer>(b: &mut Bencher, data: &[u64]) {
let mut bytes = vec![];
b.iter(|| {
S::serialize(
&mut bytes,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
});
}
use test::Bencher;
#[bench]
fn bench_fastfield_bitpack_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<BitpackedFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_linearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<LinearInterpolFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_create(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_create::<MultiLinearInterpolFastFieldSerializer>(b, &data);
}
#[bench]
fn bench_fastfield_bitpack_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(b, &data);
}
#[bench]
fn bench_fastfield_linearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>(b, &data);
}
#[bench]
fn bench_fastfield_multilinearinterpol_get(b: &mut Bencher) {
let data: Vec<_> = get_data();
bench_get::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>(
b, &data,
);
}
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}
}


@@ -0,0 +1,176 @@
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use common::BinarySerializable;
use std::io::{self, Write};
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct BitpackedFastFieldReader {
bit_unpacker: BitUnpacker,
pub min_value_u64: u64,
pub max_value_u64: u64,
}
impl<'data> FastFieldCodecReader for BitpackedFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - 16);
let min_value = u64::deserialize(&mut footer)?;
let amplitude = u64::deserialize(&mut footer)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedFastFieldReader {
min_value_u64: min_value,
max_value_u64: max_value,
bit_unpacker,
})
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
self.min_value_u64 + self.bit_unpacker.get(doc, data)
}
#[inline]
fn min_value(&self) -> u64 {
self.min_value_u64
}
#[inline]
fn max_value(&self) -> u64 {
self.max_value_u64
}
}
pub struct BitpackedFastFieldSerializerLegacy<'a, W: 'a + Write> {
bit_packer: BitPacker,
write: &'a mut W,
min_value: u64,
amplitude: u64,
num_bits: u8,
}
impl<'a, W: Write> BitpackedFastFieldSerializerLegacy<'a, W> {
/// Creates a new fast field serializer.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode values.
pub fn open(
write: &'a mut W,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedFastFieldSerializerLegacy<'a, W>> {
assert!(min_value <= max_value);
let amplitude = max_value - min_value;
let num_bits = compute_num_bits(amplitude);
let bit_packer = BitPacker::new();
Ok(BitpackedFastFieldSerializerLegacy {
bit_packer,
write,
min_value,
amplitude,
num_bits,
})
}
/// Pushes a new value to the currently open u64 fast field.
#[inline]
pub fn add_val(&mut self, val: u64) -> io::Result<()> {
let val_to_write: u64 = val - self.min_value;
self.bit_packer
.write(val_to_write, self.num_bits, &mut self.write)?;
Ok(())
}
pub fn close_field(mut self) -> io::Result<()> {
self.bit_packer.close(&mut self.write)?;
self.min_value.serialize(&mut self.write)?;
self.amplitude.serialize(&mut self.write)?;
Ok(())
}
}
pub struct BitpackedFastFieldSerializer {}
impl FastFieldCodecSerializer for BitpackedFastFieldSerializer {
const NAME: &'static str = "Bitpacked";
const ID: u8 = 1;
/// Serializes data with the BitpackedFastFieldSerializer.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode values.
fn serialize(
write: &mut impl Write,
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
let mut serializer =
BitpackedFastFieldSerializerLegacy::open(write, stats.min_value, stats.max_value)?;
for val in data_iter {
serializer.add_val(val)?;
}
serializer.close_field()?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
_stats: FastFieldStats,
) -> bool {
true
}
fn estimate(_fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let amplitude = stats.max_value - stats.min_value;
let num_bits = compute_num_bits(amplitude);
let num_bits_uncompressed = 64;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) {
crate::tests::create_and_validate::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>(
data, name,
);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn bitpacked_fast_field_rand() {
for _ in 0..500 {
let mut data = (0..1 + rand::random::<u8>() as usize)
.map(|_| rand::random::<i64>() as u64 / 2)
.collect::<Vec<_>>();
create_and_validate(&data, "rand");
data.reverse();
create_and_validate(&data, "rand");
}
}
}

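A round-trip through the Bitpacked codec then looks like this (a minimal sketch following the `serialize`/`open_from_bytes` contract above):

```rust
use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer};
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldStats};

fn main() -> std::io::Result<()> {
    let data: Vec<u64> = vec![10, 20, 15, 40];
    let stats = FastFieldStats {
        min_value: 10,
        max_value: 40,
        num_vals: data.len() as u64,
    };
    let mut bytes: Vec<u8> = Vec::new();
    // Two iterators are passed because some codecs need to scan the data twice.
    BitpackedFastFieldSerializer::serialize(
        &mut bytes,
        &data,
        stats,
        data.iter().cloned(),
        data.iter().cloned(),
    )?;
    let reader = BitpackedFastFieldReader::open_from_bytes(&bytes)?;
    assert_eq!(reader.get_u64(2, &bytes), 15);
    Ok(())
}
```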
fastfield_codecs/src/lib.rs (new file, 225 lines)

@@ -0,0 +1,225 @@
#[cfg(test)]
#[macro_use]
extern crate more_asserts;
use std::io;
use std::io::Write;
pub mod bitpacked;
pub mod linearinterpol;
pub mod multilinearinterpol;
pub trait FastFieldCodecReader: Sized {
/// reads the metadata and returns the CodecReader
fn open_from_bytes(bytes: &[u8]) -> std::io::Result<Self>;
fn get_u64(&self, doc: u64, data: &[u8]) -> u64;
fn min_value(&self) -> u64;
fn max_value(&self) -> u64;
}
/// The FastFieldCodecSerializer trait is required on all variants
/// of fast field compressions, to decide which one to choose.
pub trait FastFieldCodecSerializer {
/// A codec needs to provide a unique name and id, which is
/// used for debugging and de/serialization.
const NAME: &'static str;
const ID: u8;
/// Check if the Codec is able to compress the data
fn is_applicable(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> bool;
/// Returns an estimate of the compression ratio.
/// The baseline is uncompressed 64bit data.
///
/// It could make sense to also return a value representing
/// computational complexity.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32;
/// Serializes the data using the serializer into write.
/// There are multiple iterators, in case the codec needs to read the data multiple times.
/// The iterators should be preferred over using fastfield_accessor for performance reasons.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()>;
}
/// FastFieldDataAccess is the trait to access fast field data during serialization and estimation.
pub trait FastFieldDataAccess {
/// Return the value associated to the given position.
///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance reasons.
///
/// # Panics
///
/// May panic if `position` is greater than the index.
fn get_val(&self, position: u64) -> u64;
}
#[derive(Debug, Clone)]
pub struct FastFieldStats {
pub min_value: u64,
pub max_value: u64,
pub num_vals: u64,
}
impl<'a> FastFieldDataAccess for &'a [u64] {
fn get_val(&self, position: u64) -> u64 {
self[position as usize]
}
}
impl FastFieldDataAccess for Vec<u64> {
fn get_val(&self, position: u64) -> u64 {
self[position as usize]
}
}
#[cfg(test)]
mod tests {
use crate::{
bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer},
linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer},
multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
},
};
pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(
data: &[u64],
name: &str,
) -> (f32, f32) {
if !S::is_applicable(&data, crate::tests::stats_from_vec(data)) {
return (f32::MAX, 0.0);
}
let estimation = S::estimate(&data, crate::tests::stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
crate::tests::stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let reader = R::open_from_bytes(&out).unwrap();
for (doc, orig_val) in data.iter().enumerate() {
let val = reader.get_u64(doc as u64, &out);
if val != *orig_val {
panic!(
"val {:?} does not match orig_val {:?}, in data set {}, data {:?}",
val, orig_val, name, data
);
}
}
let actual_compression = data.len() as f32 / out.len() as f32;
(estimation, actual_compression)
}
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];
let data = (10..=20_u64).collect::<Vec<_>>();
data_and_names.push((data, "simple monotonically increasing"));
data_and_names.push((
vec![5, 6, 7, 8, 9, 10, 99, 100],
"offset in linear interpol",
));
data_and_names.push((vec![5, 50, 3, 13, 1, 1000, 35], "rand small"));
data_and_names.push((vec![10], "single value"));
data_and_names
}
fn test_codec<S: FastFieldCodecSerializer, R: FastFieldCodecReader>() {
let codec_name = S::NAME;
for (data, data_set_name) in get_codec_test_data_sets() {
let (estimate, actual) =
crate::tests::create_and_validate::<S, R>(&data, data_set_name);
let result = if estimate == f32::MAX {
"Disabled".to_string()
} else {
format!("Estimate {:?} Actual {:?} ", estimate, actual)
};
println!(
"Codec {}, DataSet {}, {}",
codec_name, data_set_name, result
);
}
}
#[test]
fn test_codec_bitpacking() {
test_codec::<BitpackedFastFieldSerializer, BitpackedFastFieldReader>();
}
#[test]
fn test_codec_interpolation() {
test_codec::<LinearInterpolFastFieldSerializer, LinearInterpolFastFieldReader>();
}
#[test]
fn test_codec_multi_interpolation() {
test_codec::<MultiLinearInterpolFastFieldSerializer, MultiLinearInterpolFastFieldReader>();
}
use super::*;
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}
#[test]
fn estimation_good_interpolation_case() {
let data = (10..=20000_u64).collect::<Vec<_>>();
let linear_interpol_estimation =
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, 0.01);
let multi_linear_interpol_estimation =
MultiLinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(multi_linear_interpol_estimation, 0.2);
assert_le!(linear_interpol_estimation, multi_linear_interpol_estimation);
let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, bitpacked_estimation);
}
#[test]
fn estimation_test_bad_interpolation_case() {
let data = vec![200, 10, 10, 10, 10, 1000, 20];
let linear_interpol_estimation =
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, 0.32);
let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(bitpacked_estimation, linear_interpol_estimation);
}
#[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
let mut data = (200..=20000_u64).collect::<Vec<_>>();
data.push(1_000_000);
// in this case the linear interpolation can't in fact be worse than bitpacking,
// but the estimator adds some threshold, which leads to a worse estimate
let linear_interpol_estimation =
LinearInterpolFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(linear_interpol_estimation, 0.35);
let bitpacked_estimation =
BitpackedFastFieldSerializer::estimate(&data, stats_from_vec(&data));
assert_le!(bitpacked_estimation, 0.32);
assert_le!(bitpacked_estimation, linear_interpol_estimation);
}
}

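The "calc estimation and choose best codec" step from commit be2dd41e69 can be pictured as below; `pick_codec_id` is a hypothetical helper written against the traits above, not the crate's actual selection code:

```rust
use fastfield_codecs::bitpacked::BitpackedFastFieldSerializer;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats};

// f32::MAX marks a codec as disabled (cf. commit 0ba05df545).
fn estimate_ratio<S: FastFieldCodecSerializer>(data: &[u64], stats: FastFieldStats) -> (u8, f32) {
    if S::is_applicable(&data, stats.clone()) {
        (S::ID, S::estimate(&data, stats))
    } else {
        (S::ID, f32::MAX)
    }
}

// Returns the codec id with the lowest estimated compression ratio.
fn pick_codec_id(data: &[u64], stats: FastFieldStats) -> u8 {
    [
        estimate_ratio::<BitpackedFastFieldSerializer>(data, stats.clone()),
        estimate_ratio::<LinearInterpolFastFieldSerializer>(data, stats),
    ]
    .iter()
    .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
    .unwrap()
    .0
}
```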

@@ -0,0 +1,295 @@
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use std::io::{self, Read, Write};
use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::BinarySerializable;
use common::FixedSize;
use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct LinearInterpolFastFieldReader {
bit_unpacker: BitUnpacker,
pub footer: LinearInterpolFooter,
pub slope: f32,
}
#[derive(Clone, Debug)]
pub struct LinearInterpolFooter {
pub relative_max_value: u64,
pub offset: u64,
pub first_val: u64,
pub last_val: u64,
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
}
impl BinarySerializable for LinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.relative_max_value.serialize(write)?;
self.offset.serialize(write)?;
self.first_val.serialize(write)?;
self.last_val.serialize(write)?;
self.num_vals.serialize(write)?;
self.min_value.serialize(write)?;
self.max_value.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<LinearInterpolFooter> {
Ok(LinearInterpolFooter {
relative_max_value: u64::deserialize(reader)?,
offset: u64::deserialize(reader)?,
first_val: u64::deserialize(reader)?,
last_val: u64::deserialize(reader)?,
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
})
}
}
impl FixedSize for LinearInterpolFooter {
const SIZE_IN_BYTES: usize = 56;
}
impl FastFieldCodecReader for LinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let (_data, mut footer) = bytes.split_at(bytes.len() - LinearInterpolFooter::SIZE_IN_BYTES);
let footer = LinearInterpolFooter::deserialize(&mut footer)?;
let slope = get_slope(footer.first_val, footer.last_val, footer.num_vals);
let num_bits = compute_num_bits(footer.relative_max_value);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(LinearInterpolFastFieldReader {
bit_unpacker,
footer,
slope,
})
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let calculated_value = get_calculated_value(self.footer.first_val, doc, self.slope);
(calculated_value + self.bit_unpacker.get(doc, data)) - self.footer.offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
}
/// Fastfield serializer, which tries to guess values by linear interpolation
/// and stores the difference bitpacked.
pub struct LinearInterpolFastFieldSerializer {}
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
if num_vals <= 1 {
return 0.0;
}
// We calculate the slope with f64 high precision and use the result in lower precision f32
// This is done in order to handle estimations for very large values like i64::MAX
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
first_val + (pos as f32 * slope) as u64
}
impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
const NAME: &'static str = "LinearInterpol";
const ID: u8 = 2;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// calculate offset to ensure all values are positive
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data_iter1.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
if calculated_value > actual_value {
// negative value: we need to apply an offset
// we ignore negative values in the max value calculation, because negative values
// will be offset to 0
offset = offset.max(calculated_value - actual_value);
} else {
// positive value, no offset required
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
// rel_positive_max will be adjusted by offset
let relative_max_value = rel_positive_max + offset;
let num_bits = compute_num_bits(relative_max_value);
let mut bit_packer = BitPacker::new();
for (pos, val) in data_iter.enumerate() {
let calculated_value = get_calculated_value(first_val, pos as u64, slope);
let diff = (val + offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.close(write)?;
let footer = LinearInterpolFooter {
relative_max_value,
offset,
first_val,
last_val,
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 3 {
return false; //disable compressor for this case
}
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theoretical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theoretical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimation for linear interpolation is hard because you don't know
/// where the local maxima for the deviation of the calculated value are and
/// the offset to shift all values to >= 0 is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let slope = get_slope(first_val, last_val, stats.num_vals);
// let's sample at 0%, 5%, 10% .. 95%
let num_vals = stats.num_vals as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value = get_calculated_value(first_val, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap_or(0);
// The assumption is that the sampled max_distance is within 50% of the actual
// maximum, hence the 1.5 factor.
// It is multiplied by 2 because in the worst case the line is as much above as
// below the values, so the offset would equal max_distance.
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
+ LinearInterpolFooter::SIZE_IN_BYTES as u64;
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
#[inline]
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) {
crate::tests::create_and_validate::<
LinearInterpolFastFieldSerializer,
LinearInterpolFastFieldReader,
>(data, name);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn linear_interpol_fast_field_test_large_amplitude() {
let data = vec![
i64::MAX as u64 / 2,
i64::MAX as u64 / 3,
i64::MAX as u64 / 2,
];
create_and_validate(&data, "large amplitude");
}
#[test]
fn linear_interpol_fast_concave_data() {
let data = vec![0, 1, 2, 5, 8, 10, 20, 50];
create_and_validate(&data, "concave data");
}
#[test]
fn linear_interpol_fast_convex_data() {
let data = vec![0, 40, 60, 70, 75, 77];
create_and_validate(&data, "convex data");
}
#[test]
fn linear_interpol_fast_field_test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn linear_interpol_fast_field_rand() {
for _ in 0..5000 {
let mut data = (0..50).map(|_| rand::random::<u64>()).collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}

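To picture the LinearInterpol encoding: values are predicted by the line through the first and last value, and only the offset-shifted deviation from that line is bitpacked. A self-contained sketch (the helpers are re-implemented here for illustration; they are private in the crate):

```rust
fn slope(first: u64, last: u64, n: u64) -> f32 {
    if n <= 1 {
        return 0.0;
    }
    ((last as f64 - first as f64) / (n - 1) as f64) as f32
}

fn predicted(first: u64, pos: u64, s: f32) -> u64 {
    first + (pos as f32 * s) as u64
}

fn main() {
    let data = [10u64, 12, 13, 16, 18];
    let s = slope(data[0], *data.last().unwrap(), data.len() as u64);
    // The offset shifts every stored diff to be non-negative.
    let offset = data
        .iter()
        .enumerate()
        .map(|(pos, &v)| predicted(data[0], pos as u64, s).saturating_sub(v))
        .max()
        .unwrap();
    for (pos, &v) in data.iter().enumerate() {
        // Only this small diff gets bitpacked; the line itself lives in the footer.
        let diff = (v + offset) - predicted(data[0], pos as u64, s);
        println!("pos {}: store {}", pos, diff);
    }
}
```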

@@ -0,0 +1,127 @@
#[macro_use]
extern crate prettytable;
use fastfield_codecs::{
linearinterpol::LinearInterpolFastFieldSerializer,
multilinearinterpol::MultiLinearInterpolFastFieldSerializer, FastFieldCodecSerializer,
FastFieldStats,
};
use prettytable::{Cell, Row, Table};
fn main() {
let mut table = Table::new();
// Add the header row
table.add_row(row!["", "Compression Ratio", "Compression Estimation"]);
for (data, data_set_name) in get_codec_test_data_sets() {
let mut results = vec![];
let res = serialize_with_codec::<LinearInterpolFastFieldSerializer>(&data);
results.push(res);
let res = serialize_with_codec::<MultiLinearInterpolFastFieldSerializer>(&data);
results.push(res);
let res = serialize_with_codec::<fastfield_codecs::bitpacked::BitpackedFastFieldSerializer>(
&data,
);
results.push(res);
//let best_estimation_codec = results
//.iter()
//.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
//.unwrap();
let best_compression_ratio_codec = results
.iter()
.min_by(|res1, res2| res1.partial_cmp(res2).unwrap())
.cloned()
.unwrap();
table.add_row(Row::new(vec![Cell::new(data_set_name).style_spec("Bbb")]));
for (is_applicable, est, comp, name) in results {
let (est_cell, ratio_cell) = if !is_applicable {
("Codec Disabled".to_string(), "".to_string())
} else {
(est.to_string(), comp.to_string())
};
#[allow(clippy::all)]
let style = if comp == best_compression_ratio_codec.1 {
"Fb"
} else {
""
};
table.add_row(Row::new(vec![
Cell::new(&name.to_string()).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""),
]));
}
}
table.printstd();
}
pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
let mut data_and_names = vec![];
let data = (1000..=200_000_u64).collect::<Vec<_>>();
data_and_names.push((data, "Autoincrement"));
let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (num as f32 + num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
//let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing concave"));
let mut current_cumulative = 0;
let data = (1..=200_000_u64)
.map(|num| {
let num = (200_000.0 - num as f32).log10() as u64;
current_cumulative += num;
current_cumulative
})
.collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing convex"));
let data = (1000..=200_000_u64)
.map(|num| num + rand::random::<u8>() as u64)
.collect::<Vec<_>>();
data_and_names.push((data, "Almost monotonically increasing"));
data_and_names
}
pub fn serialize_with_codec<S: FastFieldCodecSerializer>(
data: &[u64],
) -> (bool, f32, f32, &'static str) {
let is_applicable = S::is_applicable(&data, stats_from_vec(data));
if !is_applicable {
return (false, 0.0, 0.0, S::NAME);
}
let estimation = S::estimate(&data, stats_from_vec(data));
let mut out = vec![];
S::serialize(
&mut out,
&data,
stats_from_vec(data),
data.iter().cloned(),
data.iter().cloned(),
)
.unwrap();
let actual_compression = out.len() as f32 / (data.len() * 8) as f32;
(true, estimation, actual_compression, S::NAME)
}
pub fn stats_from_vec(data: &[u64]) -> FastFieldStats {
let min_value = data.iter().cloned().min().unwrap_or(0);
let max_value = data.iter().cloned().max().unwrap_or(0);
FastFieldStats {
min_value,
max_value,
num_vals: data.len() as u64,
}
}


@@ -0,0 +1,411 @@
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use common::CountingWriter;
use std::io::{self, Read, Write};
use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::BinarySerializable;
use common::DeserializeFrom;
use tantivy_bitpacker::BitUnpacker;
const CHUNK_SIZE: u64 = 512;
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct MultiLinearInterpolFastFieldReader {
pub footer: MultiLinearInterpolFooter,
}
#[derive(Clone, Debug, Default)]
struct Function {
// The offset in the data is required, because we have different bit_widths per block
data_start_offset: u64,
// start_pos in the block will be CHUNK_SIZE * BLOCK_NUM
start_pos: u64,
// only used during serialization, 0 after deserialization
end_pos: u64,
// only used during serialization, 0 after deserialization
value_start_pos: u64,
// only used during serialization, 0 after deserialization
value_end_pos: u64,
slope: f32,
// The offset so that all values are positive when writing them
positive_val_offset: u64,
num_bits: u8,
bit_unpacker: BitUnpacker,
}
impl Function {
fn calc_slope(&mut self) {
let num_vals = self.end_pos - self.start_pos;
self.slope = get_slope(self.value_start_pos, self.value_end_pos, num_vals);
}
// split the interpolation into two functions; change self and return the second half
fn split(&mut self, split_pos: u64, split_pos_value: u64) -> Function {
let mut new_function = Function {
start_pos: split_pos,
end_pos: self.end_pos,
value_start_pos: split_pos_value,
value_end_pos: self.value_end_pos,
..Default::default()
};
new_function.calc_slope();
self.end_pos = split_pos;
self.value_end_pos = split_pos_value;
self.calc_slope();
new_function
}
}
impl BinarySerializable for Function {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
self.data_start_offset.serialize(write)?;
self.value_start_pos.serialize(write)?;
self.positive_val_offset.serialize(write)?;
self.slope.serialize(write)?;
self.num_bits.serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Function> {
let data_start_offset = u64::deserialize(reader)?;
let value_start_pos = u64::deserialize(reader)?;
let offset = u64::deserialize(reader)?;
let slope = f32::deserialize(reader)?;
let num_bits = u8::deserialize(reader)?;
let interpolation = Function {
data_start_offset,
value_start_pos,
positive_val_offset: offset,
num_bits,
bit_unpacker: BitUnpacker::new(num_bits),
slope,
..Default::default()
};
Ok(interpolation)
}
}
#[derive(Clone, Debug)]
pub struct MultiLinearInterpolFooter {
pub num_vals: u64,
pub min_value: u64,
pub max_value: u64,
interpolations: Vec<Function>,
}
impl BinarySerializable for MultiLinearInterpolFooter {
fn serialize<W: Write>(&self, write: &mut W) -> io::Result<()> {
let mut out = vec![];
self.num_vals.serialize(&mut out)?;
self.min_value.serialize(&mut out)?;
self.max_value.serialize(&mut out)?;
self.interpolations.serialize(&mut out)?;
write.write_all(&out)?;
(out.len() as u32).serialize(write)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<MultiLinearInterpolFooter> {
let mut footer = MultiLinearInterpolFooter {
num_vals: u64::deserialize(reader)?,
min_value: u64::deserialize(reader)?,
max_value: u64::deserialize(reader)?,
interpolations: Vec::<Function>::deserialize(reader)?,
};
for (num, interpol) in footer.interpolations.iter_mut().enumerate() {
interpol.start_pos = CHUNK_SIZE * num as u64;
}
Ok(footer)
}
}
#[inline]
fn get_interpolation_position(doc: u64) -> usize {
let index = doc / CHUNK_SIZE;
index as usize
}
#[inline]
fn get_interpolation_function(doc: u64, interpolations: &[Function]) -> &Function {
&interpolations[get_interpolation_position(doc)]
}
impl FastFieldCodecReader for MultiLinearInterpolFastFieldReader {
/// Opens a fast field given a file.
fn open_from_bytes(bytes: &[u8]) -> io::Result<Self> {
let footer_len: u32 = (&bytes[bytes.len() - 4..]).deserialize()?;
let (_data, mut footer) = bytes.split_at(bytes.len() - (4 + footer_len) as usize);
let footer = MultiLinearInterpolFooter::deserialize(&mut footer)?;
Ok(MultiLinearInterpolFastFieldReader { footer })
}
#[inline]
fn get_u64(&self, doc: u64, data: &[u8]) -> u64 {
let interpolation = get_interpolation_function(doc, &self.footer.interpolations);
let doc = doc - interpolation.start_pos;
let calculated_value =
get_calculated_value(interpolation.value_start_pos, doc, interpolation.slope);
let diff = interpolation
.bit_unpacker
.get(doc, &data[interpolation.data_start_offset as usize..]);
(calculated_value + diff) - interpolation.positive_val_offset
}
#[inline]
fn min_value(&self) -> u64 {
self.footer.min_value
}
#[inline]
fn max_value(&self) -> u64 {
self.footer.max_value
}
}
#[inline]
fn get_slope(first_val: u64, last_val: u64, num_vals: u64) -> f32 {
((last_val as f64 - first_val as f64) / (num_vals as u64 - 1) as f64) as f32
}
#[inline]
fn get_calculated_value(first_val: u64, pos: u64, slope: f32) -> u64 {
(first_val as i64 + (pos as f32 * slope) as i64) as u64
}
/// Same as LinearInterpolFastFieldSerializer, but working on chunks of CHUNK_SIZE elements.
pub struct MultiLinearInterpolFastFieldSerializer {}
impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
const NAME: &'static str = "MultiLinearInterpol";
const ID: u8 = 3;
/// Creates a new fast field serializer.
fn serialize(
write: &mut impl Write,
fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
data_iter: impl Iterator<Item = u64>,
_data_iter1: impl Iterator<Item = u64>,
) -> io::Result<()> {
assert!(stats.min_value <= stats.max_value);
let first_val = fastfield_accessor.get_val(0);
let last_val = fastfield_accessor.get_val(stats.num_vals as u64 - 1);
let mut first_function = Function {
end_pos: stats.num_vals,
value_start_pos: first_val,
value_end_pos: last_val,
..Default::default()
};
first_function.calc_slope();
let mut interpolations = vec![first_function];
// Since we potentially apply multiple passes over the data, the data is cached.
// Multiple iterations can be expensive (a merge with index sorting can add a lot
// of overhead per iteration).
let data = data_iter.collect::<Vec<_>>();
// Split the data into chunks of CHUNK_SIZE elements.
for data_pos in (0..data.len() as u64).step_by(CHUNK_SIZE as usize).skip(1) {
let new_fun = {
let current_interpolation = interpolations.last_mut().unwrap();
current_interpolation.split(data_pos, data[data_pos as usize])
};
interpolations.push(new_fun);
}
// Calculate the offset and the maximum delta (-> num_bits) for each function.
for interpolation in &mut interpolations {
let mut offset = 0;
let mut rel_positive_max = 0;
for (pos, actual_value) in data
[interpolation.start_pos as usize..interpolation.end_pos as usize]
.iter()
.cloned()
.enumerate()
{
let calculated_value = get_calculated_value(
interpolation.value_start_pos,
pos as u64,
interpolation.slope,
);
if calculated_value > actual_value {
// The calculated value overshoots the actual value: we need to apply an offset.
// Negative deltas are ignored in the max value calculation, because they
// will be shifted to 0 by the offset.
offset = offset.max(calculated_value - actual_value);
} else {
// Positive delta, no offset required.
rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
}
}
interpolation.positive_val_offset = offset;
interpolation.num_bits = compute_num_bits(rel_positive_max + offset);
}
let mut bit_packer = BitPacker::new();
let write = &mut CountingWriter::wrap(write);
for interpolation in &mut interpolations {
interpolation.data_start_offset = write.written_bytes();
let num_bits = interpolation.num_bits;
for (pos, actual_value) in data
[interpolation.start_pos as usize..interpolation.end_pos as usize]
.iter()
.cloned()
.enumerate()
{
let calculated_value = get_calculated_value(
interpolation.value_start_pos,
pos as u64,
interpolation.slope,
);
let diff = (actual_value + interpolation.positive_val_offset) - calculated_value;
bit_packer.write(diff, num_bits, write)?;
}
bit_packer.flush(write)?;
}
bit_packer.close(write)?;
let footer = MultiLinearInterpolFooter {
num_vals: stats.num_vals,
min_value: stats.min_value,
max_value: stats.max_value,
interpolations,
};
footer.serialize(write)?;
Ok(())
}
fn is_applicable(
_fastfield_accessor: &impl FastFieldDataAccess,
stats: FastFieldStats,
) -> bool {
if stats.num_vals < 5_000 {
return false;
}
// On serialization the offset is added to the actual value.
// We need to make sure this won't run into overflow issues.
// For this we take the maximum theoretical offset and add it to the max value.
// If this doesn't overflow, the algorithm should be fine.
let theoretical_maximum_offset = stats.max_value - stats.min_value;
if stats
.max_value
.checked_add(theoretical_maximum_offset)
.is_none()
{
return false;
}
true
}
/// Estimating the cost of linear interpolation is hard: we don't know where
/// the local maxima of the deviation from the calculated line are, and the
/// offset is also unknown.
fn estimate(fastfield_accessor: &impl FastFieldDataAccess, stats: FastFieldStats) -> f32 {
let first_val_in_first_block = fastfield_accessor.get_val(0);
let last_elem_in_first_chunk = CHUNK_SIZE.min(stats.num_vals);
let last_val_in_first_block =
fastfield_accessor.get_val(last_elem_in_first_chunk as u64 - 1);
let slope = get_slope(
first_val_in_first_block,
last_val_in_first_block,
stats.num_vals,
);
// let's sample at 0%, 5%, 10% .. 95%, but only within the first block
let sample_positions = (0..20)
.map(|pos| (last_elem_in_first_chunk as f32 / 100.0 * pos as f32 * 5.0) as usize)
.collect::<Vec<_>>();
let max_distance = sample_positions
.iter()
.map(|pos| {
let calculated_value =
get_calculated_value(first_val_in_first_block, *pos as u64, slope);
let actual_value = fastfield_accessor.get_val(*pos as u64);
distance(calculated_value, actual_value)
})
.max()
.unwrap();
// Estimate one block and extrapolate the cost to all blocks.
// The theory is that we don't know the actual max_distance, but we are within a
// 50% threshold of it (hence the 1.5 factor below).
// It is multiplied by 2 because in the worst case the line deviates as much above
// as below the values, so the offset would equal max_distance.
let relative_max_value = (max_distance as f32 * 1.5) * 2.0;
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * stats.num_vals as u64
// function metadata per block
+ 29 * (stats.num_vals / CHUNK_SIZE);
let num_bits_uncompressed = 64 * stats.num_vals;
num_bits as f32 / num_bits_uncompressed as f32
}
}
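// A sketch of the overflow guard in `is_applicable` above, with illustrative
// numbers: if max_value is already near u64::MAX, adding the worst-case offset
// would wrap, so the codec opts out.
#[test]
fn overflow_guard_example() {
let (min_value, max_value) = (0u64, u64::MAX - 10);
let theoretical_maximum_offset = max_value - min_value;
// max_value + offset would exceed u64::MAX, hence `is_applicable` returns false.
assert!(max_value.checked_add(theoretical_maximum_offset).is_none());
}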
fn distance<T: Sub<Output = T> + Ord>(x: T, y: T) -> T {
if x < y {
y - x
} else {
x - y
}
}
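// A worked instance of the `estimate` arithmetic above, with assumed numbers;
// CHUNK_SIZE is taken as 512 here purely for illustration.
#[test]
fn estimate_worked_example() {
// Assume the largest sampled deviation in the first block is 100.
let max_distance = 100u64;
let relative_max_value = (max_distance as f32 * 1.5) * 2.0; // 300.0
let num_vals = 1_000_000u64;
let chunk_size = 512u64; // assumption for this sketch
// 300 needs 9 bits per value, plus 29 extra per block for function
// metadata, mirroring the formula in `estimate`.
let num_bits = compute_num_bits(relative_max_value as u64) as u64 * num_vals
+ 29 * (num_vals / chunk_size);
let ratio = num_bits as f32 / (64 * num_vals) as f32;
assert!((ratio - 0.1415).abs() < 0.01); // roughly 14% of the uncompressed size
}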
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::get_codec_test_data_sets;
fn create_and_validate(data: &[u64], name: &str) {
crate::tests::create_and_validate::<
MultiLinearInterpolFastFieldSerializer,
MultiLinearInterpolFastFieldReader,
>(data, name);
}
#[test]
fn test_with_codec_data_sets() {
let data_sets = get_codec_test_data_sets();
for (mut data, name) in data_sets {
create_and_validate(&data, name);
data.reverse();
create_and_validate(&data, name);
}
}
#[test]
fn test_simple() {
let data = (10..=20_u64).collect::<Vec<_>>();
create_and_validate(&data, "simple monotonically");
}
#[test]
fn border_cases_1() {
let data = (0..1024).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn border_case_2() {
let data = (0..1025).collect::<Vec<_>>();
create_and_validate(&data, "border case");
}
#[test]
fn rand() {
for _ in 0..10 {
let mut data = (5_000..20_000)
.map(|_| rand::random::<u64>())
.collect::<Vec<_>>();
create_and_validate(&data, "random");
data.reverse();
create_and_validate(&data, "random");
}
}
}

ownedbytes/Cargo.toml (new file)

@@ -0,0 +1,10 @@
[package]
authors = ["Paul Masurel <paul@quickwit.io>", "Pascal Seitz <pascal@quickwit.io>"]
name = "ownedbytes"
version = "0.1.0"
edition = "2018"
description = "Expose data as static slice"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
stable_deref_trait = "1.2.0"

ownedbytes/src/lib.rs (new file)

@@ -0,0 +1,290 @@
use stable_deref_trait::StableDeref;
use std::convert::TryInto;
use std::mem;
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io};
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
pub struct OwnedBytes {
data: &'static [u8],
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
}
impl OwnedBytes {
/// Creates an empty `OwnedBytes`.
pub fn empty() -> OwnedBytes {
OwnedBytes::new(&[][..])
}
/// Creates an `OwnedBytes` instance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T,
) -> OwnedBytes {
let box_stable_deref = Arc::new(data_holder);
let bytes: &[u8] = box_stable_deref.as_ref();
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
OwnedBytes {
data,
box_stable_deref,
}
}
/// Creates an `OwnedBytes` that is just a view over a slice of the data.
pub fn slice(&self, range: Range<usize>) -> Self {
OwnedBytes {
data: &self.data[range],
box_stable_deref: self.box_stable_deref.clone(),
}
}
/// Returns the underlying slice of data.
/// `Deref` and `AsRef` are also available.
#[inline]
pub fn as_slice(&self) -> &[u8] {
self.data
}
/// Returns the length of the underlying slice.
#[inline]
pub fn len(&self) -> usize {
self.data.len()
}
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
///
/// Left will hold `split_len` bytes.
///
/// This operation is cheap and does not require to copy any memory.
/// On the other hand, both `left` and `right` retain a handle over
/// the entire slice of memory. In other words, the memory will only
/// be released when both left and right are dropped.
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
let right_box_stable_deref = self.box_stable_deref.clone();
let left = OwnedBytes {
data: &self.data[..split_len],
box_stable_deref: self.box_stable_deref,
};
let right = OwnedBytes {
data: &self.data[split_len..],
box_stable_deref: right_box_stable_deref,
};
(left, right)
}
/// Returns true iff this `OwnedBytes` is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.as_slice().is_empty()
}
/// Drops the leftmost `advance_len` bytes.
///
/// See also [.clip(clip_len: usize))](#method.clip).
#[inline]
pub fn advance(&mut self, advance_len: usize) {
self.data = &self.data[advance_len..]
}
/// Reads a `u8` from the `OwnedBytes` and advances by one byte.
#[inline]
pub fn read_u8(&mut self) -> u8 {
assert!(!self.is_empty());
let byte = self.as_slice()[0];
self.advance(1);
byte
}
/// Reads a `u64` encoded as little-endian from the `OwnedBytes` and advances by 8 bytes.
#[inline]
pub fn read_u64(&mut self) -> u64 {
assert!(self.len() > 7);
let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
self.advance(8);
u64::from_le_bytes(octlet)
}
}
impl fmt::Debug for OwnedBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// We truncate the bytes in order to make sure the debug string
// is not too long.
let bytes_truncated: &[u8] = if self.len() > 10 {
&self.as_slice()[..10]
} else {
self.as_slice()
};
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
}
}
impl Deref for OwnedBytes {
type Target = [u8];
#[inline]
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl io::Read for OwnedBytes {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
if data.len() >= buf.len() {
let buf_len = buf.len();
buf.copy_from_slice(&data[..buf_len]);
buf.len()
} else {
let data_len = data.len();
buf[..data_len].copy_from_slice(data);
data_len
}
};
self.advance(read_len);
Ok(read_len)
}
#[inline]
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
buf.extend(data);
data.len()
};
self.advance(read_len);
Ok(read_len)
}
#[inline]
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let read_len = self.read(buf)?;
if read_len != buf.len() {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer",
));
}
Ok(())
}
}
impl AsRef<[u8]> for OwnedBytes {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
#[cfg(test)]
mod tests {
use std::io::{self, Read};
use super::OwnedBytes;
#[test]
fn test_owned_bytes_debug() {
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
assert_eq!(
format!("{:?}", short_bytes),
"OwnedBytes([97, 98, 99, 100], len=4)"
);
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
assert_eq!(
format!("{:?}", long_bytes),
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
);
}
#[test]
fn test_owned_bytes_read() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
{
let mut buf = [0u8; 5];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
}
{
let mut buf = [0u8; 2];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"fg");
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
}
Ok(())
}
#[test]
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 5];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
assert_eq!(&buf, b"abcde");
Ok(())
}
#[test]
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 7];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf[..5], b"abcde");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_to_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = Vec::new();
bytes.read_to_end(&mut buf)?;
assert_eq!(buf.as_slice(), b"abcde".as_ref());
Ok(())
}
#[test]
fn test_owned_bytes_read_u8() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
assert_eq!(bytes.read_u8(), 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_u64() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
assert_eq!(bytes.read_u64(), u64::MAX - 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_split() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
let (left, right) = bytes.split(3);
assert_eq!(left.as_slice(), b"abc");
assert_eq!(right.as_slice(), b"defghi");
}
#[test]
fn test_owned_bytes_split_boundary() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
{
let (left, right) = bytes.clone().split(0);
assert_eq!(left.as_slice(), b"");
assert_eq!(right.as_slice(), b"abcdefghi");
}
{
let (left, right) = bytes.split(9);
assert_eq!(left.as_slice(), b"abcdefghi");
assert_eq!(right.as_slice(), b"");
}
}
}
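// Beyond the unit tests, a brief usage sketch of the API above, as it might
// appear in a small example binary; names and values are illustrative.
use ownedbytes::OwnedBytes;
fn main() {
// Any StableDeref owner works; a Vec<u8> is the simplest.
let bytes = OwnedBytes::new(b"hello world".to_vec());
let (left, right) = bytes.split(5);
assert_eq!(left.as_slice(), b"hello");
// `slice` creates a cheap view sharing the same backing allocation.
let mut tail = right.slice(1..6);
assert_eq!(tail.read_u8(), b'w'); // reads one byte and advances
assert_eq!(tail.as_slice(), b"orld");
}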


@@ -14,3 +14,5 @@ edition = "2018"
[dependencies]
combine = {version="4", default-features=false, features=[] }
once_cell = "1.7.2"
regex ={ version = "1.5.4", default-features = false, features = ["std"] }


@@ -1,21 +1,44 @@
use super::user_input_ast::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
use combine::parser::char::{char, digit, letter, space, spaces, string};
use combine::parser::char::{char, digit, space, spaces, string};
use combine::parser::range::{take_while, take_while1};
use combine::parser::repeat::escaped;
use combine::parser::Parser;
use combine::{
attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
};
use combine::{error::StringStreamError, parser::combinator::recognize};
use once_cell::sync::Lazy;
use regex::Regex;
fn field<'a>() -> impl Parser<&'a str, Output = String> {
(
(letter().or(char('_'))),
many(satisfy(|c: char| {
c.is_alphanumeric() || c == '_' || c == '-'
})),
)
.skip(char(':'))
.map(|(s1, s2): (char, String)| format!("{}{}", s1, s2))
// Note: the '-' char is only forbidden at the beginning of a field name; it would be clearer to add it to the special characters.
const SPECIAL_CHARS: &[char] = &[
'+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
];
const ESCAPED_SPECIAL_CHARS_PATTERN: &str = r#"\\(\+|\^|`|:|\{|\}|"|\[|\]|\(|\)|\~|!|\\|\*| )"#;
/// Parses a field name.
/// A field name must have at least one character and be followed by a colon.
/// All characters are allowed, including the special characters in `SPECIAL_CHARS`,
/// but these need to be escaped with a backslash character '\'.
fn field_name<'a>() -> impl Parser<&'a str, Output = String> {
static ESCAPED_SPECIAL_CHARS_RE: Lazy<Regex> =
Lazy::new(|| Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap());
recognize::<String, _, _>(escaped(
(
take_while1(|c| !SPECIAL_CHARS.contains(&c) && c != '-'),
take_while(|c| !SPECIAL_CHARS.contains(&c)),
),
'\\',
satisfy(|c| SPECIAL_CHARS.contains(&c)),
))
.skip(char(':'))
.map(|s| ESCAPED_SPECIAL_CHARS_RE.replace_all(&s, "$1").to_string())
.and_then(|s: String| match s.is_empty() {
true => Err(StringStreamError::UnexpectedParse),
_ => Ok(s),
})
}
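// As an aside, the unescape step in isolation: the regex replaces each escaped
// special character with its capture group, dropping the backslash. A sketch
// assuming the constants above are in scope; the input string is illustrative.
#[test]
fn unescape_example() {
let re = Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap();
// "$1" keeps only the captured special character.
assert_eq!(re.replace_all(r"my\ field\:name", "$1"), "my field:name");
}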
fn word<'a>() -> impl Parser<&'a str, Output = String> {
@@ -98,7 +121,7 @@ fn term_val<'a>() -> impl Parser<&'a str, Output = String> {
fn term_query<'a>() -> impl Parser<&'a str, Output = UserInputLiteral> {
let term_val_with_field = negative_number().or(term_val());
(field(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
(field_name(), term_val_with_field).map(|(field_name, phrase)| UserInputLiteral {
field_name: Some(field_name),
phrase,
})
@@ -195,7 +218,7 @@ fn range<'a>() -> impl Parser<&'a str, Output = UserInputLeaf> {
);
(
optional(field()).skip(spaces()),
optional(field_name()).skip(spaces()),
// try elastic first, if it matches, the range is unbounded
attempt(elastic_unbounded_range).or(lower_to_upper),
)
@@ -464,21 +487,21 @@ mod test {
#[test]
fn test_parse_elastic_query_ranges() {
test_parse_query_to_ast_helper("title: >a", "title:{\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title:>=a", "title:[\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title: <a", "title:{\"*\" TO \"a\"}");
test_parse_query_to_ast_helper("title:<=a", "title:{\"*\" TO \"a\"]");
test_parse_query_to_ast_helper("title:<=bsd", "title:{\"*\" TO \"bsd\"]");
test_parse_query_to_ast_helper("title: >a", "\"title\":{\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title:>=a", "\"title\":[\"a\" TO \"*\"}");
test_parse_query_to_ast_helper("title: <a", "\"title\":{\"*\" TO \"a\"}");
test_parse_query_to_ast_helper("title:<=a", "\"title\":{\"*\" TO \"a\"]");
test_parse_query_to_ast_helper("title:<=bsd", "\"title\":{\"*\" TO \"bsd\"]");
test_parse_query_to_ast_helper("weight: >70", "weight:{\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight:>=70", "weight:[\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <70", "weight:{\"*\" TO \"70\"}");
test_parse_query_to_ast_helper("weight:<=70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: >60.7", "weight:{\"60.7\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: >70", "\"weight\":{\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight:>=70", "\"weight\":[\"70\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <70", "\"weight\":{\"*\" TO \"70\"}");
test_parse_query_to_ast_helper("weight:<=70", "\"weight\":{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: >60.7", "\"weight\":{\"60.7\" TO \"*\"}");
test_parse_query_to_ast_helper("weight: <= 70", "weight:{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: <= 70", "\"weight\":{\"*\" TO \"70\"]");
test_parse_query_to_ast_helper("weight: <= 70.5", "weight:{\"*\" TO \"70.5\"]");
test_parse_query_to_ast_helper("weight: <= 70.5", "\"weight\":{\"*\" TO \"70.5\"]");
}
#[test]
@@ -491,22 +514,43 @@ mod test {
#[test]
fn test_field_name() -> TestParseResult {
assert_eq!(
super::field().parse("my-field-name:a")?,
("my-field-name".to_string(), "a")
super::field_name().parse(".my.field.name:a"),
Ok((".my.field.name".to_string(), "a"))
);
assert_eq!(
super::field().parse("my_field_name:a")?,
("my_field_name".to_string(), "a")
super::field_name().parse("my\\ field\\ name:a"),
Ok(("my field name".to_string(), "a"))
);
assert!(super::field().parse(":a").is_err());
assert!(super::field().parse("-my_field:a").is_err());
assert!(super::field_name().parse("my field:a").is_err());
assert_eq!(
super::field().parse("_my_field:a")?,
super::field_name().parse("\\(1\\+1\\):2"),
Ok(("(1+1)".to_string(), "2"))
);
assert_eq!(
super::field_name().parse("my_field_name:a"),
Ok(("my_field_name".to_string(), "a"))
);
assert!(super::field_name().parse("my_field_name").is_err());
assert!(super::field_name().parse(":a").is_err());
assert!(super::field_name().parse("-my_field:a").is_err());
assert_eq!(
super::field_name().parse("_my_field:a")?,
("_my_field".to_string(), "a")
);
Ok(())
}
#[test]
fn test_field_name_re() {
let escaped_special_chars_re = Regex::new(ESCAPED_SPECIAL_CHARS_PATTERN).unwrap();
for special_char in SPECIAL_CHARS.iter() {
assert_eq!(
escaped_special_chars_re.replace_all(&format!("\\{}", special_char), "$1"),
special_char.to_string()
);
}
}
#[test]
fn test_range_parser() {
// testing the range() parser separately
@@ -600,12 +644,14 @@ mod test {
#[test]
fn test_single_term_with_field() {
test_parse_query_to_ast_helper("abc:toto", "abc:\"toto\"");
test_parse_query_to_ast_helper("abc:toto", "\"abc\":\"toto\"");
}
#[test]
fn test_single_term_with_float() {
test_parse_query_to_ast_helper("abc:1.1", "abc:\"1.1\"");
test_parse_query_to_ast_helper("abc:1.1", "\"abc\":\"1.1\"");
test_parse_query_to_ast_helper("a.b.c:1.1", "\"a.b.c\":\"1.1\"");
test_parse_query_to_ast_helper("a\\ b\\ c:1.1", "\"a b c\":\"1.1\"");
}
#[test]
@@ -621,22 +667,27 @@ mod test {
#[test]
fn test_parse_test_query_other() {
test_parse_query_to_ast_helper("(+a +b) d", "(*(+\"a\" +\"b\") *\"d\")");
test_parse_query_to_ast_helper("+abc:toto", "abc:\"toto\"");
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+abc:\"toto\" -\"titi\")");
test_parse_query_to_ast_helper("-abc:toto", "(-abc:\"toto\")");
test_parse_query_to_ast_helper("abc:a b", "(*abc:\"a\" *\"b\")");
test_parse_query_to_ast_helper("abc:\"a b\"", "abc:\"a b\"");
test_parse_query_to_ast_helper("foo:[1 TO 5]", "foo:[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("+abc:toto", "\"abc\":\"toto\"");
test_parse_query_to_ast_helper("+a\\+b\\+c:toto", "\"a+b+c\":\"toto\"");
test_parse_query_to_ast_helper("(+abc:toto -titi)", "(+\"abc\":\"toto\" -\"titi\")");
test_parse_query_to_ast_helper("-abc:toto", "(-\"abc\":\"toto\")");
test_is_parse_err("--abc:toto");
test_parse_query_to_ast_helper("abc:a b", "(*\"abc\":\"a\" *\"b\")");
test_parse_query_to_ast_helper("abc:\"a b\"", "\"abc\":\"a b\"");
test_parse_query_to_ast_helper("foo:[1 TO 5]", "\"foo\":[\"1\" TO \"5\"]");
}
#[test]
fn test_parse_query_with_range() {
test_parse_query_to_ast_helper("[1 TO 5]", "[\"1\" TO \"5\"]");
test_parse_query_to_ast_helper("foo:{a TO z}", "foo:{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "foo:[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "foo:{\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "foo:[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper("foo:[1.1 TO *}", "foo:[\"1.1\" TO \"*\"}");
test_parse_query_to_ast_helper("foo:{a TO z}", "\"foo\":{\"a\" TO \"z\"}");
test_parse_query_to_ast_helper("foo:[1 TO toto}", "\"foo\":[\"1\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[* TO toto}", "\"foo\":{\"*\" TO \"toto\"}");
test_parse_query_to_ast_helper("foo:[1 TO *}", "\"foo\":[\"1\" TO \"*\"}");
test_parse_query_to_ast_helper(
"1.2.foo.bar:[1.1 TO *}",
"\"1.2.foo.bar\":[\"1.1\" TO \"*\"}",
);
test_is_parse_err("abc + ");
}
}


@@ -24,7 +24,7 @@ impl Debug for UserInputLeaf {
ref upper,
} => {
if let Some(ref field) = field {
write!(formatter, "{}:", field)?;
write!(formatter, "\"{}\":", field)?;
}
lower.display_lower(formatter)?;
write!(formatter, " TO ")?;
@@ -45,7 +45,7 @@ pub struct UserInputLiteral {
impl fmt::Debug for UserInputLiteral {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self.field_name {
Some(ref field_name) => write!(formatter, "{}:\"{}\"", field_name, self.phrase),
Some(ref field_name) => write!(formatter, "\"{}\":\"{}\"", field_name, self.phrase),
None => write!(formatter, "\"{}\"", self.phrase),
}
}
@@ -79,7 +79,7 @@ impl UserInputBound {
match *self {
UserInputBound::Inclusive(ref contents) => contents,
UserInputBound::Exclusive(ref contents) => contents,
UserInputBound::Unbounded => &"*",
UserInputBound::Unbounded => "*",
}
}
}


@@ -297,10 +297,8 @@ impl Collector for FacetCollector {
if depth == collapse_depth + 1 {
collapsed_id = collapse_facet_ords.len();
collapse_facet_ords.push(facet_streamer.term_ord());
collapse_mapping.push(collapsed_id);
} else {
collapse_mapping.push(collapsed_id);
}
collapse_mapping.push(collapsed_id);
}
break;
}


@@ -16,7 +16,7 @@ use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError};
/// The `FilterCollector` collector filters docs using a u64 fast field value and a predicate.
/// The `FilterCollector` collector filters docs using a fast field value and a predicate.
/// Only the documents for which the predicate returned "true" will be passed on to the next collector.
///
/// ```rust


@@ -276,7 +276,7 @@ mod tests {
let mut collectors = MultiCollector::new();
let topdocs_handler = collectors.add_collector(TopDocs::with_limit(2));
let count_handler = collectors.add_collector(Count);
let mut multifruits = searcher.search(&query, &mut collectors).unwrap();
let mut multifruits = searcher.search(&query, &collectors).unwrap();
assert_eq!(count_handler.extract(&mut multifruits), 5);
assert_eq!(topdocs_handler.extract(&mut multifruits).len(), 2);


@@ -1080,7 +1080,7 @@ mod tests {
query: &str,
query_field: Field,
schema: Schema,
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
mut doc_adder: impl FnMut(&mut IndexWriter),
) -> (Index, Box<dyn Query>) {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 10_000_000).unwrap();


@@ -1,18 +1,15 @@
mod bitset;
mod composite_file;
mod counting_writer;
mod serialize;
mod vint;
pub use self::bitset::BitSet;
pub(crate) use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, DeserializeFrom, FixedSize};
pub use self::vint::{
pub use byteorder::LittleEndian as Endianness;
pub use common::CountingWriter;
pub use common::{
read_u32_vint, read_u32_vint_no_advance, serialize_vint_u32, write_u32_vint, VInt,
};
pub use byteorder::LittleEndian as Endianness;
pub use common::{BinarySerializable, DeserializeFrom, FixedSize};
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
@@ -103,8 +100,8 @@ pub fn u64_to_f64(val: u64) -> f64 {
#[cfg(test)]
pub(crate) mod test {
pub use super::serialize::test::fixed_size_test;
use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
use common::{BinarySerializable, FixedSize};
use proptest::prelude::*;
use std::f64;
use tantivy_bitpacker::compute_num_bits;
@@ -118,6 +115,12 @@ pub(crate) mod test {
assert_eq!(u64_to_f64(f64_to_u64(val)), val);
}
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
}
proptest! {
#[test]
fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {


@@ -42,7 +42,7 @@ fn load_metas(
"Meta file does not contain valid utf8 file.".to_string(),
)
})?;
IndexMeta::deserialize(&meta_string, &inventory)
IndexMeta::deserialize(&meta_string, inventory)
.map_err(|e| {
DataCorruption::new(
META_FILEPATH.to_path_buf(),
@@ -604,7 +604,7 @@ mod tests {
.is_ok());
assert!(Index::exists(&directory).unwrap());
assert!(Index::create(
directory.clone(),
directory,
Schema::builder().build(),
IndexSettings::default()
)


@@ -369,7 +369,6 @@ mod tests {
schema::{Schema, TEXT},
IndexSettings, IndexSortByField, Order,
};
use serde_json;
#[test]
fn test_serialize_metas() {


@@ -16,7 +16,6 @@ pub use self::index_meta::{
pub use self::inverted_index_reader::InvertedIndexReader;
pub use self::searcher::Searcher;
pub use self::segment::Segment;
pub use self::segment::SerializableSegment;
pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId;
pub use self::segment_reader::SegmentReader;


@@ -1,13 +1,12 @@
use super::SegmentComponent;
use crate::core::Index;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory;
use crate::directory::{FileSlice, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema;
use crate::Opstamp;
use crate::{core::Index, indexer::doc_id_mapping::DocIdMapping};
use std::fmt;
use std::path::PathBuf;
@@ -90,20 +89,3 @@ impl Segment {
Ok(write)
}
}
pub trait SerializableSegment {
/// Writes a view of a segment by pushing information
/// to the `SegmentSerializer`.
///
/// # Returns
/// The number of documents in the segment.
///
/// doc_id_map is used when index is created and sorted, to map to the new doc_id order.
/// It is not used by the `IndexMerger`, since the doc_id_mapping on cross-segments works
/// differently
fn write(
&self,
serializer: SegmentSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> crate::Result<u32>;
}


@@ -22,7 +22,7 @@ use std::sync::atomic;
pub struct SegmentId(Uuid);
#[cfg(test)]
static AUTO_INC_COUNTER: Lazy<atomic::AtomicUsize> = Lazy::new(|| atomic::AtomicUsize::default());
static AUTO_INC_COUNTER: Lazy<atomic::AtomicUsize> = Lazy::new(atomic::AtomicUsize::default);
#[cfg(test)]
const ZERO_ARRAY: [u8; 8] = [0u8; 8];
@@ -108,6 +108,12 @@ impl fmt::Debug for SegmentId {
}
}
impl fmt::Display for SegmentId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Seg({:?})", self.short_uuid_string())
}
}
impl PartialOrd for SegmentId {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))


@@ -1,4 +1,3 @@
use crate::common::HasLen;
use crate::core::InvertedIndexReader;
use crate::core::Segment;
use crate::core::SegmentComponent;
@@ -32,9 +31,6 @@ use std::{collections::HashMap, io};
///
/// The segment reader has a very low memory footprint,
/// as close to all of the memory data is mmapped.
///
///
/// TODO fix not decoding docfreq
#[derive(Clone)]
pub struct SegmentReader {
inv_idx_reader_cache: Arc<RwLock<HashMap<Field, Arc<InvertedIndexReader>>>>,
@@ -57,17 +53,12 @@ pub struct SegmentReader {
impl SegmentReader {
/// Returns the highest document id ever attributed in
/// this segment + 1.
/// Today, `tantivy` does not handle deletes, so it happens
/// to also be the number of documents in the index.
pub fn max_doc(&self) -> DocId {
self.max_doc
}
/// Returns the number of documents.
/// Returns the number of alive documents.
/// Deleted documents are not counted.
///
/// Today, `tantivy` does not handle deletes so max doc and
/// num_docs are the same.
pub fn num_docs(&self) -> DocId {
self.num_docs
}
@@ -81,7 +72,7 @@ impl SegmentReader {
/// deleted in the segment.
pub fn num_deleted_docs(&self) -> DocId {
self.delete_bitset()
.map(|delete_set| delete_set.len() as DocId)
.map(|delete_set| delete_set.num_deleted() as DocId)
.unwrap_or(0u32)
}
@@ -329,6 +320,32 @@ mod test {
use crate::schema::{Schema, Term, STORED, TEXT};
use crate::DocId;
#[test]
fn test_num_alive() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let name = schema.get_field("name").unwrap();
{
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(name => "tantivy"));
index_writer.add_document(doc!(name => "horse"));
index_writer.add_document(doc!(name => "jockey"));
index_writer.add_document(doc!(name => "cap"));
// We should now have one segment with four docs.
index_writer.delete_term(Term::from_field_text(name, "horse"));
index_writer.delete_term(Term::from_field_text(name, "cap"));
// Now two of them should be marked as deleted.
index_writer.commit()?;
}
let searcher = index.reader()?.searcher();
assert_eq!(2, searcher.segment_reader(0).num_docs());
assert_eq!(4, searcher.segment_reader(0).max_doc());
Ok(())
}
#[test]
fn test_alive_docs_iterator() -> crate::Result<()> {
let mut schema_builder = Schema::builder();


@@ -211,7 +211,7 @@ mod tests {
assert_eq!(right.read_bytes()?.as_slice(), b"");
}
{
let (left, right) = file_slice.clone().split_from_end(2);
let (left, right) = file_slice.split_from_end(2);
assert_eq!(left.read_bytes()?.as_slice(), b"abcd");
assert_eq!(right.read_bytes()?.as_slice(), b"ef");
}


@@ -402,10 +402,8 @@ mod tests_mmap_specific {
// The file should still be in the list of managed file and
// eventually be deleted once mmap is released.
assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(!managed_directory.exists(test_path1).unwrap());
} else {
assert!(!managed_directory.exists(test_path1).unwrap());
}
assert!(!managed_directory.exists(test_path1).unwrap());
}
#[test]
@@ -430,7 +428,7 @@ mod tests_mmap_specific {
assert_eq!(read_file.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
let mut corrupted_path = tempdir_path;
corrupted_path.push(test_path2);
let mut file = OpenOptions::new().write(true).open(&corrupted_path)?;
file.write_all(&[255u8])?;


@@ -28,7 +28,9 @@ pub use self::file_slice::{FileHandle, FileSlice};
pub use self::owned_bytes::OwnedBytes;
pub use self::ram_directory::RamDirectory;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write};
pub use common::AntiCallToken;
pub use common::TerminatingWrite;
use std::io::BufWriter;
use std::path::PathBuf;
/// Outcome of the Garbage collection
@@ -50,47 +52,6 @@ pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write


@@ -1,290 +1,11 @@
use crate::directory::FileHandle;
use stable_deref_trait::StableDeref;
use std::convert::TryInto;
use std::mem;
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io};
use std::io;
use std::ops::Range;
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice.
///
/// The backing object is required to be `StableDeref`.
#[derive(Clone)]
pub struct OwnedBytes {
data: &'static [u8],
box_stable_deref: Arc<dyn Deref<Target = [u8]> + Sync + Send>,
}
pub use ownedbytes::OwnedBytes;
impl FileHandle for OwnedBytes {
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
Ok(self.slice(range))
}
}
impl OwnedBytes {
/// Creates an empty `OwnedBytes`.
pub fn empty() -> OwnedBytes {
OwnedBytes::new(&[][..])
}
/// Creates an `OwnedBytes` intance given a `StableDeref` object.
pub fn new<T: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync>(
data_holder: T,
) -> OwnedBytes {
let box_stable_deref = Arc::new(data_holder);
let bytes: &[u8] = box_stable_deref.as_ref();
let data = unsafe { mem::transmute::<_, &'static [u8]>(bytes.deref()) };
OwnedBytes {
data,
box_stable_deref,
}
}
/// creates a fileslice that is just a view over a slice of the data.
pub fn slice(&self, range: Range<usize>) -> Self {
OwnedBytes {
data: &self.data[range],
box_stable_deref: self.box_stable_deref.clone(),
}
}
/// Returns the underlying slice of data.
/// `Deref` and `AsRef` are also available.
#[inline]
pub fn as_slice(&self) -> &[u8] {
self.data
}
/// Returns the len of the slice.
#[inline]
pub fn len(&self) -> usize {
self.data.len()
}
/// Splits the OwnedBytes into two OwnedBytes `(left, right)`.
///
/// Left will hold `split_len` bytes.
///
/// This operation is cheap and does not require to copy any memory.
/// On the other hand, both `left` and `right` retain a handle over
/// the entire slice of memory. In other words, the memory will only
/// be released when both left and right are dropped.
pub fn split(self, split_len: usize) -> (OwnedBytes, OwnedBytes) {
let right_box_stable_deref = self.box_stable_deref.clone();
let left = OwnedBytes {
data: &self.data[..split_len],
box_stable_deref: self.box_stable_deref,
};
let right = OwnedBytes {
data: &self.data[split_len..],
box_stable_deref: right_box_stable_deref,
};
(left, right)
}
/// Returns true iff this `OwnedBytes` is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.as_slice().is_empty()
}
/// Drops the left most `advance_len` bytes.
///
/// See also [.clip(clip_len: usize))](#method.clip).
#[inline]
pub fn advance(&mut self, advance_len: usize) {
self.data = &self.data[advance_len..]
}
/// Reads an `u8` from the `OwnedBytes` and advance by one byte.
pub fn read_u8(&mut self) -> u8 {
assert!(!self.is_empty());
let byte = self.as_slice()[0];
self.advance(1);
byte
}
/// Reads an `u64` encoded as little-endian from the `OwnedBytes` and advance by 8 bytes.
pub fn read_u64(&mut self) -> u64 {
assert!(self.len() > 7);
let octlet: [u8; 8] = self.as_slice()[..8].try_into().unwrap();
self.advance(8);
u64::from_le_bytes(octlet)
}
}
impl fmt::Debug for OwnedBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// We truncate the bytes in order to make sure the debug string
// is not too long.
let bytes_truncated: &[u8] = if self.len() > 8 {
&self.as_slice()[..10]
} else {
self.as_slice()
};
write!(f, "OwnedBytes({:?}, len={})", bytes_truncated, self.len())
}
}
impl Deref for OwnedBytes {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl io::Read for OwnedBytes {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
if data.len() >= buf.len() {
let buf_len = buf.len();
buf.copy_from_slice(&data[..buf_len]);
buf.len()
} else {
let data_len = data.len();
buf[..data_len].copy_from_slice(data);
data_len
}
};
self.advance(read_len);
Ok(read_len)
}
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
let read_len = {
let data = self.as_slice();
buf.extend(data);
data.len()
};
self.advance(read_len);
Ok(read_len)
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let read_len = self.read(buf)?;
if read_len != buf.len() {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"failed to fill whole buffer",
));
}
Ok(())
}
}
impl AsRef<[u8]> for OwnedBytes {
fn as_ref(&self) -> &[u8] {
self.as_slice()
}
}
#[cfg(test)]
mod tests {
use std::io::{self, Read};
use super::OwnedBytes;
#[test]
fn test_owned_bytes_debug() {
let short_bytes = OwnedBytes::new(b"abcd".as_ref());
assert_eq!(
format!("{:?}", short_bytes),
"OwnedBytes([97, 98, 99, 100], len=4)"
);
let long_bytes = OwnedBytes::new(b"abcdefghijklmnopq".as_ref());
assert_eq!(
format!("{:?}", long_bytes),
"OwnedBytes([97, 98, 99, 100, 101, 102, 103, 104, 105, 106], len=17)"
);
}
#[test]
fn test_owned_bytes_read() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcdefghiklmnopqrstuvwxyz".as_ref());
{
let mut buf = [0u8; 5];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"fghiklmnopqrstuvwxyz")
}
{
let mut buf = [0u8; 2];
bytes.read_exact(&mut buf[..]).unwrap();
assert_eq!(&buf, b"fg");
assert_eq!(bytes.as_slice(), b"hiklmnopqrstuvwxyz")
}
Ok(())
}
#[test]
fn test_owned_bytes_read_right_at_the_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 5];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf, b"abcde");
assert_eq!(bytes.as_slice(), b"");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
assert_eq!(&buf, b"abcde");
Ok(())
}
#[test]
fn test_owned_bytes_read_incomplete() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = [0u8; 7];
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 5);
assert_eq!(&buf[..5], b"abcde");
assert_eq!(bytes.read(&mut buf[..]).unwrap(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_to_end() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"abcde".as_ref());
let mut buf = Vec::new();
bytes.read_to_end(&mut buf)?;
assert_eq!(buf.as_slice(), b"abcde".as_ref());
Ok(())
}
#[test]
fn test_owned_bytes_read_u8() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\xFF".as_ref());
assert_eq!(bytes.read_u8(), 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_read_u64() -> io::Result<()> {
let mut bytes = OwnedBytes::new(b"\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF".as_ref());
assert_eq!(bytes.read_u64(), u64::MAX - 255);
assert_eq!(bytes.len(), 0);
Ok(())
}
#[test]
fn test_owned_bytes_split() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
let (left, right) = bytes.split(3);
assert_eq!(left.as_slice(), b"abc");
assert_eq!(right.as_slice(), b"defghi");
}
#[test]
fn test_owned_bytes_split_boundary() {
let bytes = OwnedBytes::new(b"abcdefghi".as_ref());
{
let (left, right) = bytes.clone().split(0);
assert_eq!(left.as_slice(), b"");
assert_eq!(right.as_slice(), b"abcdefghi");
}
{
let (left, right) = bytes.split(9);
assert_eq!(left.as_slice(), b"abcdefghi");
assert_eq!(right.as_slice(), b"");
}
}
}


@@ -46,7 +46,7 @@ impl Drop for VecWriter {
fn drop(&mut self) {
if !self.is_flushed {
panic!(
"You forgot to flush {:?} before its writter got Drop. Do not rely on drop.",
"You forgot to flush {:?} before its writter got Drop. Do not rely on drop. This also occurs when the indexer crashed, so you may want to check the logs for the root cause.",
self.path
)
}


@@ -166,26 +166,26 @@ fn test_write_create_the_file(directory: &dyn Directory) {
fn test_directory_delete(directory: &dyn Directory) -> crate::Result<()> {
let test_path: &'static Path = Path::new("some_path_for_test");
assert!(directory.open_read(test_path).is_err());
let mut write_file = directory.open_write(&test_path)?;
let mut write_file = directory.open_write(test_path)?;
write_file.write_all(&[1, 2, 3, 4])?;
write_file.flush()?;
{
let read_handle = directory.open_read(&test_path)?.read_bytes()?;
let read_handle = directory.open_read(test_path)?.read_bytes()?;
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
// Mapped files can't be deleted on Windows
if !cfg!(windows) {
assert!(directory.delete(&test_path).is_ok());
assert!(directory.delete(test_path).is_ok());
assert_eq!(read_handle.as_slice(), &[1u8, 2u8, 3u8, 4u8]);
}
assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
}
if cfg!(windows) {
assert!(directory.delete(&test_path).is_ok());
assert!(directory.delete(test_path).is_ok());
}
assert!(directory.open_read(&test_path).is_err());
assert!(directory.delete(&test_path).is_err());
assert!(directory.open_read(test_path).is_err());
assert!(directory.delete(test_path).is_err());
Ok(())
}


@@ -1,6 +1,5 @@
use std::io;
use crate::fastfield::serializer::FastFieldSerializer;
use crate::schema::{Document, Field, Value};
use crate::DocId;
use crate::{
@@ -84,11 +83,11 @@ impl BytesFastFieldWriter {
&'a self,
doc_id_map: Option<&'b DocIdMapping>,
) -> impl Iterator<Item = &'b [u8]> {
let doc_id_iter = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids().cloned()) as Box<dyn Iterator<Item = u32>>
let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids())
} else {
Box::new(self.doc_index.iter().enumerate().map(|el| el.0 as u32))
as Box<dyn Iterator<Item = u32>>
let max_doc = self.doc_index.len() as u32;
Box::new(0..max_doc)
};
doc_id_iter.map(move |doc_id| self.get_values_for_doc_id(doc_id))
}


@@ -91,6 +91,10 @@ impl DeleteBitSet {
b & (1u8 << shift) != 0
}
/// The number of deleted docs
pub fn num_deleted(&self) -> usize {
self.num_deleted
}
/// Summarize total space usage of this bitset.
pub fn space_usage(&self) -> ByteCount {
self.data.len()


@@ -29,12 +29,13 @@ pub use self::delete::DeleteBitSet;
pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader;
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
pub use self::reader::BitpackedFastFieldReader;
pub(crate) use self::reader::BitpackedFastFieldReader;
pub use self::reader::DynamicFastFieldReader;
pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::CompositeFastFieldSerializer;
pub use self::serializer::FastFieldSerializer;
pub use self::serializer::FastFieldDataAccess;
pub use self::serializer::FastFieldStats;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::schema::Cardinality;
use crate::schema::FieldType;
@@ -213,15 +214,14 @@ mod tests {
use super::*;
use crate::common::CompositeFile;
use crate::common::HasLen;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::fastfield::BitpackedFastFieldReader;
use crate::merge_policy::NoMergePolicy;
use crate::schema::Field;
use crate::schema::Schema;
use crate::schema::FAST;
use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader};
use common::HasLen;
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
@@ -239,7 +239,7 @@ mod tests {
#[test]
pub fn test_fastfield() {
let test_fastfield = BitpackedFastFieldReader::<u64>::from(vec![100, 200, 300]);
let test_fastfield = DynamicFastFieldReader::<u64>::from(vec![100, 200, 300]);
assert_eq!(test_fastfield.get(0), 100);
assert_eq!(test_fastfield.get(1), 200);
assert_eq!(test_fastfield.get(2), 300);
@@ -267,11 +267,11 @@ mod tests {
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 36 as usize);
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 37);
let composite_file = CompositeFile::open(&file)?;
let file = composite_file.open_read(*FIELD).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<u64>::open(file)?;
let fast_field_reader = DynamicFastFieldReader::<u64>::open(file)?;
assert_eq!(fast_field_reader.get(0), 13u64);
assert_eq!(fast_field_reader.get(1), 14u64);
assert_eq!(fast_field_reader.get(2), 2u64);
@@ -298,12 +298,12 @@ mod tests {
fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
serializer.close()?;
}
let file = directory.open_read(&path)?;
assert_eq!(file.len(), 61 as usize);
let file = directory.open_read(path)?;
assert_eq!(file.len(), 62);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<u64>::open(data)?;
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
assert_eq!(fast_field_reader.get(0), 4u64);
assert_eq!(fast_field_reader.get(1), 14_082_001u64);
assert_eq!(fast_field_reader.get(2), 3_052u64);
@@ -334,12 +334,12 @@ mod tests {
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 34 as usize);
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 35);
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<u64>::open(data)?;
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
for doc in 0..10_000 {
assert_eq!(fast_field_reader.get(doc), 100_000u64);
}
@@ -366,12 +366,12 @@ mod tests {
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 80042 as usize);
let file = directory.open_read(path).unwrap();
assert_eq!(file.len(), 80043);
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<u64>::open(data)?;
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
assert_eq!(fast_field_reader.get(0), 0u64);
for doc in 1..10_001 {
assert_eq!(
@@ -405,12 +405,13 @@ mod tests {
.unwrap();
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
assert_eq!(file.len(), 17709 as usize);
let file = directory.open_read(path).unwrap();
//assert_eq!(file.len(), 17710 as usize); //bitpacked size
assert_eq!(file.len(), 10175_usize); // linear interpol size
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(i64_field).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<i64>::open(data)?;
let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
assert_eq!(fast_field_reader.min_value(), -100i64);
assert_eq!(fast_field_reader.max_value(), 9_999i64);
@@ -446,11 +447,11 @@ mod tests {
serializer.close().unwrap();
}
let file = directory.open_read(&path).unwrap();
let file = directory.open_read(path).unwrap();
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(i64_field).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<i64>::open(data)?;
let fast_field_reader = DynamicFastFieldReader::<i64>::open(data)?;
assert_eq!(fast_field_reader.get(0u32), 0i64);
}
Ok(())
@@ -479,11 +480,11 @@ mod tests {
fast_field_writers.serialize(&mut serializer, &HashMap::new(), None)?;
serializer.close()?;
}
let file = directory.open_read(&path)?;
let file = directory.open_read(path)?;
{
let fast_fields_composite = CompositeFile::open(&file)?;
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = BitpackedFastFieldReader::<u64>::open(data)?;
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data)?;
let mut a = 0u64;
for _ in 0..n {
@@ -627,7 +628,7 @@ mod bench {
let directory: RamDirectory = RamDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
@@ -641,7 +642,7 @@ mod bench {
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
b.iter(|| {
let n = test::black_box(7000u32);
@@ -661,7 +662,7 @@ mod bench {
let directory: RamDirectory = RamDirectory::create();
{
let write: WritePtr = directory.open_write(Path::new("test")).unwrap();
let mut serializer = FastFieldSerializer::from_write(write).unwrap();
let mut serializer = CompositeFastFieldSerializer::from_write(write).unwrap();
let mut fast_field_writers = FastFieldsWriter::from_schema(&SCHEMA);
for &x in &permutation {
fast_field_writers.add_document(&doc!(*FIELD=>x));
@@ -675,7 +676,7 @@ mod bench {
{
let fast_fields_composite = CompositeFile::open(&file).unwrap();
let data = fast_fields_composite.open_read(*FIELD).unwrap();
let fast_field_reader = FastFieldReader::<u64>::open(data).unwrap();
let fast_field_reader = DynamicFastFieldReader::<u64>::open(data).unwrap();
b.iter(|| {
let n = test::black_box(1000u32);


@@ -90,7 +90,7 @@ mod tests {
{
let parser = QueryParser::for_index(&index, vec![date_field]);
let query = parser
.parse_query(&format!("\"{}\"", first_time_stamp.to_rfc3339()).to_string())
.parse_query(&format!("\"{}\"", first_time_stamp.to_rfc3339()))
.expect("could not parse query");
let results = searcher
.search(&query, &TopDocs::with_limit(5))
@@ -121,7 +121,7 @@ mod tests {
{
let parser = QueryParser::for_index(&index, vec![date_field]);
let query = parser
.parse_query(&format!("\"{}\"", two_secs_ahead.to_rfc3339()).to_string())
.parse_query(&format!("\"{}\"", two_secs_ahead.to_rfc3339()))
.expect("could not parse query");
let results = searcher
.search(&query, &TopDocs::with_limit(5))


@@ -1,6 +1,6 @@
use std::ops::Range;
use crate::fastfield::{BitpackedFastFieldReader, FastFieldReader, FastValue, MultiValueLength};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue, MultiValueLength};
use crate::DocId;
/// Reader for a multivalued `u64` fast field.
@@ -13,14 +13,14 @@ use crate::DocId;
///
#[derive(Clone)]
pub struct MultiValuedFastFieldReader<Item: FastValue> {
idx_reader: BitpackedFastFieldReader<u64>,
vals_reader: BitpackedFastFieldReader<Item>,
idx_reader: DynamicFastFieldReader<u64>,
vals_reader: DynamicFastFieldReader<Item>,
}
impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
pub(crate) fn open(
idx_reader: BitpackedFastFieldReader<u64>,
vals_reader: BitpackedFastFieldReader<Item>,
idx_reader: DynamicFastFieldReader<u64>,
vals_reader: DynamicFastFieldReader<Item>,
) -> MultiValuedFastFieldReader<Item> {
MultiValuedFastFieldReader {
idx_reader,
@@ -30,6 +30,7 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
/// Returns `(start, stop)`, such that the values associated
/// with the given document are `start..stop`.
#[inline]
fn range(&self, doc: DocId) -> Range<u64> {
let start = self.idx_reader.get(doc);
let stop = self.idx_reader.get(doc + 1);
@@ -37,20 +38,41 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
}
/// Returns the array of values associated to the given `doc`.
#[inline]
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
let range = self.range(doc);
let len = (range.end - range.start) as usize;
vals.resize(len, Item::make_zero());
self.vals_reader.get_range_u64(range.start, &mut vals[..]);
self.vals_reader.get_range(range.start, &mut vals[..]);
}
/// Returns the minimum value for this fast field.
///
/// The min value does not take possible deleted documents
/// into account, and should be considered a lower bound
/// of the actual minimum value.
pub fn min_value(&self) -> Item {
self.vals_reader.min_value()
}
/// Returns the maximum value for this fast field.
///
/// The max value does not take possible deleted documents
/// into account, and should be considered an upper bound
/// of the actual maximum value.
pub fn max_value(&self) -> Item {
self.vals_reader.max_value()
}
/// Returns the number of values associated with the document `DocId`.
#[inline]
pub fn num_vals(&self, doc: DocId) -> usize {
let range = self.range(doc);
(range.end - range.start) as usize
}
/// Returns the overall number of values in this field.
#[inline]
pub fn total_num_vals(&self) -> u64 {
self.idx_reader.max_value()
}
@@ -69,7 +91,7 @@ impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
mod tests {
use crate::core::Index;
use crate::schema::{Facet, Schema, INDEXED};
use crate::schema::{Cardinality, Facet, IntOptions, Schema, INDEXED};
#[test]
fn test_multifastfield_reader() {
@@ -124,4 +146,32 @@ mod tests {
assert_eq!(&vals[..], &[4]);
}
}
#[test]
fn test_multifastfield_reader_min_max() {
let mut schema_builder = Schema::builder();
let field_options = IntOptions::default()
.set_indexed()
.set_fast(Cardinality::MultiValues);
let item_field = schema_builder.add_i64_field("items", field_options);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index
.writer_for_tests()
.expect("Failed to create index writer.");
index_writer.add_document(doc!(
item_field => 2i64,
item_field => 3i64,
item_field => -2i64,
));
index_writer.add_document(doc!(item_field => 6i64, item_field => 3i64));
index_writer.add_document(doc!(item_field => 4i64));
index_writer.commit().expect("Commit failed");
let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0);
let field_reader = segment_reader.fast_fields().i64s(item_field).unwrap();
assert_eq!(field_reader.min_value(), -2);
assert_eq!(field_reader.max_value(), 6);
}
}
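// To illustrate the two-reader layout behind `range`, `get_vals` and
// `total_num_vals`, a standalone sketch in which plain vectors stand in for
// the index and value readers; all numbers are assumed.
fn main() {
// vals holds every value back to back; idx holds one start offset per doc,
// plus the final end offset.
let vals: Vec<i64> = vec![2, 3, -2, 6, 3, 4];
let idx: Vec<u64> = vec![0, 3, 5, 6];
// doc 0 -> vals[0..3], doc 1 -> vals[3..5], doc 2 -> vals[5..6]
let doc = 0usize;
let (start, stop) = (idx[doc] as usize, idx[doc + 1] as usize);
assert_eq!(&vals[start..stop], &[2, 3, -2]);
// total_num_vals() corresponds to the last index entry (the idx max value).
assert_eq!(*idx.last().unwrap() as usize, vals.len());
}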


@@ -1,5 +1,4 @@
use crate::fastfield::serializer::DynamicFastFieldSerializer;
use crate::fastfield::serializer::FastFieldSerializer;
use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy;
use crate::fastfield::CompositeFastFieldSerializer;
use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
@@ -103,11 +102,11 @@ impl MultiValuedFastFieldWriter {
&'a self,
doc_id_map: Option<&'b DocIdMapping>,
) -> impl Iterator<Item = &'b [u64]> {
let doc_id_iter = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids().cloned()) as Box<dyn Iterator<Item = u32>>
let doc_id_iter: Box<dyn Iterator<Item = u32>> = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids())
} else {
Box::new(self.doc_index.iter().enumerate().map(|el| el.0 as u32))
as Box<dyn Iterator<Item = u32>>
let max_doc = self.doc_index.len() as DocId;
Box::new(0..max_doc)
};
doc_id_iter.map(move |doc_id| self.get_values_for_doc_id(doc_id))
}
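The two branches above produce different concrete iterator types, hence the unification behind `Box<dyn Iterator>`. A self-contained sketch of the same pattern (names are illustrative):

fn doc_id_iter(mapping: Option<&[u32]>, max_doc: u32) -> Box<dyn Iterator<Item = u32> + '_> {
    match mapping {
        // Remapped order, e.g. when the index is sorted by a fast field.
        Some(m) => Box::new(m.iter().copied()),
        // Natural order: 0..max_doc.
        None => Box::new(0..max_doc),
    }
}

fn main() {
    assert_eq!(doc_id_iter(None, 3).collect::<Vec<_>>(), vec![0, 1, 2]);
    assert_eq!(doc_id_iter(Some(&[2, 0, 1][..]), 3).collect::<Vec<_>>(), vec![2, 0, 1]);
}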
@@ -155,7 +154,7 @@ impl MultiValuedFastFieldWriter {
}
{
// writing the values themselves.
let mut value_serializer: DynamicFastFieldSerializer<'_, _>;
let mut value_serializer: BitpackedFastFieldSerializerLegacy<'_, _>;
match mapping_opt {
Some(mapping) => {
value_serializer = serializer.new_u64_fast_field_with_idx(

View File

@@ -8,11 +8,17 @@ use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use fastfield_codecs::bitpacked::BitpackedFastFieldReader as BitpackedReader;
use fastfield_codecs::bitpacked::BitpackedFastFieldSerializer;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldReader;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldReader;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
use fastfield_codecs::FastFieldCodecReader;
use fastfield_codecs::FastFieldCodecSerializer;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::Path;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitUnpacker;
/// FastFieldReader is the trait to access fast field data.
pub trait FastFieldReader<Item: FastValue>: Clone {
@@ -38,13 +44,13 @@ pub trait FastFieldReader<Item: FastValue>: Clone {
///
/// May panic if `start + output.len()` is greater than
/// the segment's `maxdoc`.
fn get_range(&self, start: DocId, output: &mut [Item]);
fn get_range(&self, start: u64, output: &mut [Item]);
/// Returns the minimum value for this fast field.
///
/// The max value does not take in account of possible
/// deleted document, and should be considered as an upper bound
/// of the actual maximum value.
/// The min value does not take into account possible
/// deleted documents, and should be considered a lower bound
/// of the actual minimum value.
fn min_value(&self) -> Item;
/// Returns the maximum value for this fast field.
@@ -61,15 +67,48 @@ pub trait FastFieldReader<Item: FastValue>: Clone {
///
pub enum DynamicFastFieldReader<Item: FastValue> {
/// Bitpacked compressed fastfield data.
Bitpacked(BitpackedFastFieldReader<Item>),
Bitpacked(FastFieldReaderCodecWrapper<Item, BitpackedReader>),
/// Linear interpolated values + bitpacked
LinearInterpol(FastFieldReaderCodecWrapper<Item, LinearInterpolFastFieldReader>),
/// Blockwise linear interpolated values + bitpacked
MultiLinearInterpol(FastFieldReaderCodecWrapper<Item, MultiLinearInterpolFastFieldReader>),
}
impl<Item: FastValue> DynamicFastFieldReader<Item> {
/// Returns the correct reader, wrapped in the `DynamicFastFieldReader` enum, for the data.
pub fn open(file: FileSlice) -> crate::Result<DynamicFastFieldReader<Item>> {
Ok(DynamicFastFieldReader::Bitpacked(
BitpackedFastFieldReader::open(file)?,
))
let mut bytes = file.read_bytes()?;
let id = bytes.read_u8();
let reader = match id {
BitpackedFastFieldSerializer::ID => {
DynamicFastFieldReader::Bitpacked(FastFieldReaderCodecWrapper::<
Item,
BitpackedReader,
>::open_from_bytes(bytes)?)
}
LinearInterpolFastFieldSerializer::ID => {
DynamicFastFieldReader::LinearInterpol(FastFieldReaderCodecWrapper::<
Item,
LinearInterpolFastFieldReader,
>::open_from_bytes(bytes)?)
}
MultiLinearInterpolFastFieldSerializer::ID => {
DynamicFastFieldReader::MultiLinearInterpol(FastFieldReaderCodecWrapper::<
Item,
MultiLinearInterpolFastFieldReader,
>::open_from_bytes(
bytes
)?)
}
_ => {
panic!(
"unknown fastfield id {:?}. Data corrupted or using old tantivy version.",
id
)
}
};
Ok(reader)
}
}
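The dispatch above relies on the first byte of the field data naming the codec; the remainder is the codec's own payload. A hypothetical mini-dispatcher illustrating that layout (only the bitpacked id of 1 is confirmed by the assertion further down; the other id is made up):

const BITPACKED_ID: u8 = 1;
const LINEAR_INTERPOL_ID: u8 = 2; // illustrative only

fn codec_name(data: &[u8]) -> &'static str {
    match data.first() {
        Some(&BITPACKED_ID) => "bitpacked",
        Some(&LINEAR_INTERPOL_ID) => "linear interpolation",
        _ => panic!("unknown fastfield id: data corrupted or written by an old tantivy version"),
    }
}

fn main() {
    assert_eq!(codec_name(&[1, 0, 0, 0]), "bitpacked");
}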
@@ -77,57 +116,67 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
fn get(&self, doc: DocId) -> Item {
match self {
Self::Bitpacked(reader) => reader.get(doc),
Self::LinearInterpol(reader) => reader.get(doc),
Self::MultiLinearInterpol(reader) => reader.get(doc),
}
}
fn get_range(&self, start: DocId, output: &mut [Item]) {
fn get_range(&self, start: u64, output: &mut [Item]) {
match self {
Self::Bitpacked(reader) => reader.get_range(start, output),
Self::LinearInterpol(reader) => reader.get_range(start, output),
Self::MultiLinearInterpol(reader) => reader.get_range(start, output),
}
}
fn min_value(&self) -> Item {
match self {
Self::Bitpacked(reader) => reader.min_value(),
Self::LinearInterpol(reader) => reader.min_value(),
Self::MultiLinearInterpol(reader) => reader.min_value(),
}
}
fn max_value(&self) -> Item {
match self {
Self::Bitpacked(reader) => reader.max_value(),
Self::LinearInterpol(reader) => reader.max_value(),
Self::MultiLinearInterpol(reader) => reader.max_value(),
}
}
}
/// Trait for accessing a fastfield.
/// Wrapper for accessing a fastfield.
///
/// Holds the data and the codec to read the data.
///
/// Depending on the field type, a different
/// fast field is required.
#[derive(Clone)]
pub struct BitpackedFastFieldReader<Item: FastValue> {
pub struct FastFieldReaderCodecWrapper<Item: FastValue, CodecReader> {
reader: CodecReader,
bytes: OwnedBytes,
bit_unpacker: BitUnpacker,
min_value_u64: u64,
max_value_u64: u64,
_phantom: PhantomData<Item>,
}
impl<Item: FastValue> BitpackedFastFieldReader<Item> {
impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item, C> {
/// Opens a fast field given a file.
pub fn open(file: FileSlice) -> crate::Result<Self> {
let mut bytes = file.read_bytes()?;
let min_value = u64::deserialize(&mut bytes)?;
let amplitude = u64::deserialize(&mut bytes)?;
let max_value = min_value + amplitude;
let num_bits = compute_num_bits(amplitude);
let bit_unpacker = BitUnpacker::new(num_bits);
Ok(BitpackedFastFieldReader {
let id = u8::deserialize(&mut bytes)?;
assert_eq!(
BitpackedFastFieldSerializer::ID,
id,
"Tried to open fast field as bitpacked encoded (id=1), but got serializer with different id"
);
Self::open_from_bytes(bytes)
}
/// Opens a fast field given the bytes.
pub fn open_from_bytes(bytes: OwnedBytes) -> crate::Result<Self> {
let reader = C::open_from_bytes(bytes.as_slice())?;
Ok(FastFieldReaderCodecWrapper {
reader,
bytes,
min_value_u64: min_value,
max_value_u64: max_value,
bit_unpacker,
_phantom: PhantomData,
})
}
pub(crate) fn get_u64(&self, doc: u64) -> Item {
Item::from_u64(self.min_value_u64 + self.bit_unpacker.get(doc, &self.bytes))
Item::from_u64(self.reader.get_u64(doc, self.bytes.as_slice()))
}
/// Internally, `multivalued` fast fields also use single-value fast fields.
@@ -149,7 +198,9 @@ impl<Item: FastValue> BitpackedFastFieldReader<Item> {
}
}
impl<Item: FastValue> FastFieldReader<Item> for BitpackedFastFieldReader<Item> {
impl<Item: FastValue, C: FastFieldCodecReader + Clone> FastFieldReader<Item>
for FastFieldReaderCodecWrapper<Item, C>
{
/// Return the value associated to the given document.
///
/// This accessor should return as fast as possible.
@@ -175,8 +226,8 @@ impl<Item: FastValue> FastFieldReader<Item> for BitpackedFastFieldReader<Item> {
///
/// May panic if `start + output.len()` is greater than
/// the segment's `maxdoc`.
fn get_range(&self, start: DocId, output: &mut [Item]) {
self.get_range_u64(u64::from(start), output);
fn get_range(&self, start: u64, output: &mut [Item]) {
self.get_range_u64(start, output);
}
/// Returns the minimum value for this fast field.
@@ -185,7 +236,7 @@ impl<Item: FastValue> FastFieldReader<Item> for BitpackedFastFieldReader<Item> {
/// deleted documents, and should be considered a lower bound
/// of the actual minimum value.
fn min_value(&self) -> Item {
Item::from_u64(self.min_value_u64)
Item::from_u64(self.reader.min_value())
}
/// Returns the maximum value for this fast field.
@@ -194,12 +245,14 @@ impl<Item: FastValue> FastFieldReader<Item> for BitpackedFastFieldReader<Item> {
/// deleted documents, and should be considered an upper bound
/// of the actual maximum value.
fn max_value(&self) -> Item {
Item::from_u64(self.max_value_u64)
Item::from_u64(self.reader.max_value())
}
}
impl<Item: FastValue> From<Vec<Item>> for BitpackedFastFieldReader<Item> {
fn from(vals: Vec<Item>) -> BitpackedFastFieldReader<Item> {
pub(crate) type BitpackedFastFieldReader<Item> = FastFieldReaderCodecWrapper<Item, BitpackedReader>;
impl<Item: FastValue> From<Vec<Item>> for DynamicFastFieldReader<Item> {
fn from(vals: Vec<Item>) -> DynamicFastFieldReader<Item> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("field", FAST);
let schema = schema_builder.build();
@@ -231,6 +284,6 @@ impl<Item: FastValue> From<Vec<Item>> for BitpackedFastFieldReader<Item> {
let field_file = composite_file
.open_read(field)
.expect("File component not found");
BitpackedFastFieldReader::open(field_file).unwrap()
DynamicFastFieldReader::open(field_file).unwrap()
}
}
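A hypothetical in-crate usage of this test-only conversion (values are illustrative; assumes the `FastFieldReader` trait is in scope):

#[test]
fn dynamic_reader_from_vec_sketch() {
    // The From<Vec<Item>> impl above serializes the values into a RAM index
    // and opens whichever codec the serializer picked for them.
    let reader = DynamicFastFieldReader::from(vec![5u64, 2, 3]);
    assert_eq!(reader.get(0), 5);
    assert_eq!(reader.min_value(), 2);
    assert_eq!(reader.max_value(), 5);
}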

View File

@@ -8,7 +8,6 @@ use crate::space_usage::PerFieldSpaceUsage;
use crate::TantivyError;
use super::reader::DynamicFastFieldReader;
use super::FastFieldReader;
/// Provides access to all of the fast field readers.
///
@@ -100,23 +99,27 @@ impl FastFieldReaders {
Ok(())
}
pub(crate) fn typed_fast_field_reader_with_idx<TFastValue: FastValue>(
&self,
field: Field,
index: usize,
) -> crate::Result<DynamicFastFieldReader<TFastValue>> {
let fast_field_slice = self.fast_field_data(field, index)?;
DynamicFastFieldReader::open(fast_field_slice)
}
pub(crate) fn typed_fast_field_reader<TFastValue: FastValue>(
&self,
field: Field,
) -> crate::Result<DynamicFastFieldReader<TFastValue>> {
let fast_field_slice = self.fast_field_data(field, 0)?;
DynamicFastFieldReader::open(fast_field_slice)
self.typed_fast_field_reader_with_idx(field, 0)
}
pub(crate) fn typed_fast_field_multi_reader<TFastValue: FastValue>(
&self,
field: Field,
) -> crate::Result<MultiValuedFastFieldReader<TFastValue>> {
let fast_field_slice_idx = self.fast_field_data(field, 0)?;
let fast_field_slice_vals = self.fast_field_data(field, 1)?;
let idx_reader = BitpackedFastFieldReader::open(fast_field_slice_idx)?;
let vals_reader: BitpackedFastFieldReader<TFastValue> =
BitpackedFastFieldReader::open(fast_field_slice_vals)?;
let idx_reader = self.typed_fast_field_reader(field)?;
let vals_reader = self.typed_fast_field_reader_with_idx(field, 1)?;
Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
}
@@ -139,7 +142,7 @@ impl FastFieldReaders {
/// Returns the `i64` fast field reader associated to `field`.
///
/// If `field` is not an i64 fast field, this method returns an Error.
pub fn i64(&self, field: Field) -> crate::Result<impl FastFieldReader<i64>> {
pub fn i64(&self, field: Field) -> crate::Result<DynamicFastFieldReader<i64>> {
self.check_type(field, FastType::I64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -147,7 +150,7 @@ impl FastFieldReaders {
/// Returns the `date` fast field reader associated to `field`.
///
/// If `field` is not a date fast field, this method returns an Error.
pub fn date(&self, field: Field) -> crate::Result<impl FastFieldReader<crate::DateTime>> {
pub fn date(&self, field: Field) -> crate::Result<DynamicFastFieldReader<crate::DateTime>> {
self.check_type(field, FastType::Date, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}
@@ -155,7 +158,7 @@ impl FastFieldReaders {
/// Returns the `f64` fast field reader associated to `field`.
///
/// If `field` is not an f64 fast field, this method returns an Error.
pub fn f64(&self, field: Field) -> crate::Result<impl FastFieldReader<f64>> {
pub fn f64(&self, field: Field) -> crate::Result<DynamicFastFieldReader<f64>> {
self.check_type(field, FastType::F64, Cardinality::SingleValue)?;
self.typed_fast_field_reader(field)
}

View File

@@ -1,256 +0,0 @@
use crate::common::BinarySerializable;
use crate::common::CompositeWrite;
use crate::common::CountingWriter;
use crate::directory::WritePtr;
use crate::schema::Field;
use std::io::{self, Write};
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
/// `CompositeFastFieldSerializer` is in charge of serializing
/// fastfields on disk.
///
/// Fast fields have different encodings like bit-packing.
///
/// `FastFieldWriter`s are in charge of pushing the data to
/// the serializer.
/// The serializer expects to receive the following calls.
///
/// * `new_u64_fast_field(...)`
/// * `add_val(...)`
/// * `add_val(...)`
/// * `add_val(...)`
/// * ...
/// * `close_field()`
/// * `new_u64_fast_field(...)`
/// * `add_val(...)`
/// * ...
/// * `close_field()`
/// * `close()`
pub struct CompositeFastFieldSerializer {
composite_write: CompositeWrite<WritePtr>,
}
impl CompositeFastFieldSerializer {
/// Constructor
pub fn from_write(write: WritePtr) -> io::Result<CompositeFastFieldSerializer> {
// just making room for the pointer to the header.
let composite_write = CompositeWrite::wrap(write);
Ok(CompositeFastFieldSerializer { composite_write })
}
/// Start serializing a new u64 fast field
pub fn new_u64_fast_field(
&mut self,
field: Field,
min_value: u64,
max_value: u64,
) -> io::Result<DynamicFastFieldSerializer<'_, CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
}
/// Start serializing a new u64 fast field
pub fn new_u64_fast_field_with_idx(
&mut self,
field: Field,
min_value: u64,
max_value: u64,
idx: usize,
) -> io::Result<DynamicFastFieldSerializer<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
DynamicFastFieldSerializer::open(field_write, min_value, max_value)
}
/// Start serializing a new [u8] fast field
pub fn new_bytes_fast_field_with_idx(
&mut self,
field: Field,
idx: usize,
) -> FastBytesFieldSerializer<'_, CountingWriter<WritePtr>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
FastBytesFieldSerializer { write: field_write }
}
/// Closes the serializer
///
/// After this call, the data must be persistently saved on disk.
pub fn close(self) -> io::Result<()> {
self.composite_write.close()
}
}
#[derive(Debug, Clone)]
pub struct EstimationStats {
min_value: u64,
max_value: u64,
}
/// The FastFieldSerializer trait is the common interface
/// implemented by every fastfield serializer variant.
///
/// `DynamicFastFieldSerializer` is the enum wrapping all variants.
/// It is used to create a serializer instance.
pub trait FastFieldSerializer {
/// add value to serializer
fn add_val(&mut self, val: u64) -> io::Result<()>;
/// finish serializing a field.
fn close_field(self) -> io::Result<()>;
}
/// The FastFieldSerializerEstimate trait is required on all variants
/// of fast field compressions, to decide which one to choose.
pub trait FastFieldSerializerEstimate {
/// returns an estimate of the compression ratio.
fn estimate(
/*fastfield_accessor: impl FastFieldReader<u64>,*/ stats: EstimationStats,
) -> (f32, &'static str);
/// the unique name of the compressor
fn name() -> &'static str;
}
pub enum DynamicFastFieldSerializer<'a, W: Write> {
Bitpacked(BitpackedFastFieldSerializer<'a, W>),
}
impl<'a, W: Write> DynamicFastFieldSerializer<'a, W> {
/// Creates a new fast field serializer.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode
/// the values.
pub fn open(
write: &'a mut W,
min_value: u64,
max_value: u64,
) -> io::Result<DynamicFastFieldSerializer<'a, W>> {
let stats = EstimationStats {
min_value,
max_value,
};
let (_ratio, name) = (
BitpackedFastFieldSerializer::<Vec<u8>>::estimate(stats),
BitpackedFastFieldSerializer::<Vec<u8>>::name(),
);
Self::open_from_name(write, min_value, max_value, name)
}
/// Creates a new fast field serializer.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode
/// the values.
pub fn open_from_name(
write: &'a mut W,
min_value: u64,
max_value: u64,
name: &str,
) -> io::Result<DynamicFastFieldSerializer<'a, W>> {
// Weirdly the W generic on BitpackedFastFieldSerializer needs to be set,
// although name() doesn't use it
let variant = if name == BitpackedFastFieldSerializer::<Vec<u8>>::name() {
DynamicFastFieldSerializer::Bitpacked(BitpackedFastFieldSerializer::open(
write, min_value, max_value,
)?)
} else {
panic!("unknown fastfield serializer {}", name);
};
Ok(variant)
}
}
impl<'a, W: Write> FastFieldSerializer for DynamicFastFieldSerializer<'a, W> {
fn add_val(&mut self, val: u64) -> io::Result<()> {
match self {
Self::Bitpacked(serializer) => serializer.add_val(val),
}
}
fn close_field(self) -> io::Result<()> {
match self {
Self::Bitpacked(serializer) => serializer.close_field(),
}
}
}
pub struct BitpackedFastFieldSerializer<'a, W: Write> {
bit_packer: BitPacker,
write: &'a mut W,
min_value: u64,
num_bits: u8,
}
impl<'a, W: Write> BitpackedFastFieldSerializer<'a, W> {
/// Creates a new fast field serializer.
///
/// The serializer in fact encodes the values by bitpacking
/// `(val - min_value)`.
///
/// It requires a `min_value` and a `max_value` to compute
/// the minimum number of bits required to encode
/// the values.
fn open(
write: &'a mut W,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedFastFieldSerializer<'a, W>> {
assert!(min_value <= max_value);
min_value.serialize(write)?;
let amplitude = max_value - min_value;
amplitude.serialize(write)?;
let num_bits = compute_num_bits(amplitude);
let bit_packer = BitPacker::new();
Ok(BitpackedFastFieldSerializer {
bit_packer,
write,
min_value,
num_bits,
})
}
}
impl<'a, W: 'a + Write> FastFieldSerializer for BitpackedFastFieldSerializer<'a, W> {
/// Pushes a new value to the currently open u64 fast field.
fn add_val(&mut self, val: u64) -> io::Result<()> {
let val_to_write: u64 = val - self.min_value;
self.bit_packer
.write(val_to_write, self.num_bits, &mut self.write)?;
Ok(())
}
fn close_field(mut self) -> io::Result<()> {
self.bit_packer.close(&mut self.write)
}
}
impl<'a, W: 'a + Write> FastFieldSerializerEstimate for BitpackedFastFieldSerializer<'a, W> {
fn estimate(
/*_fastfield_accessor: impl FastFieldReader<u64>, */ stats: EstimationStats,
) -> (f32, &'static str) {
let amplitude = stats.max_value - stats.min_value;
let num_bits = compute_num_bits(amplitude);
let num_bits_uncompressed = 64;
let ratio = num_bits as f32 / num_bits_uncompressed as f32;
let name = Self::name();
(ratio, name)
}
fn name() -> &'static str {
"Bitpacked"
}
}
pub struct FastBytesFieldSerializer<'a, W: Write> {
write: &'a mut W,
}
impl<'a, W: Write> FastBytesFieldSerializer<'a, W> {
pub fn write_all(&mut self, vals: &[u8]) -> io::Result<()> {
self.write.write_all(vals)
}
pub fn flush(&mut self) -> io::Result<()> {
self.write.flush()
}
}

View File

@@ -0,0 +1,218 @@
use crate::common::BinarySerializable;
use crate::common::CompositeWrite;
use crate::common::CountingWriter;
use crate::directory::WritePtr;
use crate::schema::Field;
pub use fastfield_codecs::bitpacked::BitpackedFastFieldSerializer;
pub use fastfield_codecs::bitpacked::BitpackedFastFieldSerializerLegacy;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
pub use fastfield_codecs::FastFieldCodecSerializer;
pub use fastfield_codecs::FastFieldDataAccess;
pub use fastfield_codecs::FastFieldStats;
use std::io::{self, Write};
/// `CompositeFastFieldSerializer` is in charge of serializing
/// fastfields on disk.
///
/// Fast fields have different encodings like bit-packing.
///
/// `FastFieldWriter`s are in charge of pushing the data to
/// the serializer.
/// The serializer expects to receive the following calls.
///
/// * `new_u64_fast_field(...)`
/// * `add_val(...)`
/// * `add_val(...)`
/// * `add_val(...)`
/// * ...
/// * `close_field()`
/// * `new_u64_fast_field(...)`
/// * `add_val(...)`
/// * ...
/// * `close_field()`
/// * `close()`
pub struct CompositeFastFieldSerializer {
composite_write: CompositeWrite<WritePtr>,
}
// TODO: inline this helper at its call sites once
// `explicit_generic_args_with_impl_trait` is merged and stabilized:
// https://github.com/rust-lang/rust/pull/86176
fn codec_estimation<T: FastFieldCodecSerializer, A: FastFieldDataAccess>(
stats: FastFieldStats,
fastfield_accessor: &A,
estimations: &mut Vec<(f32, &str, u8)>,
) {
if !T::is_applicable(fastfield_accessor, stats.clone()) {
return;
}
let (ratio, name, id) = (T::estimate(fastfield_accessor, stats), T::NAME, T::ID);
estimations.push((ratio, name, id));
}
impl CompositeFastFieldSerializer {
/// Constructor
pub fn from_write(write: WritePtr) -> io::Result<CompositeFastFieldSerializer> {
// just making room for the pointer to the header.
let composite_write = CompositeWrite::wrap(write);
Ok(CompositeFastFieldSerializer { composite_write })
}
/// Serialize data into a new u64 fast field. The best compression codec will be chosen automatically.
pub fn create_auto_detect_u64_fast_field(
&mut self,
field: Field,
stats: FastFieldStats,
fastfield_accessor: impl FastFieldDataAccess,
data_iter_1: impl Iterator<Item = u64>,
data_iter_2: impl Iterator<Item = u64>,
) -> io::Result<()> {
self.create_auto_detect_u64_fast_field_with_idx(
field,
stats,
fastfield_accessor,
data_iter_1,
data_iter_2,
0,
)
}
/// Serialize data into a new u64 fast field. The best compression codec will be chosen automatically.
pub fn create_auto_detect_u64_fast_field_with_idx(
&mut self,
field: Field,
stats: FastFieldStats,
fastfield_accessor: impl FastFieldDataAccess,
data_iter_1: impl Iterator<Item = u64>,
data_iter_2: impl Iterator<Item = u64>,
idx: usize,
) -> io::Result<()> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
let mut estimations = vec![];
codec_estimation::<BitpackedFastFieldSerializer, _>(
stats.clone(),
&fastfield_accessor,
&mut estimations,
);
codec_estimation::<LinearInterpolFastFieldSerializer, _>(
stats.clone(),
&fastfield_accessor,
&mut estimations,
);
codec_estimation::<MultiLinearInterpolFastFieldSerializer, _>(
stats.clone(),
&fastfield_accessor,
&mut estimations,
);
if let Some(broken_estimation) = estimations
.iter()
.find(|estimation| estimation.0.is_nan())
{
warn!(
"broken estimation for fast field codec {}",
broken_estimation.1
);
}
// Remove NaN values for codecs with broken calculations, and f32::MAX values, which disable a codec.
estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let (_ratio, name, id) = estimations[0];
debug!(
"choosing fast field codec {} for field_id {:?}",
name, field
); // todo print actual field name
id.serialize(field_write)?;
match name {
BitpackedFastFieldSerializer::NAME => {
BitpackedFastFieldSerializer::serialize(
field_write,
&fastfield_accessor,
stats,
data_iter_1,
data_iter_2,
)?;
}
LinearInterpolFastFieldSerializer::NAME => {
LinearInterpolFastFieldSerializer::serialize(
field_write,
&fastfield_accessor,
stats,
data_iter_1,
data_iter_2,
)?;
}
MultiLinearInterpolFastFieldSerializer::NAME => {
MultiLinearInterpolFastFieldSerializer::serialize(
field_write,
&fastfield_accessor,
stats,
data_iter_1,
data_iter_2,
)?;
}
_ => {
panic!("unknown fastfield serializer {}", name)
}
};
field_write.flush()?;
Ok(())
}
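The selection logic above boils down to: estimate a compression ratio per codec, drop broken or inapplicable estimates, and pick the smallest. A self-contained sketch (ratios and names are illustrative):

fn main() {
    let mut estimations: Vec<(f32, &str, u8)> = vec![
        (0.125, "Bitpacked", 1),
        (f32::NAN, "LinearInterpol", 2),      // broken estimation
        (f32::MAX, "MultiLinearInterpol", 3), // codec marked as not applicable
    ];
    estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
    estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
    let (_ratio, name, _id) = estimations[0];
    assert_eq!(name, "Bitpacked");
}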
/// Start serializing a new u64 fast field
pub fn new_u64_fast_field(
&mut self,
field: Field,
min_value: u64,
max_value: u64,
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
self.new_u64_fast_field_with_idx(field, min_value, max_value, 0)
}
/// Start serializing a new u64 fast field
pub fn new_u64_fast_field_with_idx(
&mut self,
field: Field,
min_value: u64,
max_value: u64,
idx: usize,
) -> io::Result<BitpackedFastFieldSerializerLegacy<'_, CountingWriter<WritePtr>>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
// Prepend codec id to field data for compatibility with DynamicFastFieldReader.
let id = BitpackedFastFieldSerializer::ID;
id.serialize(field_write)?;
BitpackedFastFieldSerializerLegacy::open(field_write, min_value, max_value)
}
/// Start serializing a new [u8] fast field
pub fn new_bytes_fast_field_with_idx(
&mut self,
field: Field,
idx: usize,
) -> FastBytesFieldSerializer<'_, CountingWriter<WritePtr>> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
FastBytesFieldSerializer { write: field_write }
}
/// Closes the serializer
///
/// After this call, the data must be persistently saved on disk.
pub fn close(self) -> io::Result<()> {
self.composite_write.close()
}
}
pub struct FastBytesFieldSerializer<'a, W: Write> {
write: &'a mut W,
}
impl<'a, W: Write> FastBytesFieldSerializer<'a, W> {
pub fn write_all(&mut self, vals: &[u8]) -> io::Result<()> {
self.write.write_all(vals)
}
pub fn flush(&mut self) -> io::Result<()> {
self.write.flush()
}
}

View File

@@ -1,6 +1,7 @@
use super::multivalued::MultiValuedFastFieldWriter;
use super::serializer::FastFieldStats;
use super::FastFieldDataAccess;
use crate::common;
use crate::fastfield::serializer::FastFieldSerializer;
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::UnorderedTermId;
@@ -265,9 +266,9 @@ impl IntFastFieldWriter {
self.add_val(val);
}
/// Extract the stored data
pub(crate) fn get_data(&self) -> Vec<u64> {
self.vals.iter().collect::<Vec<u64>>()
/// Returns an iterator over the data.
pub(crate) fn iter(&self) -> impl Iterator<Item = u64> + '_ {
self.vals.iter()
}
/// Push the fast fields value to the `FastFieldWriter`.
@@ -281,17 +282,59 @@ impl IntFastFieldWriter {
} else {
(self.val_min, self.val_max)
};
let mut single_field_serializer = serializer.new_u64_fast_field(self.field, min, max)?;
if let Some(doc_id_map) = doc_id_map {
for doc_id in doc_id_map.iter_old_doc_ids() {
single_field_serializer.add_val(self.vals.get(*doc_id as usize))?;
}
} else {
for val in self.vals.iter() {
single_field_serializer.add_val(val)?;
}
let fastfield_accessor = WriterFastFieldAccessProvider {
doc_id_map,
vals: &self.vals,
};
let stats = FastFieldStats {
min_value: min,
max_value: max,
num_vals: self.val_count as u64,
};
single_field_serializer.close_field()
if let Some(doc_id_map) = doc_id_map {
let iter = doc_id_map
.iter_old_doc_ids()
.map(|doc_id| self.vals.get(doc_id as usize));
serializer.create_auto_detect_u64_fast_field(
self.field,
stats,
fastfield_accessor,
iter.clone(),
iter,
)?;
} else {
serializer.create_auto_detect_u64_fast_field(
self.field,
stats,
fastfield_accessor,
self.vals.iter(),
self.vals.iter(),
)?;
};
Ok(())
}
}
#[derive(Clone)]
struct WriterFastFieldAccessProvider<'map, 'bitp> {
doc_id_map: Option<&'map DocIdMapping>,
vals: &'bitp BlockedBitpacker,
}
impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'bitp> {
/// Return the value associated to the given doc.
///
/// Whenever possible, use the iterator passed at fast field creation instead, for performance reasons.
///
/// # Panics
///
/// May panic if `doc` is greater than the index.
fn get_val(&self, doc: u64) -> u64 {
if let Some(doc_id_map) = self.doc_id_map {
self.vals
.get(doc_id_map.get_old_doc_id(doc as u32) as usize) // consider extra FastFieldReader wrapper for non doc_id_map
} else {
self.vals.get(doc as usize)
}
}
}
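A minimal stand-in for the access provider above: random access that applies an optional new-to-old doc id mapping before the value lookup (plain slices replace the `BlockedBitpacker` and `DocIdMapping`):

struct Access<'a> {
    old_by_new: Option<&'a [u32]>, // new doc id -> old doc id
    vals: &'a [u64],               // values stored in old doc id order
}

impl<'a> Access<'a> {
    fn get_val(&self, doc: u64) -> u64 {
        match self.old_by_new {
            Some(map) => self.vals[map[doc as usize] as usize],
            None => self.vals[doc as usize],
        }
    }
}

fn main() {
    let access = Access {
        old_by_new: Some(&[2u32, 0, 1][..]),
        vals: &[10u64, 20, 30][..],
    };
    assert_eq!(access.get_val(0), 30); // new doc 0 was old doc 2
    assert_eq!(access.get_val(1), 10);
}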

View File

@@ -98,7 +98,7 @@ impl FieldNormsWriter {
let mut mapped_fieldnorm_values = vec![];
mapped_fieldnorm_values.resize(fieldnorm_values.len(), 0u8);
for (new_doc_id, old_doc_id) in doc_id_map.iter_old_doc_ids().enumerate() {
mapped_fieldnorm_values[new_doc_id] = fieldnorm_values[*old_doc_id as usize];
mapped_fieldnorm_values[new_doc_id] = fieldnorm_values[old_doc_id as usize];
}
fieldnorms_serializer.serialize_field(field, &mapped_fieldnorm_values)?;
} else {

View File

@@ -38,7 +38,7 @@ fn test_functional_store() -> crate::Result<()> {
for iteration in 0..500 {
dbg!(iteration);
let num_docs: usize = rng.gen_range(0..4);
if doc_set.len() >= 1 {
if !doc_set.is_empty() {
let doc_to_remove_id = rng.gen_range(0..doc_set.len());
let removed_doc_id = doc_set.swap_remove(doc_to_remove_id);
index_writer.delete_term(Term::from_field_u64(id_field, removed_doc_id));
@@ -88,19 +88,17 @@ fn test_functional_indexing() -> crate::Result<()> {
&searcher,
&committed_docs.iter().cloned().collect::<Vec<u64>>(),
)?;
} else if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u64(id_field, random_val);
index_writer.delete_term(doc_id_term);
} else {
if committed_docs.remove(&random_val) || uncommitted_docs.remove(&random_val) {
let doc_id_term = Term::from_field_u64(id_field, random_val);
index_writer.delete_term(doc_id_term);
} else {
uncommitted_docs.insert(random_val);
let mut doc = Document::new();
doc.add_u64(id_field, random_val);
for i in 1u64..10u64 {
doc.add_u64(multiples_field, random_val * i);
}
index_writer.add_document(doc);
uncommitted_docs.insert(random_val);
let mut doc = Document::new();
doc.add_u64(id_field, random_val);
for i in 1u64..10u64 {
doc.add_u64(multiples_field, random_val * i);
}
index_writer.add_document(doc);
}
}
Ok(())

View File

@@ -1,6 +1,6 @@
use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem;
use std::ops::DerefMut;
use std::sync::{Arc, RwLock, Weak};
@@ -105,7 +105,7 @@ impl DeleteQueue {
return None;
}
let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
let delete_operations = std::mem::take(&mut self_wlock.writer);
let new_block = Arc::new(Block {
operations: Arc::from(delete_operations.into_boxed_slice()),
@@ -286,7 +286,7 @@ mod tests {
operations_it.advance();
}
{
let mut operations_it = snapshot.clone();
let mut operations_it = snapshot;
assert_eq!(operations_it.get().unwrap().opstamp, 1);
operations_it.advance();
assert_eq!(operations_it.get().unwrap().opstamp, 2);

View File

@@ -2,13 +2,61 @@
//! to get mappings from old doc_id to new doc_id and vice versa, after sorting
//!
use super::SegmentWriter;
use super::{merger::SegmentReaderWithOrdinal, SegmentWriter};
use crate::{
schema::{Field, Schema},
DocId, IndexSortByField, Order, TantivyError,
};
use std::cmp::Reverse;
/// Struct to provide mapping from old doc_id to new doc_id and vice versa
use std::{cmp::Reverse, ops::Index};
/// Struct to provide mapping from new doc_id to old doc_id and segment.
#[derive(Clone)]
pub(crate) struct SegmentDocidMapping<'a> {
new_doc_id_to_old_and_segment: Vec<(DocId, SegmentReaderWithOrdinal<'a>)>,
is_trivial: bool,
}
impl<'a> SegmentDocidMapping<'a> {
pub(crate) fn new(
new_doc_id_to_old_and_segment: Vec<(DocId, SegmentReaderWithOrdinal<'a>)>,
is_trivial: bool,
) -> Self {
Self {
new_doc_id_to_old_and_segment,
is_trivial,
}
}
pub(crate) fn iter(&self) -> impl Iterator<Item = &(DocId, SegmentReaderWithOrdinal)> {
self.new_doc_id_to_old_and_segment.iter()
}
pub(crate) fn len(&self) -> usize {
self.new_doc_id_to_old_and_segment.len()
}
/// This flag means the segments are simply stacked in the order of their ordinals.
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
///
/// This allows for some optimization.
pub(crate) fn is_trivial(&self) -> bool {
self.is_trivial
}
}
impl<'a> Index<usize> for SegmentDocidMapping<'a> {
type Output = (DocId, SegmentReaderWithOrdinal<'a>);
fn index(&self, idx: usize) -> &Self::Output {
&self.new_doc_id_to_old_and_segment[idx]
}
}
impl<'a> IntoIterator for SegmentDocidMapping<'a> {
type Item = (DocId, SegmentReaderWithOrdinal<'a>);
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.new_doc_id_to_old_and_segment.into_iter()
}
}
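A standalone check of the "trivial" property described above, on (old doc id, segment ordinal) pairs in new-doc order (the ordinals here are illustrative):

fn main() {
    let mapping = vec![(0u32, 0u32), (1, 0), (2, 0), (0, 1), (1, 1)];
    let is_trivial = mapping.windows(2).all(|w| {
        let ((d0, s0), (d1, s1)) = (w[0], w[1]);
        // Either the next doc follows in the same segment,
        // or we jump to doc 0 of the next segment.
        (s1 == s0 && d1 == d0 + 1) || (s1 == s0 + 1 && d1 == 0)
    });
    assert!(is_trivial);
}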
/// Struct to provide mapping from old doc_id to new doc_id and vice versa within a segment.
pub struct DocIdMapping {
new_doc_id_to_old: Vec<DocId>,
old_doc_id_to_new: Vec<DocId>,
@@ -24,8 +72,8 @@ impl DocIdMapping {
self.new_doc_id_to_old[doc_id as usize]
}
/// Iterates over the old doc_ids in the order of the new doc_ids.
pub fn iter_old_doc_ids(&self) -> std::slice::Iter<'_, DocId> {
self.new_doc_id_to_old.iter()
pub fn iter_old_doc_ids(&self) -> impl Iterator<Item = DocId> + Clone + '_ {
self.new_doc_id_to_old.iter().cloned()
}
}
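A sketch of the two directions a `DocIdMapping` provides, and of how `iter_old_doc_ids` is typically consumed, with plain vectors standing in for the struct:

fn main() {
    // new_doc_id_to_old[new] = old; the inverse satisfies old_doc_id_to_new[old] = new.
    let new_to_old: Vec<u32> = vec![2, 0, 1];
    let mut old_to_new = vec![0u32; new_to_old.len()];
    for (new_id, &old_id) in new_to_old.iter().enumerate() {
        old_to_new[old_id as usize] = new_id as u32;
    }
    assert_eq!(old_to_new, vec![1, 2, 0]);
    // Rewriting a value column in new-doc order, as the fast field writers do:
    let old_vals = vec![10u64, 20, 30];
    let new_vals: Vec<u64> = new_to_old.iter().map(|&old| old_vals[old as usize]).collect();
    assert_eq!(new_vals, vec![30, 10, 20]);
}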
@@ -60,9 +108,8 @@ pub(crate) fn get_doc_id_mapping_from_field(
})?;
// create new doc_id to old doc_id index (used in fast_field_writers)
let data = fast_field.get_data();
let mut doc_id_and_data = data
.into_iter()
let mut doc_id_and_data = fast_field
.iter()
.enumerate()
.map(|el| (el.0 as DocId, el.1))
.collect::<Vec<_>>();

View File

@@ -14,35 +14,27 @@ use crate::Opstamp;
// The doc to opstamp mapping stores precisely an array
// indexed by doc id and storing the opstamp of the document.
//
// This mapping is (for the moment) strictly increasing
// because of the way document ids are allocated.
// This mapping is NOT necessarily increasing, because
// we might be sorting documents according to a fast field.
#[derive(Clone)]
pub enum DocToOpstampMapping<'a> {
WithMap(&'a [Opstamp]),
None,
}
impl<'a> From<&'a [u64]> for DocToOpstampMapping<'a> {
fn from(opstamps: &[Opstamp]) -> DocToOpstampMapping {
DocToOpstampMapping::WithMap(opstamps)
}
}
impl<'a> DocToOpstampMapping<'a> {
/// Given an opstamp, returns the limit doc id L
/// such that for all doc ids D,
// D >= L iff opstamp(D) >= `target_opstamp`.
//
// The edge case where the opstamp equals some doc opstamp is in practice
// never hit.
pub fn compute_doc_limit(&self, target_opstamp: Opstamp) -> DocId {
match *self {
DocToOpstampMapping::WithMap(ref doc_opstamps) => {
match doc_opstamps.binary_search(&target_opstamp) {
Ok(doc_id) | Err(doc_id) => doc_id as DocId,
}
/// Assesses whether a document should be considered deleted, given that it
/// contains a term that was deleted at the opstamp `delete_opstamp`.
///
/// This function returns true if the `DocToOpstampMapping` is `None` or if
/// the doc opstamp is anterior to the delete opstamp.
pub fn is_deleted(&self, doc_id: DocId, delete_opstamp: Opstamp) -> bool {
match self {
Self::WithMap(doc_opstamps) => {
let doc_opstamp = doc_opstamps[doc_id as usize];
doc_opstamp < delete_opstamp
}
DocToOpstampMapping::None => DocId::max_value(),
Self::None => true,
}
}
}
@@ -55,40 +47,17 @@ mod tests {
#[test]
fn test_doc_to_opstamp_mapping_none() {
let doc_to_opstamp_mapping = DocToOpstampMapping::None;
assert_eq!(
doc_to_opstamp_mapping.compute_doc_limit(1),
u32::max_value()
);
assert!(doc_to_opstamp_mapping.is_deleted(1u32, 0u64));
assert!(doc_to_opstamp_mapping.is_deleted(1u32, 2u64));
}
#[test]
fn test_doc_to_opstamp_mapping_complex() {
{
let doc_to_opstamp_mapping = DocToOpstampMapping::from(&[][..]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 0);
}
{
let doc_to_opstamp_mapping = DocToOpstampMapping::from(&[1u64][..]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(2u64), 1);
}
{
let doc_to_opstamp_mapping =
DocToOpstampMapping::from(&[1u64, 12u64, 17u64, 23u64][..]);
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(0u64), 0);
for i in 2u64..13u64 {
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 1);
}
for i in 13u64..18u64 {
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 2);
}
for i in 18u64..24u64 {
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 3);
}
for i in 24u64..30u64 {
assert_eq!(doc_to_opstamp_mapping.compute_doc_limit(i), 4);
}
}
fn test_doc_to_opstamp_mapping_with_map() {
let doc_to_opstamp_mapping = DocToOpstampMapping::WithMap(&[5u64, 1u64, 0u64, 4u64, 3u64]);
assert_eq!(doc_to_opstamp_mapping.is_deleted(0u32, 2u64), false);
assert_eq!(doc_to_opstamp_mapping.is_deleted(1u32, 2u64), true);
assert_eq!(doc_to_opstamp_mapping.is_deleted(2u32, 2u64), true);
assert_eq!(doc_to_opstamp_mapping.is_deleted(3u32, 2u64), false);
assert_eq!(doc_to_opstamp_mapping.is_deleted(4u32, 2u64), false);
}
}

View File

@@ -106,22 +106,18 @@ fn compute_deleted_bitset(
}
// A delete operation should only affect
// documents that were inserted after it.
//
// Limit doc helps identify the first document
// that may be affected by the delete operation.
let limit_doc = doc_opstamps.compute_doc_limit(delete_op.opstamp);
// documents that were inserted before it.
let inverted_index = segment_reader.inverted_index(delete_op.term.field())?;
if let Some(mut docset) =
inverted_index.read_postings(&delete_op.term, IndexRecordOption::Basic)?
{
let mut deleted_doc = docset.doc();
while deleted_doc != TERMINATED {
if deleted_doc < limit_doc {
delete_bitset.insert(deleted_doc);
let mut doc_matching_deleted_term = docset.doc();
while doc_matching_deleted_term != TERMINATED {
if doc_opstamps.is_deleted(doc_matching_deleted_term, delete_op.opstamp) {
delete_bitset.insert(doc_matching_deleted_term);
might_have_changed = true;
}
deleted_doc = docset.advance();
doc_matching_deleted_term = docset.advance();
}
}
delete_cursor.advance();
@@ -230,14 +226,8 @@ fn index_documents(
let segment_with_max_doc = segment.with_max_doc(max_doc);
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
let delete_bitset_opt = apply_deletes(
&segment_with_max_doc,
&mut delete_cursor,
&doc_opstamps,
last_docstamp,
)?;
let delete_bitset_opt =
apply_deletes(&segment_with_max_doc, &mut delete_cursor, &doc_opstamps)?;
let meta = segment_with_max_doc.meta().clone();
meta.untrack_temp_docstore();
@@ -247,19 +237,26 @@ fn index_documents(
Ok(true)
}
/// `doc_opstamps` is required to be non-empty.
fn apply_deletes(
segment: &Segment,
mut delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp],
last_docstamp: Opstamp,
) -> crate::Result<Option<BitSet>> {
if delete_cursor.get().is_none() {
// If there are no delete operations in the queue, there is no need
// to even open the segment.
return Ok(None);
}
let max_doc_opstamp: Opstamp = doc_opstamps
.iter()
.cloned()
.max()
.expect("Empty DocOpstamp is forbidden");
let segment_reader = SegmentReader::open(segment)?;
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let doc_to_opstamps = DocToOpstampMapping::WithMap(doc_opstamps);
let max_doc = segment.meta().max_doc();
let mut deleted_bitset = BitSet::with_max_value(max_doc);
@@ -268,7 +265,7 @@ fn apply_deletes(
&segment_reader,
&mut delete_cursor,
&doc_to_opstamps,
last_docstamp,
max_doc_opstamp,
)?;
Ok(if may_have_deletes {
Some(deleted_bitset)
@@ -358,7 +355,7 @@ impl IndexWriter {
// dropping the last reference to the segment_updater.
self.drop_sender();
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
let former_workers_handles = std::mem::take(&mut self.workers_join_handle);
for join_handle in former_workers_handles {
join_handle
.join()
@@ -628,7 +625,7 @@ impl IndexWriter {
// and recreate a new one.
self.recreate_document_channel();
let former_workers_join_handle = mem::replace(&mut self.workers_join_handle, Vec::new());
let former_workers_join_handle = std::mem::take(&mut self.workers_join_handle);
for worker_handle in former_workers_join_handle {
let indexing_worker_result = worker_handle
@@ -784,17 +781,44 @@ impl Drop for IndexWriter {
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::collections::HashSet;
use futures::executor::block_on;
use proptest::prelude::*;
use proptest::prop_oneof;
use proptest::strategy::Strategy;
use super::super::operation::UserOperation;
use crate::collector::TopDocs;
use crate::directory::error::LockError;
use crate::error::*;
use crate::fastfield::FastFieldReader;
use crate::indexer::NoMergePolicy;
use crate::query::QueryParser;
use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption, STRING};
use crate::schema::Cardinality;
use crate::schema::Facet;
use crate::schema::IntOptions;
use crate::schema::TextFieldIndexing;
use crate::schema::TextOptions;
use crate::schema::STORED;
use crate::schema::TEXT;
use crate::schema::{self, IndexRecordOption, FAST, INDEXED, STRING};
use crate::DocAddress;
use crate::Index;
use crate::ReloadPolicy;
use crate::Term;
use crate::{IndexSettings, IndexSortByField, Order};
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed \
do eiusmod tempor incididunt ut labore et dolore magna aliqua. \
Ut enim ad minim veniam, quis nostrud exercitation ullamco \
laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
dolor in reprehenderit in voluptate velit esse cillum dolore eu \
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
proident, sunt in culpa qui officia deserunt mollit anim id est \
laborum.";
#[test]
fn test_operations_group() {
@@ -1282,6 +1306,330 @@ mod tests {
assert!(commit_again.is_ok());
}
#[test]
fn test_delete_with_sort_by_field() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let id_field =
schema_builder.add_u64_field("id", schema::INDEXED | schema::STORED | schema::FAST);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let index_reader = index.reader()?;
let mut index_writer = index.writer_for_tests()?;
// create and delete docs in same commit
for id in 0u64..5u64 {
index_writer.add_document(doc!(id_field => id));
}
for id in 2u64..4u64 {
index_writer.delete_term(Term::from_field_u64(id_field, id));
}
for id in 5u64..10u64 {
index_writer.add_document(doc!(id_field => id));
}
index_writer.commit()?;
index_reader.reload()?;
let searcher = index_reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.num_docs(), 8);
assert_eq!(segment_reader.max_doc(), 10);
let fast_field_reader = segment_reader.fast_fields().u64(id_field)?;
let in_order_alive_ids: Vec<u64> = segment_reader
.doc_ids_alive()
.map(|doc| fast_field_reader.get(doc))
.collect();
assert_eq!(&in_order_alive_ids[..], &[9, 8, 7, 6, 5, 4, 1, 0]);
Ok(())
}
#[derive(Debug, Clone, Copy)]
enum IndexingOp {
AddDoc { id: u64 },
DeleteDoc { id: u64 },
Commit,
}
fn operation_strategy() -> impl Strategy<Value = IndexingOp> {
prop_oneof![
(0u64..10u64).prop_map(|id| IndexingOp::DeleteDoc { id }),
(0u64..10u64).prop_map(|id| IndexingOp::AddDoc { id }),
(0u64..2u64).prop_map(|_| IndexingOp::Commit),
]
}
fn expected_ids(ops: &[IndexingOp]) -> (HashMap<u64, u64>, HashSet<u64>) {
let mut existing_ids = HashMap::new();
let mut deleted_ids = HashSet::new();
for &op in ops {
match op {
IndexingOp::AddDoc { id } => {
*existing_ids.entry(id).or_insert(0) += 1;
deleted_ids.remove(&id);
}
IndexingOp::DeleteDoc { id } => {
existing_ids.remove(&id);
deleted_ids.insert(id);
}
_ => {}
}
}
(existing_ids, deleted_ids)
}
fn test_operation_strategy(
ops: &[IndexingOp],
sort_index: bool,
force_merge: bool,
) -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
let text_field = schema_builder.add_text_field(
"text_field",
TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_index_option(schema::IndexRecordOption::WithFreqsAndPositions),
)
.set_stored(),
);
let large_text_field = schema_builder.add_text_field("large_text_field", TEXT | STORED);
let multi_numbers = schema_builder.add_u64_field(
"multi_numbers",
IntOptions::default()
.set_fast(Cardinality::MultiValues)
.set_stored(),
);
let facet_field = schema_builder.add_facet_field("facet", INDEXED);
let schema = schema_builder.build();
let settings = if sort_index {
IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Asc,
}),
..Default::default()
}
} else {
IndexSettings {
..Default::default()
}
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let mut index_writer = index.writer_for_tests()?;
for &op in ops {
match op {
IndexingOp::AddDoc { id } => {
let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
index_writer
.add_document(doc!(id_field=>id, multi_numbers=> id, multi_numbers => id, text_field => id.to_string(), facet_field => facet, large_text_field=> LOREM));
}
IndexingOp::DeleteDoc { id } => {
index_writer.delete_term(Term::from_field_u64(id_field, id));
}
IndexingOp::Commit => {
index_writer.commit()?;
}
}
}
index_writer.commit()?;
let searcher = index.reader()?.searcher();
if force_merge {
index_writer.wait_merging_threads()?;
let mut index_writer = index.writer_for_tests()?;
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
if segment_ids.len() >= 2 {
block_on(index_writer.merge(&segment_ids)).unwrap();
assert!(index_writer.wait_merging_threads().is_ok());
}
}
let ids: HashSet<u64> = searcher
.segment_readers()
.iter()
.flat_map(|segment_reader| {
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
segment_reader
.doc_ids_alive()
.map(move |doc| ff_reader.get(doc))
})
.collect();
let (expected_ids_and_num_occurences, deleted_ids) = expected_ids(ops);
assert_eq!(
ids,
expected_ids_and_num_occurences
.keys()
.cloned()
.collect::<HashSet<_>>()
);
// multivalue fast field tests
for segment_reader in searcher.segment_readers().iter() {
let ff_reader = segment_reader.fast_fields().u64s(multi_numbers).unwrap();
for doc in segment_reader.doc_ids_alive() {
let mut vals = vec![];
ff_reader.get_vals(doc, &mut vals);
assert_eq!(vals.len(), 2);
assert_eq!(vals[0], vals[1]);
assert!(expected_ids_and_num_occurences.contains_key(&vals[0]));
}
}
// doc store tests
for segment_reader in searcher.segment_readers().iter() {
let store_reader = segment_reader.get_store_reader().unwrap();
// test store iterator
for doc in store_reader.iter(segment_reader.delete_bitset()) {
let id = doc
.unwrap()
.get_first(id_field)
.unwrap()
.u64_value()
.unwrap();
assert!(expected_ids_and_num_occurences.contains_key(&id));
}
// test store random access
for doc_id in segment_reader.doc_ids_alive() {
let id = store_reader
.get(doc_id)
.unwrap()
.get_first(id_field)
.unwrap()
.u64_value()
.unwrap();
assert!(expected_ids_and_num_occurences.contains_key(&id));
let id2 = store_reader
.get(doc_id)
.unwrap()
.get_first(multi_numbers)
.unwrap()
.u64_value()
.unwrap();
assert_eq!(id, id2);
}
}
// test search
let my_text_field = index.schema().get_field("text_field").unwrap();
let do_search = |term: &str| {
let query = QueryParser::for_index(&index, vec![my_text_field])
.parse_query(term)
.unwrap();
let top_docs: Vec<(f32, DocAddress)> =
searcher.search(&query, &TopDocs::with_limit(1000)).unwrap();
top_docs.iter().map(|el| el.1).collect::<Vec<_>>()
};
for (existing_id, count) in expected_ids_and_num_occurences {
assert_eq!(do_search(&existing_id.to_string()).len() as u64, count);
}
for existing_id in deleted_ids {
assert_eq!(do_search(&existing_id.to_string()).len(), 0);
}
// test facets
for segment_reader in searcher.segment_readers().iter() {
let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();
let ff_reader = segment_reader.fast_fields().u64(id_field).unwrap();
for doc_id in segment_reader.doc_ids_alive() {
let mut facet_ords = Vec::new();
facet_reader.facet_ords(doc_id, &mut facet_ords);
assert_eq!(facet_ords.len(), 1);
let mut facet = Facet::default();
facet_reader
.facet_from_ord(facet_ords[0], &mut facet)
.unwrap();
let id = ff_reader.get(doc_id);
let facet_expected = Facet::from(&("/cola/".to_string() + &id.to_string()));
assert_eq!(facet, facet_expected);
}
}
Ok(())
}
proptest! {
#[test]
fn test_delete_with_sort_proptest(ops in proptest::collection::vec(operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], true, false).is_ok());
}
#[test]
fn test_delete_without_sort_proptest(ops in proptest::collection::vec(operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], false, false).is_ok());
}
#[test]
fn test_delete_with_sort_proptest_with_merge(ops in proptest::collection::vec(operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], true, true).is_ok());
}
#[test]
fn test_delete_without_sort_proptest_with_merge(ops in proptest::collection::vec(operation_strategy(), 1..10)) {
assert!(test_operation_strategy(&ops[..], false, true).is_ok());
}
}
#[test]
fn test_delete_with_sort_by_field_last_opstamp_is_not_max() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let sort_by_field = schema_builder.add_u64_field("sort_by", FAST);
let id_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "sort_by".to_string(),
order: Order::Asc,
}),
..Default::default()
};
let index = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()?;
let mut index_writer = index.writer_for_tests()?;
// We add a doc...
index_writer.add_document(doc!(sort_by_field => 2u64, id_field => 0u64));
// And remove it.
index_writer.delete_term(Term::from_field_u64(id_field, 0u64));
// We add another doc.
index_writer.add_document(doc!(sort_by_field=>1u64, id_field => 0u64));
// The expected result is a segment with
// maxdoc = 2
// numdoc = 1.
index_writer.commit()?;
let searcher = index.reader()?.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
assert_eq!(segment_reader.max_doc(), 2);
assert_eq!(segment_reader.num_deleted_docs(), 1);
Ok(())
}
#[test]
fn test_index_doc_missing_field() {
let mut schema_builder = schema::Schema::builder();

View File

@@ -1,14 +1,15 @@
use super::doc_id_mapping::DocIdMapping;
use crate::error::DataCorruption;
use crate::fastfield::CompositeFastFieldSerializer;
use crate::fastfield::DeleteBitSet;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::FastFieldDataAccess;
use crate::fastfield::FastFieldReader;
use crate::fastfield::FastFieldSerializer;
use crate::fastfield::FastFieldStats;
use crate::fastfield::MultiValuedFastFieldReader;
use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::indexer::doc_id_mapping::SegmentDocidMapping;
use crate::indexer::SegmentSerializer;
use crate::postings::Postings;
use crate::postings::{InvertedIndexSerializer, SegmentPostings};
@@ -18,17 +19,18 @@ use crate::schema::{Field, Schema};
use crate::store::StoreWriter;
use crate::termdict::TermMerger;
use crate::termdict::TermOrdinal;
use crate::IndexSortByField;
use crate::{common::HasLen, fastfield::MultiValueLength};
use crate::{common::MAX_DOC_LIMIT, IndexSettings};
use crate::{core::Segment, indexer::doc_id_mapping::expect_field_id_for_sort_field};
use crate::{core::SegmentReader, Order};
use crate::{core::SerializableSegment, IndexSortByField};
use crate::{
docset::{DocSet, TERMINATED},
SegmentOrdinal,
};
use crate::{DocId, InvertedIndexReader, SegmentComponent};
use itertools::Itertools;
use measure_time::debug_time;
use std::cmp;
use std::collections::HashMap;
use std::sync::Arc;
@@ -214,7 +216,7 @@ impl IndexMerger {
let mut readers_with_min_sort_values = readers
.into_iter()
.map(|reader| {
let accessor = Self::get_sort_field_accessor(&reader, &sort_by_field)?;
let accessor = Self::get_sort_field_accessor(&reader, sort_by_field)?;
Ok((reader, accessor.min_value()))
})
.collect::<crate::Result<Vec<_>>>()?;
@@ -232,33 +234,24 @@ impl IndexMerger {
fn write_fieldnorms(
&self,
mut fieldnorms_serializer: FieldNormsSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema);
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
for field in fields {
fieldnorms_data.clear();
if let Some(doc_id_mapping) = doc_id_mapping {
let fieldnorms_readers: Vec<FieldNormReader> = self
.readers
.iter()
.map(|reader| reader.get_fieldnorms_reader(field))
.collect::<Result<_, _>>()?;
for (doc_id, reader_with_ordinal) in doc_id_mapping {
let fieldnorms_reader =
&fieldnorms_readers[reader_with_ordinal.ordinal as usize];
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(*doc_id);
fieldnorms_data.push(fieldnorm_id);
}
} else {
for reader in &self.readers {
let fieldnorms_reader = reader.get_fieldnorms_reader(field)?;
for doc_id in reader.doc_ids_alive() {
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(doc_id);
fieldnorms_data.push(fieldnorm_id);
}
}
let fieldnorms_readers: Vec<FieldNormReader> = self
.readers
.iter()
.map(|reader| reader.get_fieldnorms_reader(field))
.collect::<Result<_, _>>()?;
for (doc_id, reader_with_ordinal) in doc_id_mapping.iter() {
let fieldnorms_reader = &fieldnorms_readers[reader_with_ordinal.ordinal as usize];
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(*doc_id);
fieldnorms_data.push(fieldnorm_id);
}
fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
}
fieldnorms_serializer.close()?;
@@ -269,8 +262,10 @@ impl IndexMerger {
&self,
fast_field_serializer: &mut CompositeFastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
debug_time!("write_fast_fields");
for (field, field_entry) in self.schema.fields() {
let field_type = field_entry.field_type();
match field_type {
@@ -318,7 +313,7 @@ impl IndexMerger {
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
let (min_value, max_value) = self.readers.iter().map(|reader|{
let u64_reader: DynamicFastFieldReader<u64> = reader
@@ -327,7 +322,7 @@ impl IndexMerger {
.expect("Failed to find a reader for single fast field. This is a tantivy bug and it should never happen.");
compute_min_max_val(&u64_reader, reader.max_doc(), reader.delete_bitset())
})
.filter_map(|x| x)
.flatten()
.reduce(|a, b| {
(a.0.min(b.0), a.1.max(b.1))
}).expect("Unexpected error, empty readers in IndexMerger");
@@ -343,51 +338,44 @@ impl IndexMerger {
u64_reader
})
.collect::<Vec<_>>();
if let Some(doc_id_mapping) = doc_id_mapping {
let sorted_doc_ids = doc_id_mapping.iter().map(|(doc_id, reader_with_ordinal)| {
(
doc_id,
&fast_field_readers[reader_with_ordinal.ordinal as usize],
)
});
// add values in order of the new doc_ids
let mut fast_single_field_serializer =
fast_field_serializer.new_u64_fast_field(field, min_value, max_value)?;
for (doc_id, field_reader) in sorted_doc_ids {
let val = field_reader.get(*doc_id);
fast_single_field_serializer.add_val(val)?;
}
fast_single_field_serializer.close_field()?;
Ok(())
} else {
let u64_readers = self.readers.iter()
.filter(|reader|reader.max_doc() != reader.delete_bitset().map(|bit_set|bit_set.len() as u32).unwrap_or(0))
.map(|reader|{
let u64_reader: DynamicFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_reader(field)
.expect("Failed to find a reader for single fast field. This is a tantivy bug and it should never happen.");
(reader.max_doc(), u64_reader, reader.delete_bitset())
}).collect::<Vec<_>>();
let mut fast_single_field_serializer =
fast_field_serializer.new_u64_fast_field(field, min_value, max_value)?;
for (max_doc, u64_reader, delete_bitset_opt) in u64_readers {
for doc_id in 0u32..max_doc {
let is_deleted = delete_bitset_opt
.map(|delete_bitset| delete_bitset.is_deleted(doc_id))
.unwrap_or(false);
if !is_deleted {
let val = u64_reader.get(doc_id);
fast_single_field_serializer.add_val(val)?;
}
}
}
fast_single_field_serializer.close_field()?;
Ok(())
let stats = FastFieldStats {
min_value,
max_value,
num_vals: doc_id_mapping.len() as u64,
};
#[derive(Clone)]
struct SortedDocidFieldAccessProvider<'a> {
doc_id_mapping: &'a SegmentDocidMapping<'a>,
fast_field_readers: &'a Vec<DynamicFastFieldReader<u64>>,
}
impl<'a> FastFieldDataAccess for SortedDocidFieldAccessProvider<'a> {
fn get_val(&self, doc: u64) -> u64 {
let (doc_id, reader_with_ordinal) = self.doc_id_mapping[doc as usize];
self.fast_field_readers[reader_with_ordinal.ordinal as usize].get(doc_id)
}
}
let fastfield_accessor = SortedDocidFieldAccessProvider {
doc_id_mapping,
fast_field_readers: &fast_field_readers,
};
let iter1 = doc_id_mapping.iter().map(|(doc_id, reader_with_ordinal)| {
let fast_field_reader = &fast_field_readers[reader_with_ordinal.ordinal as usize];
fast_field_reader.get(*doc_id)
});
let iter2 = doc_id_mapping.iter().map(|(doc_id, reader_with_ordinal)| {
let fast_field_reader = &fast_field_readers[reader_with_ordinal.ordinal as usize];
fast_field_reader.get(*doc_id)
});
fast_field_serializer.create_auto_detect_u64_fast_field(
field,
stats,
fastfield_accessor,
iter1,
iter2,
)?;
Ok(())
}
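
Aside: the codec API consumed above takes a random-access provider plus two value iterators. A minimal sketch of that access pattern, using illustrative names (DataAccess, MappedAccess) rather than tantivy's actual trait beyond the get_val shape visible in the diff:

trait DataAccess {
    // Mirrors the `FastFieldDataAccess::get_val` shape used above.
    fn get_val(&self, pos: u64) -> u64;
}

// Resolves a position in the merged segment to a value by going through the
// (old_doc_id, segment_ordinal) mapping, like SortedDocidFieldAccessProvider.
struct MappedAccess<'a> {
    mapping: &'a [(u32, usize)],
    columns: &'a [Vec<u64>],
}

impl<'a> DataAccess for MappedAccess<'a> {
    fn get_val(&self, pos: u64) -> u64 {
        let (old_doc, ordinal) = self.mapping[pos as usize];
        self.columns[ordinal][old_doc as usize]
    }
}

fn main() {
    let columns = vec![vec![10, 20], vec![30]];
    // Merged order: segment 1 doc 0, then segment 0 doc 1, then segment 0 doc 0.
    let mapping = [(0u32, 1usize), (1, 0), (0, 0)];
    let access = MappedAccess { mapping: &mapping, columns: &columns };
    let merged: Vec<u64> = (0u64..3).map(|p| access.get_val(p)).collect();
    assert_eq!(merged, vec![30, 20, 10]);
}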
/// Checks if the readers are disjunct for their sort property and in the correct order to be
@@ -416,7 +404,7 @@ impl IndexMerger {
reader: &SegmentReader,
sort_by_field: &IndexSortByField,
) -> crate::Result<impl FastFieldReader<u64>> {
let field_id = expect_field_id_for_sort_field(&reader.schema(), &sort_by_field)?; // for now expect fastfield, but not strictly required
let field_id = expect_field_id_for_sort_field(reader.schema(), sort_by_field)?; // for now expect fastfield, but not strictly required
let value_accessor = reader.fast_fields().u64_lenient(field_id)?;
Ok(value_accessor)
}
@@ -451,7 +439,7 @@ impl IndexMerger {
pub(crate) fn generate_doc_id_mapping(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<Vec<(DocId, SegmentReaderWithOrdinal)>> {
) -> crate::Result<SegmentDocidMapping> {
let reader_and_field_accessors = self.get_reader_with_sort_field_accessor(sort_by_field)?;
// Loading the field accessor on demand causes a 15x regression
@@ -487,7 +475,7 @@ impl IndexMerger {
})
.map(|(doc_id, reader_with_id, _)| (doc_id, reader_with_id))
.collect::<Vec<_>>();
Ok(sorted_doc_ids)
Ok(SegmentDocidMapping::new(sorted_doc_ids, false))
}
// Creating the index file to point into the data, generic over `BytesFastFieldReader` and
@@ -496,19 +484,21 @@ impl IndexMerger {
// Important: reader_and_field_accessor needs
// to have the same order as self.readers since ReaderWithOrdinal
// is used to index the reader_and_field_accessors vec.
fn write_1_n_fast_field_idx_generic(
fn write_1_n_fast_field_idx_generic<T: MultiValueLength>(
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
reader_and_field_accessors: &[(&SegmentReader, impl MultiValueLength)],
) -> crate::Result<()> {
doc_id_mapping: &SegmentDocidMapping,
reader_and_field_accessors: &[(&SegmentReader, T)],
) -> crate::Result<Vec<u64>> {
let mut total_num_vals = 0u64;
// In the first pass, we compute the total number of vals.
//
// This is required by the bitpacker, as it needs to know
// what should be the bit length used for bitpacking.
let mut idx_num_vals = 0;
for (reader, u64s_reader) in reader_and_field_accessors.iter() {
if let Some(delete_bitset) = reader.delete_bitset() {
idx_num_vals += reader.max_doc() as u64 - delete_bitset.len() as u64;
for doc in 0u32..reader.max_doc() {
if delete_bitset.is_alive(doc) {
let num_vals = u64s_reader.get_len(doc) as u64;
@@ -516,46 +506,49 @@ impl IndexMerger {
}
}
} else {
idx_num_vals += reader.max_doc() as u64;
total_num_vals += u64s_reader.get_total_len();
}
}
let stats = FastFieldStats {
max_value: total_num_vals,
num_vals: idx_num_vals,
min_value: 0,
};
// We can now create our `idx` serializer, and in a second pass
// effectively push the different indexes.
if let Some(doc_id_mapping) = doc_id_mapping {
let mut serialize_idx =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
let mut offset = 0;
for (doc_id, reader) in doc_id_mapping {
let reader = &reader_and_field_accessors[reader.ordinal as usize].1;
serialize_idx.add_val(offset)?;
offset += reader.get_len(*doc_id) as u64;
}
serialize_idx.add_val(offset as u64)?;
// Copying into a temp vec is not ideal, but the fast field codec API requires random
// access, which is used in the estimation. It would be possible to 1. calculate random
// access on the fly or 2. change the codec API to make random access optional, but
// both options have major drawbacks.
serialize_idx.close_field()?;
} else {
let mut serialize_idx =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0, total_num_vals, 0)?;
let mut idx = 0;
for (segment_reader, u64s_reader) in reader_and_field_accessors.iter() {
for doc in segment_reader.doc_ids_alive() {
serialize_idx.add_val(idx)?;
idx += u64s_reader.get_len(doc) as u64;
}
}
serialize_idx.add_val(idx)?;
serialize_idx.close_field()?;
let mut offsets = vec![];
let mut offset = 0;
for (doc_id, reader) in doc_id_mapping.iter() {
let reader = &reader_and_field_accessors[reader.ordinal as usize].1;
offsets.push(offset);
offset += reader.get_len(*doc_id) as u64;
}
Ok(())
offsets.push(offset);
fast_field_serializer.create_auto_detect_u64_fast_field(
field,
stats,
&offsets[..],
offsets.iter().cloned(),
offsets.iter().cloned(),
)?;
Ok(offsets)
}
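
A worked example of the idx column produced above, assuming the layout the comments describe: one offset per document plus a trailing total, so doc i owns the values in offsets[i]..offsets[i + 1]. build_offsets is a hypothetical helper, not part of the diff:

fn build_offsets(value_lens: &[u64]) -> Vec<u64> {
    let mut offsets = Vec::with_capacity(value_lens.len() + 1);
    let mut offset = 0;
    for &len in value_lens {
        offsets.push(offset);
        offset += len;
    }
    // Trailing total, matching the final `offsets.push(offset)` above.
    offsets.push(offset);
    offsets
}

fn main() {
    // Three documents with 2, 0 and 3 values respectively.
    assert_eq!(build_offsets(&[2, 0, 3]), vec![0, 2, 2, 5]);
}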
/// Returns the fastfield index (index for the data, not the data).
fn write_multi_value_fast_field_idx(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
) -> crate::Result<()> {
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<Vec<u64>> {
let reader_and_field_accessors = self.readers.iter().map(|reader|{
let u64s_reader: MultiValuedFastFieldReader<u64> = reader.fast_fields()
.typed_fast_field_multi_reader(field)
@@ -576,9 +569,11 @@ impl IndexMerger {
field: Field,
term_ordinal_mappings: &TermOrdinalMapping,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
// Multifastfield consists in 2 fastfields.
debug_time!("write_hierarchical_facet_field");
// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
@@ -603,53 +598,61 @@ impl IndexMerger {
let mut serialize_vals =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0u64, max_term_ord, 1)?;
let mut vals = Vec::with_capacity(100);
if let Some(doc_id_mapping) = doc_id_mapping {
for (old_doc_id, reader_with_ordinal) in doc_id_mapping {
let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(reader_with_ordinal.ordinal as usize);
let ff_reader = &fast_field_reader[reader_with_ordinal.ordinal as usize];
ff_reader.get_vals(*old_doc_id, &mut vals);
for &prev_term_ord in &vals {
let new_term_ord = term_ordinal_mapping[prev_term_ord as usize];
serialize_vals.add_val(new_term_ord)?;
}
}
} else {
for (segment_ord, segment_reader) in self.readers.iter().enumerate() {
let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(segment_ord);
let ff_reader = &fast_field_reader[segment_ord as usize];
// TODO optimize if no deletes
for doc in segment_reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &prev_term_ord in &vals {
let new_term_ord = term_ordinal_mapping[prev_term_ord as usize];
serialize_vals.add_val(new_term_ord)?;
}
}
for (old_doc_id, reader_with_ordinal) in doc_id_mapping.iter() {
let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(reader_with_ordinal.ordinal as usize);
let ff_reader = &fast_field_reader[reader_with_ordinal.ordinal as usize];
ff_reader.get_vals(*old_doc_id, &mut vals);
for &prev_term_ord in &vals {
let new_term_ord = term_ordinal_mapping[prev_term_ord as usize];
serialize_vals.add_val(new_term_ord)?;
}
}
serialize_vals.close_field()?;
}
Ok(())
}
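
The remapping loop above rewrites every stored term ordinal through a per-segment table. A standalone sketch under that assumption (remap_vals is illustrative, not the TermOrdinalMapping API):

// Rewrite segment-local term ordinals into ordinals of the merged dictionary.
fn remap_vals(segment_vals: &[u64], ord_mapping: &[u64]) -> Vec<u64> {
    segment_vals
        .iter()
        .map(|&old_ord| ord_mapping[old_ord as usize])
        .collect()
}

fn main() {
    // Segment-local ordinals 0, 1, 2 map to merged ordinals 4, 0, 7.
    let mapping = [4u64, 0, 7];
    assert_eq!(remap_vals(&[2, 0, 1], &mapping), vec![7, 4, 0]);
}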
/// Creates a mapping if the segments are stacked. This is helpful to share the code path
/// between index sorting and the unsorted (stacked) case.
pub(crate) fn get_doc_id_from_concatenated_data(&self) -> crate::Result<SegmentDocidMapping> {
let mapping: Vec<_> = self
.readers
.iter()
.enumerate()
.map(|(ordinal, reader)| {
let reader_with_ordinal = SegmentReaderWithOrdinal {
ordinal: ordinal as u32,
reader,
};
reader
.doc_ids_alive()
.map(move |doc_id| (doc_id, reader_with_ordinal))
})
.flatten()
.collect();
Ok(SegmentDocidMapping::new(mapping, true))
}
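
What the trivial mapping built above looks like, sketched with plain (doc_id, ordinal) tuples instead of SegmentReaderWithOrdinal: the alive doc ids of each segment, stacked in segment order.

fn concatenated_mapping(alive_per_segment: &[Vec<u32>]) -> Vec<(u32, u32)> {
    alive_per_segment
        .iter()
        .enumerate()
        .flat_map(|(ordinal, alive)| alive.iter().map(move |&doc| (doc, ordinal as u32)))
        .collect()
}

fn main() {
    // Segment 0 holds docs 0..3 with doc 1 deleted; segment 1 holds docs 0..2.
    let mapping = concatenated_mapping(&[vec![0, 2], vec![0, 1]]);
    assert_eq!(mapping, vec![(0, 0), (2, 0), (0, 1), (1, 1)]);
}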
fn write_multi_fast_field(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
// First we merge the idx fast field.
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
let offsets =
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
let mut min_value = u64::max_value();
let mut max_value = u64::min_value();
let mut num_vals = 0;
let mut vals = Vec::with_capacity(100);
@@ -675,6 +678,7 @@ impl IndexMerger {
min_value = cmp::min(val, min_value);
max_value = cmp::max(val, max_value);
}
num_vals += vals.len();
}
ff_readers.push(ff_reader);
// TODO optimize when no deletes
@@ -685,40 +689,76 @@ impl IndexMerger {
max_value = 0;
}
let fast_field_reader = self
.readers
.iter()
.map(|reader| {
let ff_reader : MultiValuedFastFieldReader<u64> = reader.fast_fields()
.typed_fast_field_multi_reader(field)
.expect("Failed to find index for multivalued field. This is a bug in tantivy, please report.");
ff_reader
})
.collect::<Vec<_>>();
// We can now initialize our serializer and push the different values to it
let mut serialize_vals =
fast_field_serializer.new_u64_fast_field_with_idx(field, min_value, max_value, 1)?;
if let Some(doc_id_mapping) = doc_id_mapping {
for (doc_id, reader_with_ordinal) in doc_id_mapping {
let ff_reader = &fast_field_reader[reader_with_ordinal.ordinal as usize];
ff_reader.get_vals(*doc_id, &mut vals);
for &val in &vals {
serialize_vals.add_val(val)?;
}
}
} else {
for (reader, ff_reader) in self.readers.iter().zip(ff_readers) {
// TODO optimize if no deletes
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &val in &vals {
serialize_vals.add_val(val)?;
}
}
let stats = FastFieldStats {
max_value,
num_vals: num_vals as u64,
min_value,
};
struct SortedDocidMultiValueAccessProvider<'a> {
doc_id_mapping: &'a SegmentDocidMapping<'a>,
fast_field_readers: &'a Vec<MultiValuedFastFieldReader<u64>>,
offsets: Vec<u64>,
}
impl<'a> FastFieldDataAccess for SortedDocidMultiValueAccessProvider<'a> {
fn get_val(&self, pos: u64) -> u64 {
// Use the offsets index to find the doc_id that contains the position.
// The offsets are strictly increasing, so a simple search over them works.
let new_docid = self
.offsets
.iter()
.position(|&offset| offset > pos)
.expect("pos is out of bounds")
- 1;
// now we need to find the position of `pos` in the multivalued bucket
let num_pos_covered_until_now = self.offsets[new_docid];
let pos_in_values = pos - num_pos_covered_until_now;
let (old_doc_id, reader_with_ordinal) = self.doc_id_mapping[new_docid as usize];
let num_vals = self.fast_field_readers[reader_with_ordinal.ordinal as usize]
.get_len(old_doc_id);
assert!(num_vals >= pos_in_values);
let mut vals = vec![];
self.fast_field_readers[reader_with_ordinal.ordinal as usize]
.get_vals(old_doc_id, &mut vals);
vals[pos_in_values as usize]
}
}
serialize_vals.close_field()?;
let fastfield_accessor = SortedDocidMultiValueAccessProvider {
doc_id_mapping,
fast_field_readers: &ff_readers,
offsets,
};
let iter1 = doc_id_mapping
.iter()
.map(|(doc_id, reader_with_ordinal)| {
let ff_reader = &ff_readers[reader_with_ordinal.ordinal as usize];
let mut vals = vec![];
ff_reader.get_vals(*doc_id, &mut vals);
vals.into_iter()
})
.flatten();
let iter2 = doc_id_mapping
.iter()
.map(|(doc_id, reader_with_ordinal)| {
let ff_reader = &ff_readers[reader_with_ordinal.ordinal as usize];
let mut vals = vec![];
ff_reader.get_vals(*doc_id, &mut vals);
vals.into_iter()
})
.flatten();
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
field,
stats,
fastfield_accessor,
iter1,
iter2,
1,
)?;
Ok(())
}
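
A worked example of the position lookup in SortedDocidMultiValueAccessProvider::get_val above: scan the strictly increasing offsets for the first entry greater than pos; the document is the previous slot, and the remainder indexes into that document's values.

fn locate(offsets: &[u64], pos: u64) -> (usize, u64) {
    // Same search as above: first offset strictly greater than `pos`, minus one.
    let doc = offsets
        .iter()
        .position(|&offset| offset > pos)
        .expect("pos is out of bounds")
        - 1;
    (doc, pos - offsets[doc])
}

fn main() {
    // Documents with 2, 0 and 3 values yield offsets [0, 2, 2, 5].
    let offsets = [0u64, 2, 2, 5];
    assert_eq!(locate(&offsets, 0), (0, 0));
    assert_eq!(locate(&offsets, 3), (2, 1)); // second value of doc 2
}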
@@ -726,7 +766,7 @@ impl IndexMerger {
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
let reader_and_field_accessors = self
.readers
@@ -745,24 +785,13 @@ impl IndexMerger {
&reader_and_field_accessors,
)?;
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1);
if let Some(doc_id_mapping) = doc_id_mapping {
for (doc_id, reader_with_ordinal) in doc_id_mapping {
let bytes_reader =
&reader_and_field_accessors[reader_with_ordinal.ordinal as usize].1;
let val = bytes_reader.get_bytes(*doc_id);
serialize_vals.write_all(val)?;
}
} else {
for segment_reader in &self.readers {
let bytes_reader = segment_reader.fast_fields().bytes(field)
.expect("Failed to find bytes field in fast field reader. This is a bug in tantivy. Please report.");
// TODO: optimize if no deletes
for doc in segment_reader.doc_ids_alive() {
let val = bytes_reader.get_bytes(doc);
serialize_vals.write_all(val)?;
}
}
for (doc_id, reader_with_ordinal) in doc_id_mapping.iter() {
let bytes_reader = &reader_and_field_accessors[reader_with_ordinal.ordinal as usize].1;
let val = bytes_reader.get_bytes(*doc_id);
serialize_vals.write_all(val)?;
}
serialize_vals.flush()?;
Ok(())
}
@@ -773,8 +802,9 @@ impl IndexMerger {
field_type: &FieldType,
serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<Option<TermOrdinalMapping>> {
debug_time!("write_postings_for_field");
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();
@@ -799,40 +829,23 @@ impl IndexMerger {
};
let mut merged_terms = TermMerger::new(field_term_streams);
let mut max_doc = 0;
// map from segment doc ids to the resulting merged segment doc id.
let mut merged_doc_id_map: Vec<Vec<Option<DocId>>> = Vec::with_capacity(self.readers.len());
if let Some(doc_id_mapping) = doc_id_mapping {
merged_doc_id_map = self
.readers
.iter()
.map(|reader| {
let mut segment_local_map = vec![];
segment_local_map.resize(reader.max_doc() as usize, None);
segment_local_map
})
.collect();
for (new_doc_id, (old_doc_id, segment_and_ordinal)) in doc_id_mapping.iter().enumerate()
{
let segment_map = &mut merged_doc_id_map[segment_and_ordinal.ordinal as usize];
segment_map[*old_doc_id as usize] = Some(new_doc_id as DocId);
}
} else {
for reader in &self.readers {
let mut segment_local_map = Vec::with_capacity(reader.max_doc() as usize);
for doc_id in 0..reader.max_doc() {
if reader.is_deleted(doc_id) {
segment_local_map.push(None);
} else {
segment_local_map.push(Some(max_doc));
max_doc += 1u32;
}
}
merged_doc_id_map.push(segment_local_map);
}
let mut merged_doc_id_map: Vec<Vec<Option<DocId>>> = self
.readers
.iter()
.map(|reader| {
let mut segment_local_map = vec![];
segment_local_map.resize(reader.max_doc() as usize, None);
segment_local_map
})
.collect();
for (new_doc_id, (old_doc_id, segment_and_ordinal)) in doc_id_mapping.iter().enumerate() {
let segment_map = &mut merged_doc_id_map[segment_and_ordinal.ordinal as usize];
segment_map[*old_doc_id as usize] = Some(new_doc_id as DocId);
}
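
The inversion just performed, sketched standalone: the doc id mapping lists (old_doc_id, segment) in new-doc-id order, and it is turned into one old-doc-id -> Option<new_doc_id> table per segment, with None marking deleted documents.

fn invert(mapping: &[(u32, usize)], max_docs: &[usize]) -> Vec<Vec<Option<u32>>> {
    let mut per_segment: Vec<Vec<Option<u32>>> =
        max_docs.iter().map(|&max_doc| vec![None; max_doc]).collect();
    for (new_doc_id, &(old_doc_id, segment)) in mapping.iter().enumerate() {
        per_segment[segment][old_doc_id as usize] = Some(new_doc_id as u32);
    }
    per_segment
}

fn main() {
    let mapping = [(0u32, 1usize), (1, 0), (0, 0)];
    let tables = invert(&mapping, &[2, 1]);
    assert_eq!(tables[0], vec![Some(2), Some(1)]);
    assert_eq!(tables[1], vec![Some(0)]);
}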
// The total number of tokens will only be exact when there have been no deletes.
//
// Otherwise, we approximate by removing deleted documents proportionally.
@@ -928,7 +941,7 @@ impl IndexMerger {
// I think this is not strictly necessary; it would be possible to
// avoid loading into a vec via some form of k-merge, but then the merge
// logic would deviate much more from the stacking case (unsorted index)
if doc_id_mapping.is_some() {
if !doc_id_mapping.is_trivial() {
doc_id_and_positions.push((
remapped_doc_id,
term_freq,
@@ -943,7 +956,7 @@ impl IndexMerger {
doc = segment_postings.advance();
}
}
if doc_id_mapping.is_some() {
if !doc_id_mapping.is_trivial() {
doc_id_and_positions.sort_unstable_by_key(|&(doc_id, _, _)| doc_id);
for (doc_id, term_freq, positions) in &doc_id_and_positions {
field_serializer.write_doc(*doc_id, *term_freq, positions);
@@ -962,7 +975,7 @@ impl IndexMerger {
&self,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() {
@@ -985,8 +998,10 @@ impl IndexMerger {
fn write_storable_fields(
&self,
store_writer: &mut StoreWriter,
doc_id_mapping: &Option<Vec<(DocId, SegmentReaderWithOrdinal)>>,
doc_id_mapping: &SegmentDocidMapping,
) -> crate::Result<()> {
debug_time!("write_storable_fields");
let store_readers: Vec<_> = self
.readers
.iter()
@@ -997,8 +1012,8 @@ impl IndexMerger {
.enumerate()
.map(|(i, store)| store.iter_raw(self.readers[i].delete_bitset()))
.collect();
if let Some(doc_id_mapping) = doc_id_mapping {
for (old_doc_id, reader_with_ordinal) in doc_id_mapping {
if !doc_id_mapping.is_trivial() {
for (old_doc_id, reader_with_ordinal) in doc_id_mapping.iter() {
let doc_bytes_it = &mut document_iterators[reader_with_ordinal.ordinal as usize];
if let Some(doc_bytes_res) = doc_bytes_it.next() {
let doc_bytes = doc_bytes_res?;
@@ -1042,25 +1057,24 @@ impl IndexMerger {
}
Ok(())
}
}
impl SerializableSegment for IndexMerger {
fn write(
&self,
mut serializer: SegmentSerializer,
_: Option<&DocIdMapping>,
) -> crate::Result<u32> {
/// Writes the merged segment by pushing information
/// to the `SegmentSerializer`.
///
/// # Returns
/// The number of documents in the resulting segment.
pub fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
let doc_id_mapping = if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref()
{
// If the documents are already sorted and stackable, we ignore the sort mapping and
// execute the merge as if there were no sorting, using the trivial concatenation mapping
if self.is_disjunct_and_sorted_on_sort_property(sort_by_field)? {
None
self.get_doc_id_from_concatenated_data()?
} else {
Some(self.generate_doc_id_mapping(sort_by_field)?)
self.generate_doc_id_mapping(sort_by_field)?
}
} else {
None
self.get_doc_id_from_concatenated_data()?
};
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {

View File

@@ -362,6 +362,7 @@ mod bench_sorted_index_merge {
use crate::core::Index;
//use crate::schema;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::FastFieldReader;
use crate::indexer::merger::IndexMerger;
use crate::schema::Cardinality;
@@ -422,7 +423,7 @@ mod bench_sorted_index_merge {
b.iter(|| {
let sorted_doc_ids = doc_id_mapping.iter().map(|(doc_id, reader)|{
let u64_reader: FastFieldReader<u64> = reader.reader
let u64_reader: DynamicFastFieldReader<u64> = reader.reader
.fast_fields()
.typed_fast_field_reader(field)
.expect("Failed to find a reader for single fast field. This is a tantivy bug and it should never happen.");

View File

@@ -32,6 +32,12 @@ impl SegmentRegisters {
} else if self.committed.contains_all(segment_ids) {
Some(SegmentsStatus::Committed)
} else {
warn!(
"segment_ids: {:?}, committed_ids: {:?}, uncommitted_ids {:?}",
segment_ids,
self.committed.segment_ids(),
self.uncommitted.segment_ids()
);
None
}
}
@@ -58,21 +64,6 @@ impl Debug for SegmentManager {
}
}
pub fn get_mergeable_segments(
in_merge_segment_ids: &HashSet<SegmentId>,
segment_manager: &SegmentManager,
) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
let registers_lock = segment_manager.read();
(
registers_lock
.committed
.get_mergeable_segments(in_merge_segment_ids),
registers_lock
.uncommitted
.get_mergeable_segments(in_merge_segment_ids),
)
}
impl SegmentManager {
pub fn from_segments(
segment_metas: Vec<SegmentMeta>,
@@ -86,6 +77,20 @@ impl SegmentManager {
}
}
pub fn get_mergeable_segments(
&self,
in_merge_segment_ids: &HashSet<SegmentId>,
) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
let registers_lock = self.read();
(
registers_lock
.committed
.get_mergeable_segments(in_merge_segment_ids),
registers_lock
.uncommitted
.get_mergeable_segments(in_merge_segment_ids),
)
}
/// Returns all of the segment entries (committed or uncommitted)
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
let registers_lock = self.read();

View File

@@ -4,6 +4,7 @@ use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::segment_entry::SegmentEntry;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Display;
use std::fmt::{self, Debug, Formatter};
/// The segment register keeps track
@@ -29,6 +30,16 @@ impl Debug for SegmentRegister {
Ok(())
}
}
impl Display for SegmentRegister {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "SegmentRegister(")?;
for k in self.segment_states.keys() {
write!(f, "{}, ", k.short_uuid_string())?;
}
write!(f, ")")?;
Ok(())
}
}
impl SegmentRegister {
pub fn clear(&mut self) {
@@ -46,6 +57,10 @@ impl SegmentRegister {
.collect()
}
pub fn segment_ids(&self) -> Vec<SegmentId> {
self.segment_states.keys().cloned().collect()
}
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
self.segment_states.values().cloned().collect()
}

View File

@@ -1,11 +1,10 @@
use super::segment_manager::{get_mergeable_segments, SegmentManager};
use super::segment_manager::SegmentManager;
use crate::core::Index;
use crate::core::IndexMeta;
use crate::core::IndexSettings;
use crate::core::Segment;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SerializableSegment;
use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::indexer::delete_queue::DeleteCursor;
@@ -140,7 +139,7 @@ fn merge(
// ... we just serialize this index merger in our new segment to merge the segments.
let segment_serializer = SegmentSerializer::for_segment(merged_segment.clone(), true)?;
let num_docs = merger.write(segment_serializer, None)?;
let num_docs = merger.write(segment_serializer)?;
let merged_segment_id = merged_segment.id();
@@ -209,7 +208,7 @@ pub fn merge_segments<Dir: Directory>(
&segments[..],
)?;
let segment_serializer = SegmentSerializer::for_segment(merged_segment, true)?;
let num_docs = merger.write(segment_serializer, None)?;
let num_docs = merger.write(segment_serializer)?;
let segment_meta = merged_index.new_segment_meta(merged_segment_id, num_docs);
@@ -528,10 +527,14 @@ impl SegmentUpdater {
}))
}
async fn consider_merge_options(&self) {
pub(crate) fn get_mergeable_segments(&self) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
let (committed_segments, uncommitted_segments) =
get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
self.segment_manager
.get_mergeable_segments(&merge_segment_ids)
}
async fn consider_merge_options(&self) {
let (committed_segments, uncommitted_segments) = self.get_mergeable_segments();
// Committed segments cannot be merged with uncommitted_segments.
// We therefore consider merges using these two sets of segments independently.
@@ -717,7 +720,7 @@ mod tests {
let seg_ids = index.searchable_segment_ids()?;
// docs exist, should have at least 1 segment
assert!(seg_ids.len() > 0);
assert!(!seg_ids.is_empty());
let term_vals = vec!["a", "b", "c", "d", "e", "f"];
for term_val in term_vals {

View File

@@ -12,12 +12,12 @@ use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::store::StoreReader;
use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
use crate::tokenizer::{TokenStreamChain, Tokenizer};
use crate::Opstamp;
use crate::{core::Segment, store::StoreWriter};
use crate::{core::SerializableSegment, store::StoreReader};
use crate::{DocId, SegmentComponent};
/// Computes the initial size of the hash table.
@@ -36,6 +36,20 @@ fn initial_table_size(per_thread_memory_budget: usize) -> crate::Result<usize> {
}
}
fn remap_doc_opstamps(
opstamps: Vec<Opstamp>,
doc_id_mapping_opt: Option<&DocIdMapping>,
) -> Vec<Opstamp> {
if let Some(doc_id_mapping_opt) = doc_id_mapping_opt {
doc_id_mapping_opt
.iter_old_doc_ids()
.map(|doc| opstamps[doc as usize])
.collect()
} else {
opstamps
}
}
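
A quick illustration of what remap_doc_opstamps above does, with the mapping reduced to a plain old-doc-id iteration order (the real code gets it from DocIdMapping::iter_old_doc_ids):

fn remap(opstamps: &[u64], old_doc_order: &[usize]) -> Vec<u64> {
    // Opstamps follow their documents into the new order.
    old_doc_order.iter().map(|&doc| opstamps[doc]).collect()
}

fn main() {
    assert_eq!(remap(&[10, 11, 12], &[2, 0, 1]), vec![12, 10, 11]);
}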
/// A `SegmentWriter` is in charge of creating segment index from a
/// set of documents.
///
@@ -112,14 +126,15 @@ impl SegmentWriter {
.clone()
.map(|sort_by_field| get_doc_id_mapping_from_field(sort_by_field, &self))
.transpose()?;
write(
remap_and_write(
&self.multifield_postings,
&self.fast_field_writers,
&self.fieldnorms_writer,
self.segment_serializer,
mapping.as_ref(),
)?;
Ok(self.doc_opstamps)
let doc_opstamps = remap_doc_opstamps(self.doc_opstamps, mapping.as_ref());
Ok(doc_opstamps)
}
pub fn mem_usage(&self) -> usize {
@@ -176,7 +191,7 @@ impl SegmentWriter {
.process(&mut |token| {
term_buffer.set_text(&token.text);
let unordered_term_id =
multifield_postings.subscribe(doc_id, &term_buffer);
multifield_postings.subscribe(doc_id, term_buffer);
unordered_term_id_opt = Some(unordered_term_id);
});
if let Some(unordered_term_id) = unordered_term_id_opt {
@@ -237,7 +252,7 @@ impl SegmentWriter {
.u64_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_u64(u64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
multifield_postings.subscribe(doc_id, term_buffer);
}
}
FieldType::Date(_) => {
@@ -248,7 +263,7 @@ impl SegmentWriter {
.date_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_i64(date_val.timestamp());
multifield_postings.subscribe(doc_id, &term_buffer);
multifield_postings.subscribe(doc_id, term_buffer);
}
}
FieldType::I64(_) => {
@@ -259,7 +274,7 @@ impl SegmentWriter {
.i64_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_i64(i64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
multifield_postings.subscribe(doc_id, term_buffer);
}
}
FieldType::F64(_) => {
@@ -270,7 +285,7 @@ impl SegmentWriter {
.f64_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_f64(f64_val);
multifield_postings.subscribe(doc_id, &term_buffer);
multifield_postings.subscribe(doc_id, term_buffer);
}
}
FieldType::Bytes(_) => {
@@ -281,7 +296,7 @@ impl SegmentWriter {
.bytes_value()
.ok_or_else(make_schema_error)?;
term_buffer.set_bytes(bytes);
self.multifield_postings.subscribe(doc_id, &term_buffer);
self.multifield_postings.subscribe(doc_id, term_buffer);
}
}
}
@@ -315,8 +330,12 @@ impl SegmentWriter {
}
}
// This method is used as a trick to work around the borrow checker
fn write(
/// This method is used as a trick to work around the borrow checker.
/// Writes a view of a segment by pushing information
/// to the `SegmentSerializer`.
///
/// `doc_id_map` is used to map to the new doc_id order.
fn remap_and_write(
multifield_postings: &MultiFieldPostingsWriter,
fast_field_writers: &FastFieldsWriter,
fieldnorms_writer: &FieldNormsWriter,
@@ -340,6 +359,7 @@ fn write(
&term_ord_map,
doc_id_map,
)?;
// Finalize the temp doc store and create a version that reflects the doc_id_map.
if let Some(doc_id_map) = doc_id_map {
let store_write = serializer
@@ -356,31 +376,16 @@ fn write(
.segment()
.open_read(SegmentComponent::TempStore)?,
)?;
for old_doc_id in doc_id_map.iter_old_doc_ids() {
let doc_bytes = store_read.get_document_bytes(*old_doc_id)?;
let doc_bytes = store_read.get_document_bytes(old_doc_id)?;
serializer.get_store_writer().store_bytes(&doc_bytes)?;
}
}
serializer.close()?;
Ok(())
}
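
A sketch of the doc-store remap above: the store is first written in insertion order to a temp component, then re-read and copied document by document in the mapped order. Modeled here with byte buffers standing in for StoreReader/StoreWriter:

fn remap_store(temp_store: &[Vec<u8>], old_doc_order: &[usize]) -> Vec<Vec<u8>> {
    old_doc_order
        .iter()
        .map(|&old_doc_id| temp_store[old_doc_id].clone())
        .collect()
}

fn main() {
    let temp = vec![b"doc0".to_vec(), b"doc1".to_vec(), b"doc2".to_vec()];
    let remapped = remap_store(&temp, &[2, 0, 1]);
    assert_eq!(remapped, vec![b"doc2".to_vec(), b"doc0".to_vec(), b"doc1".to_vec()]);
}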
impl SerializableSegment for SegmentWriter {
fn write(
&self,
serializer: SegmentSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> crate::Result<u32> {
let max_doc = self.max_doc;
write(
&self.multifield_postings,
&self.fast_field_writers,
&self.fieldnorms_writer,
serializer,
doc_id_map,
)?;
Ok(max_doc)
}
serializer.close()?;
Ok(())
}
#[cfg(test)]

View File

@@ -1,6 +1,13 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![cfg_attr(
feature = "cargo-clippy",
allow(
clippy::module_inception,
clippy::needless_range_loop,
clippy::bool_assert_comparison
)
)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![warn(missing_docs)]
@@ -100,7 +107,6 @@
#[cfg_attr(test, macro_use)]
extern crate serde_json;
#[macro_use]
extern crate log;
@@ -934,7 +940,7 @@ mod tests {
let id = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let index_reader = index.reader()?;
let mut index_writer = index.writer_for_tests()?;
@@ -973,7 +979,7 @@ mod tests {
let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.into_iter()
.iter()
.map(|reader| reader.segment_id())
.collect();
block_on(index_writer.merge(&segment_ids)).unwrap();

View File

@@ -46,7 +46,7 @@ pub mod tests {
fn create_positions_data(vals: &[u32]) -> crate::Result<OwnedBytes> {
let mut positions_buffer = vec![];
let mut serializer = PositionSerializer::new(&mut positions_buffer);
serializer.write_positions_delta(&vals);
serializer.write_positions_delta(vals);
serializer.close_term()?;
serializer.close()?;
Ok(OwnedBytes::new(positions_buffer))
@@ -169,7 +169,7 @@ pub mod tests {
let positions_delta: Vec<u32> = (0..2_000_000).collect();
let positions_data = create_positions_data(&positions_delta[..])?;
assert_eq!(positions_data.len(), 5003499);
let mut position_reader = PositionReader::open(positions_data.clone())?;
let mut position_reader = PositionReader::open(positions_data)?;
let mut buf = [0u32; 256];
position_reader.read(128, &mut buf);
for i in 0..256 {

View File

@@ -57,7 +57,7 @@ mod sse2 {
fn test_linear_search_sse2_128_u32() {
let mut block = [0u32; COMPRESSION_BLOCK_SIZE];
for el in 0u32..128u32 {
block[el as usize] = el * 2 + 1 << 18;
block[el as usize] = (el * 2 + 1) << 18;
}
let target = block[64] + 1;
assert_eq!(linear_search_sse2_128(&AlignedBuffer(block), target), 65);
@@ -91,7 +91,7 @@ fn exponential_search(arr: &[u32], target: u32) -> Range<usize> {
#[inline(never)]
fn galloping(block_docs: &[u32], target: u32) -> usize {
let range = exponential_search(&block_docs, target);
let range = exponential_search(block_docs, target);
range.start + linear_search(&block_docs[range], target)
}
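
The galloping strategy above, sketched standalone: double the probe index until the target is bracketed, then hand the bracket to a linear scan. This is an assumption-level sketch of exponential_search, not the exact tantivy implementation:

use std::ops::Range;

fn exponential_search(arr: &[u32], target: u32) -> Range<usize> {
    let mut end = 1;
    while end < arr.len() && arr[end] < target {
        end *= 2;
    }
    (end / 2)..end.min(arr.len())
}

fn main() {
    let arr: Vec<u32> = (0..100).map(|i| i * 3).collect();
    // 50 falls between arr[16] = 48 and arr[17] = 51, inside the bracket 16..32.
    let range = exponential_search(&arr, 50);
    assert!(range.start <= 17 && 17 < range.end);
}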

View File

@@ -13,11 +13,7 @@ use crate::schema::IndexRecordOption;
use crate::{DocId, Score, TERMINATED};
fn max_score<I: Iterator<Item = Score>>(mut it: I) -> Option<Score> {
if let Some(first) = it.next() {
Some(it.fold(first, Score::max))
} else {
None
}
it.next().map(|first| it.fold(first, Score::max))
}
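
The refactored max_score in action, with tantivy's Score written as a plain f32 here: next() supplies the initial accumulator, so an empty iterator yields None without an explicit branch.

fn max_score<I: Iterator<Item = f32>>(mut it: I) -> Option<f32> {
    it.next().map(|first| it.fold(first, f32::max))
}

fn main() {
    assert_eq!(max_score(std::iter::empty::<f32>()), None);
    assert_eq!(max_score([0.5f32, 2.0, 1.5].into_iter()), Some(2.0));
}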
/// `BlockSegmentPostings` is a cursor iterating over blocks
@@ -482,11 +478,11 @@ mod tests {
docs.push((i * i / 100) + i);
}
let mut block_postings = build_block_postings(&docs[..]);
for i in vec![0, 424, 10000] {
block_postings.seek(i);
for i in &[0, 424, 10000] {
block_postings.seek(*i);
let docs = block_postings.docs();
assert!(docs[0] <= i);
assert!(docs.last().cloned().unwrap_or(0u32) >= i);
assert!(docs[0] <= *i);
assert!(docs.last().cloned().unwrap_or(0u32) >= *i);
}
block_postings.seek(100_000);
assert_eq!(block_postings.doc(COMPRESSION_BLOCK_SIZE - 1), TERMINATED);

View File

@@ -85,13 +85,13 @@ impl BlockDecoder {
) -> usize {
self.output_len = COMPRESSION_BLOCK_SIZE;
self.bitpacker
.decompress_sorted(offset, &compressed_data, &mut self.output.0, num_bits)
.decompress_sorted(offset, compressed_data, &mut self.output.0, num_bits)
}
pub fn uncompress_block_unsorted(&mut self, compressed_data: &[u8], num_bits: u8) -> usize {
self.output_len = COMPRESSION_BLOCK_SIZE;
self.bitpacker
.decompress(&compressed_data, &mut self.output.0, num_bits)
.decompress(compressed_data, &mut self.output.0, num_bits)
}
#[inline]
@@ -303,7 +303,7 @@ pub mod tests {
assert!(encoded_data.len() <= expected_length);
let mut decoder = BlockDecoder::default();
let consumed_num_bytes =
decoder.uncompress_vint_sorted(&encoded_data, *offset, input.len(), PADDING_VALUE);
decoder.uncompress_vint_sorted(encoded_data, *offset, input.len(), PADDING_VALUE);
assert_eq!(consumed_num_bytes, encoded_data.len());
assert_eq!(input, decoder.output_array());
for i in input.len()..COMPRESSION_BLOCK_SIZE {
@@ -354,18 +354,18 @@ mod bench {
});
}
#[test]
fn test_all_docs_compression_numbits() {
for expected_num_bits in 0u8.. {
let mut data = [0u32; 128];
if expected_num_bits > 0 {
data[0] = (1u64 << (expected_num_bits as usize) - 1) as u32;
}
let mut encoder = BlockEncoder::new();
let (num_bits, compressed) = encoder.compress_block_unsorted(&data);
assert_eq!(compressed.len(), compressed_block_size(num_bits));
}
}
//#[test]
//fn test_all_docs_compression_numbits() {
//for expected_num_bits in 0u8.. {
//let mut data = [0u32; 128];
//if expected_num_bits > 0 {
//data[0] = (1u64 << (expected_num_bits as usize) - 1) as u32;
//}
//let mut encoder = BlockEncoder::new();
//let (num_bits, compressed) = encoder.compress_block_unsorted(&data);
//assert_eq!(compressed.len(), compressed_block_size(num_bits));
//}
//}
const NUM_INTS_BENCH_VINT: usize = 10;

View File

@@ -54,7 +54,7 @@ pub mod tests {
use crate::DocId;
use crate::HasLen;
use crate::Score;
use std::{iter, mem};
use std::mem;
#[test]
pub fn test_position_write() -> crate::Result<()> {
@@ -153,8 +153,8 @@ pub mod tests {
#[test]
pub fn test_drop_token_that_are_too_long() -> crate::Result<()> {
let ok_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN).collect();
let mut exceeding_token_text: String = iter::repeat('A').take(MAX_TOKEN_LEN + 1).collect();
let ok_token_text: String = "A".repeat(MAX_TOKEN_LEN);
let mut exceeding_token_text: String = "A".repeat(MAX_TOKEN_LEN + 1);
exceeding_token_text.push_str(" hello");
let mut schema_builder = Schema::builder();
let text_options = TextOptions::default().set_indexing_options(
@@ -164,7 +164,7 @@ pub mod tests {
);
let text_field = schema_builder.add_text_field("text", text_options);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
index
.tokenizers()
.register("simple_no_truncation", SimpleTokenizer);
@@ -229,7 +229,7 @@ pub mod tests {
segment_writer.add_document(op, &schema).unwrap();
}
for i in 2..1000 {
let mut text: String = iter::repeat("e ").take(i).collect();
let mut text: String = "e ".repeat(i);
text.push_str(" a");
let op = AddOperation {
opstamp: 2u64,
@@ -409,7 +409,7 @@ pub mod tests {
.unwrap();
for i in 0..num_docs / 2 - 1 {
assert!(segment_postings.seek(i * 2 + 1) > (i * 1) * 2);
assert!(segment_postings.seek(i * 2 + 1) > i * 2);
assert_eq!(segment_postings.doc(), (i + 1) * 2);
}
}
@@ -431,13 +431,10 @@ pub mod tests {
.read_postings(&term_2, IndexRecordOption::Basic)?
.unwrap();
assert_eq!(segment_postings.seek(i), i);
assert_eq!(segment_postings.doc(), i);
if i % 2 == 0 {
assert_eq!(segment_postings.seek(i), i);
assert_eq!(segment_postings.doc(), i);
assert!(segment_reader.is_deleted(i));
} else {
assert_eq!(segment_postings.seek(i), i);
assert_eq!(segment_postings.doc(), i);
}
}

View File

@@ -231,13 +231,7 @@ pub trait PostingsWriter {
// We skip all tokens with a length greater than what fits in a u16.
if token.text.len() <= MAX_TOKEN_LEN {
term_buffer.set_text(token.text.as_str());
self.subscribe(
term_index,
doc_id,
token.position as u32,
&term_buffer,
heap,
);
self.subscribe(term_index, doc_id, token.position as u32, term_buffer, heap);
} else {
warn!(
"A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \

View File

@@ -282,7 +282,7 @@ impl Recorder for TfAndPositionRecorder {
doc_id_and_positions
.push((doc_id_map.get_new_doc_id(doc), buffer_positions.to_vec()));
} else {
serializer.write_doc(doc, buffer_positions.len() as u32, &buffer_positions);
serializer.write_doc(doc, buffer_positions.len() as u32, buffer_positions);
}
}
if doc_id_map.is_some() {

View File

@@ -107,7 +107,7 @@ impl SegmentPostings {
let fieldnorm_reader = fieldnorms.map(FieldNormReader::for_test);
let average_field_norm = fieldnorms
.map(|fieldnorms| {
if fieldnorms.len() == 0 {
if fieldnorms.is_empty() {
return 0.0;
}
let total_num_tokens: u64 = fieldnorms
@@ -184,7 +184,7 @@ impl DocSet for SegmentPostings {
// At this point we are on the block, that might contain our document.
let output = self.block_cursor.docs_aligned();
self.cur = self.block_searcher.search_in_block(&output, target);
self.cur = self.block_searcher.search_in_block(output, target);
// The last block is not full and padded with the value TERMINATED,
// so that we are guaranteed to have at least one doc in the block (a real one or the padding)

View File

@@ -356,7 +356,7 @@ impl<W: Write> PostingsSerializer<W> {
// encode the doc ids
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_sorted(&self.block.doc_ids(), self.last_doc_id_encoded);
.compress_block_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
self.last_doc_id_encoded = self.block.last_doc();
self.skip_write
.write_doc(self.last_doc_id_encoded, num_bits);
@@ -366,7 +366,7 @@ impl<W: Write> PostingsSerializer<W> {
if self.mode.has_freq() {
let (num_bits, block_encoded): (u8, &[u8]) = self
.block_encoder
.compress_block_unsorted(&self.block.term_freqs());
.compress_block_unsorted(self.block.term_freqs());
self.postings_write.extend(block_encoded);
self.skip_write.write_term_freq(num_bits);
if self.mode.has_positions() {
@@ -426,7 +426,7 @@ impl<W: Write> PostingsSerializer<W> {
{
let block_encoded = self
.block_encoder
.compress_vint_sorted(&self.block.doc_ids(), self.last_doc_id_encoded);
.compress_vint_sorted(self.block.doc_ids(), self.last_doc_id_encoded);
self.postings_write.write_all(block_encoded)?;
}
// ... Idem for term frequencies

View File

@@ -280,11 +280,8 @@ mod tests {
if v.len() >= 10 {
break;
}
match len_to_capacity(i) {
CapacityResult::NeedAlloc(cap) => {
v.push((i, cap));
}
_ => {}
if let CapacityResult::NeedAlloc(cap) = len_to_capacity(i) {
v.push((i, cap));
}
}
assert_eq!(

View File

@@ -151,7 +151,7 @@ impl TermHashMap {
pub fn iter(&self) -> Iter<'_> {
Iter {
inner: self.occupied.iter(),
hashmap: &self,
hashmap: self,
}
}
@@ -261,8 +261,8 @@ mod tests {
}
let mut vanilla_hash_map = HashMap::new();
let mut iter_values = hash_map.iter();
while let Some((key, addr, _)) = iter_values.next() {
let iter_values = hash_map.iter();
for (key, addr, _) in iter_values {
let val: u32 = hash_map.heap.read(addr);
vanilla_hash_map.insert(key.to_owned(), val);
}

View File

@@ -251,7 +251,7 @@ mod tests {
impl PartialEq for Float {
fn eq(&self, other: &Self) -> bool {
self.cmp(&other) == Ordering::Equal
self.cmp(other) == Ordering::Equal
}
}
@@ -289,7 +289,7 @@ mod tests {
if !nearly_equals(score, limit) {
checkpoints.push((doc, score));
}
return limit;
limit
});
checkpoints
}
@@ -368,10 +368,10 @@ mod tests {
.iter()
.map(|posting_list| {
posting_list
.into_iter()
.iter()
.cloned()
.flat_map(|(doc, term_freq)| {
(0 as u32..REPEAT as u32).map(move |offset| {
(0_u32..REPEAT as u32).map(move |offset| {
(
doc * (REPEAT as u32) + offset,
if offset == 0 { term_freq } else { 1 },

View File

@@ -329,7 +329,7 @@ impl MoreLikeThis {
continue;
}
let doc_freq = searcher.doc_freq(&term)?;
let doc_freq = searcher.doc_freq(term)?;
// ignore terms with less than min_doc_frequency
if self

View File

@@ -260,7 +260,7 @@ mod tests {
.with_document(DocAddress::new(0, 0));
let top_docs = searcher.search(&query, &TopDocs::with_limit(5)).unwrap();
let mut doc_ids: Vec<_> = top_docs.iter().map(|item| item.1.doc_id).collect();
doc_ids.sort();
doc_ids.sort_unstable();
assert_eq!(doc_ids.len(), 3);
assert_eq!(doc_ids, vec![0, 1, 3]);
@@ -276,7 +276,7 @@ mod tests {
.with_document(DocAddress::new(0, 4));
let top_docs = searcher.search(&query, &TopDocs::with_limit(5)).unwrap();
let mut doc_ids: Vec<_> = top_docs.iter().map(|item| item.1.doc_id).collect();
doc_ids.sort();
doc_ids.sort_unstable();
assert_eq!(doc_ids.len(), 2);
assert_eq!(doc_ids, vec![3, 4]);

View File

@@ -48,43 +48,36 @@ impl PhraseWeight {
) -> crate::Result<Option<PhraseScorer<SegmentPostings>>> {
let similarity_weight = self.similarity_weight.boost_by(boost);
let fieldnorm_reader = self.fieldnorm_reader(reader)?;
let mut term_postings_list = Vec::new();
if reader.has_deletes() {
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions)?
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
return Ok(None);
}
}
Ok(Some(PhraseScorer::new(
term_postings_list,
similarity_weight,
fieldnorm_reader,
self.scoring_enabled,
)))
} else {
let mut term_postings_list = Vec::new();
for &(offset, ref term) in &self.phrase_terms {
if let Some(postings) = reader
.inverted_index(term.field())?
.read_postings_no_deletes(&term, IndexRecordOption::WithFreqsAndPositions)?
.read_postings_no_deletes(term, IndexRecordOption::WithFreqsAndPositions)?
{
term_postings_list.push((offset, postings));
} else {
return Ok(None);
}
}
Ok(Some(PhraseScorer::new(
term_postings_list,
similarity_weight,
fieldnorm_reader,
self.scoring_enabled,
)))
}
Ok(Some(PhraseScorer::new(
term_postings_list,
similarity_weight,
fieldnorm_reader,
self.scoring_enabled,
)))
}
}

View File

@@ -722,7 +722,7 @@ mod test {
let is_not_indexed_err = |query: &str| {
let result: Result<Box<dyn Query>, QueryParserError> = query_parser.parse_query(query);
if let Err(QueryParserError::FieldNotIndexed(field_name)) = result {
Some(field_name.clone())
Some(field_name)
} else {
None
}

View File

@@ -91,8 +91,8 @@ impl RangeQuery {
RangeQuery {
field,
value_type,
left_bound: map_bound(&left_bound, &verify_and_unwrap_term),
right_bound: map_bound(&right_bound, &verify_and_unwrap_term),
left_bound: map_bound(left_bound, &verify_and_unwrap_term),
right_bound: map_bound(right_bound, &verify_and_unwrap_term),
}
}
@@ -493,7 +493,7 @@ mod tests {
let year = schema.get_field("year").unwrap();
index_writer.add_document(doc!(
title => "hemoglobin blood",
year => 1990 as i64
year => 1990_i64
));
index_writer.commit()?;
let reader = index.reader()?;

View File

@@ -58,7 +58,7 @@ pub struct RegexQuery {
impl RegexQuery {
/// Creates a new RegexQuery from a given pattern
pub fn from_pattern(regex_pattern: &str, field: Field) -> crate::Result<Self> {
let regex = Regex::new(&regex_pattern)
let regex = Regex::new(regex_pattern)
.map_err(|_| TantivyError::InvalidArgument(regex_pattern.to_string()))?;
Ok(RegexQuery::from_regex(regex, field))
}
@@ -169,9 +169,9 @@ mod test {
verify_regex_query(matching_one, matching_zero, reader.clone());
let matching_one = RegexQuery::from_regex(r1.clone(), field);
let matching_zero = RegexQuery::from_regex(r2.clone(), field);
let matching_one = RegexQuery::from_regex(r1, field);
let matching_zero = RegexQuery::from_regex(r2, field);
verify_regex_query(matching_one, matching_zero, reader.clone());
verify_regex_query(matching_one, matching_zero, reader);
}
}

View File

@@ -194,8 +194,8 @@ mod tests {
.collect();
let mut fieldnorms: Vec<u32> = vec![];
for i in 0..term_doc_freq {
let (tf, num_extra_terms) = term_freqs_fieldnorms[i];
for item in term_freqs_fieldnorms.iter().take(term_doc_freq) {
let (tf, num_extra_terms) = item;
fieldnorms.push(tf + num_extra_terms);
}
let average_fieldnorm = fieldnorms
@@ -253,7 +253,7 @@ mod tests {
}
fn test_block_wand_aux(term_query: &TermQuery, searcher: &Searcher) -> crate::Result<()> {
let term_weight = term_query.specialized_weight(&searcher, true)?;
let term_weight = term_query.specialized_weight(searcher, true)?;
for reader in searcher.segment_readers() {
let mut block_max_scores = vec![];
let mut block_max_scores_b = vec![];
@@ -309,7 +309,7 @@ mod tests {
}
writer.commit()?;
let term_query = TermQuery::new(
Term::from_field_text(text_field, &"bbbb"),
Term::from_field_text(text_field, "bbbb"),
IndexRecordOption::WithFreqs,
);
let segment_ids: Vec<SegmentId>;

View File

@@ -319,7 +319,7 @@ mod tests {
let res: Box<dyn DocSet> = Box::new(Union::<_, DoNothingCombiner>::from(
docs_list
.iter()
.map(|docs| docs.clone())
.cloned()
.map(VecDocSet::from)
.map(|docset| ConstScorer::new(docset, 1.0))
.collect::<Vec<_>>(),

View File

@@ -253,7 +253,7 @@ mod tests {
threads.push(thread::spawn(move || {
assert_eq!(start_1_recv.recv(), Ok("start"));
let _leased_searcher = &pool1.acquire();
let _leased_searcher = pool1.acquire();
assert!(event_send1.send("1 acquired").is_ok());
assert_eq!(start_1_recv.recv(), Ok("stop"));
assert!(event_send1.send("1 stopped").is_ok());
@@ -262,7 +262,7 @@ mod tests {
threads.push(thread::spawn(move || {
assert_eq!(start_2_recv.recv(), Ok("start"));
let _leased_searcher = &pool2.acquire();
let _leased_searcher = pool2.acquire();
assert!(event_send2.send("2 acquired").is_ok());
assert_eq!(start_2_recv.recv(), Ok("stop"));
mem::drop(_leased_searcher);
@@ -271,7 +271,7 @@ mod tests {
threads.push(thread::spawn(move || {
assert_eq!(start_3_recv.recv(), Ok("start"));
let _leased_searcher = &pool3.acquire();
let _leased_searcher = pool3.acquire();
assert!(event_send3.send("3 acquired").is_ok());
assert_eq!(start_3_recv.recv(), Ok("stop"));
mem::drop(_leased_searcher);

View File

@@ -39,7 +39,7 @@ pub enum FacetParseError {
/// belonging to a facet also belongs to the ancestor of
/// its facet. In the example above, `/electronics/tv_and_video/`
/// and `/electronics`.
#[derive(Clone, Eq, Hash, PartialEq, Ord, PartialOrd)]
#[derive(Clone, Default, Eq, Hash, PartialEq, Ord, PartialOrd)]
pub struct Facet(String);
impl Facet {

View File

@@ -23,6 +23,15 @@ pub struct FieldEntry {
}
impl FieldEntry {
/// Creates a new field entry given a name and a field type
pub fn new(field_name: String, field_type: FieldType) -> FieldEntry {
assert!(is_valid_field_name(&field_name));
FieldEntry {
name: field_name,
field_type,
}
}
/// Creates a new u64 field entry in the schema, given
/// a name, and some options.
pub fn new_text(field_name: String, text_options: TextOptions) -> FieldEntry {
@@ -308,7 +317,7 @@ mod tests {
assert_eq!("title", field_value.name);
match field_value.field_type {
FieldType::Str(_) => assert!(true),
FieldType::Str(_) => {}
_ => panic!("expected FieldType::Str"),
}
}

View File

@@ -151,20 +151,13 @@ pub use self::flags::{FAST, INDEXED, STORED};
pub use self::int_options::Cardinality;
pub use self::int_options::IntOptions;
use once_cell::sync::Lazy;
use regex::Regex;
/// Regular expression representing the restriction on valid field names.
pub const FIELD_NAME_PATTERN: &str = r#"^[_a-zA-Z][_\-a-zA-Z0-9]*$"#;
/// Validator for a potential `field_name`.
/// Returns true iff the name can be used as a field name.
/// Returns true if the name can be used as a field name.
///
/// A field name must start with a letter `[a-zA-Z]`.
/// The other characters can be any alphanumeric character `[a-zA-Z0-9]` or `_`.
/// A field name can contain any character, but it must have at least one character
/// and must not start with a `-`.
pub fn is_valid_field_name(field_name: &str) -> bool {
static FIELD_NAME_PTN: Lazy<Regex> = Lazy::new(|| Regex::new(FIELD_NAME_PATTERN).unwrap());
FIELD_NAME_PTN.is_match(field_name)
field_name.len() > 0 && !field_name.starts_with('-')
}
#[cfg(test)]
@@ -174,16 +167,8 @@ mod tests {
#[test]
fn test_is_valid_name() {
assert!(is_valid_field_name("text"));
assert!(is_valid_field_name("text0"));
assert!(!is_valid_field_name("0text"));
assert!(is_valid_field_name("field-name"));
assert!(is_valid_field_name("field_name"));
assert!(!is_valid_field_name("field!name"));
assert!(is_valid_field_name("シャボン玉"));
assert!(!is_valid_field_name("-fieldname"));
assert!(is_valid_field_name("_fieldname"));
assert!(!is_valid_field_name(""));
assert!(!is_valid_field_name("シャボン玉"));
assert!(is_valid_field_name("my_text_field"));
}
}

View File

@@ -779,7 +779,7 @@ mod tests {
}
]"#;
let tmp_schema: Schema =
serde_json::from_str(&schema_content).expect("error while reading json");
serde_json::from_str(schema_content).expect("error while reading json");
for (_field, field_entry) in tmp_schema.fields() {
schema_builder.add_field(field_entry.clone());
}

View File

@@ -137,7 +137,7 @@ fn search_fragments<'a>(
};
fragment = FragmentCandidate::new(next.offset_from);
}
fragment.try_add_token(next, &terms);
fragment.try_add_token(next, terms);
}
if fragment.score > 0.0 {
fragments.push(fragment)
@@ -286,8 +286,8 @@ impl SnippetGenerator {
/// Generates a snippet for the given text.
pub fn snippet(&self, text: &str) -> Snippet {
let fragment_candidates =
search_fragments(&self.tokenizer, &text, &self.terms_text, self.max_num_chars);
select_best_fragment_combination(&fragment_candidates[..], &text)
search_fragments(&self.tokenizer, text, &self.terms_text, self.max_num_chars);
select_best_fragment_combination(&fragment_candidates[..], text)
}
}
@@ -301,9 +301,8 @@ mod tests {
use crate::SnippetGenerator;
use maplit::btreemap;
use std::collections::BTreeMap;
use std::iter::Iterator;
const TEST_TEXT: &'static str = r#"Rust is a systems programming language sponsored by
const TEST_TEXT: &str = r#"Rust is a systems programming language sponsored by
Mozilla which describes it as a "safe, concurrent, practical language", supporting functional and
imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?],
but its designers intend it to provide better memory safety while still maintaining
@@ -330,7 +329,7 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(first.score, 1.9);
assert_eq!(first.stop_offset, 89);
}
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
let snippet = select_best_fragment_combination(&fragments[..], TEST_TEXT);
assert_eq!(
snippet.fragments,
"Rust is a systems programming language sponsored by\n\
@@ -356,7 +355,7 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(first.score, 1.0);
assert_eq!(first.stop_offset, 17);
}
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
let snippet = select_best_fragment_combination(&fragments[..], TEST_TEXT);
assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems")
}
{
@@ -371,7 +370,7 @@ Survey in 2016, 2017, and 2018."#;
assert_eq!(first.score, 0.9);
assert_eq!(first.stop_offset, 17);
}
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
let snippet = select_best_fragment_combination(&fragments[..], TEST_TEXT);
assert_eq!(snippet.to_html(), "programming <b>language</b>")
}
}
@@ -383,17 +382,17 @@ Survey in 2016, 2017, and 2018."#;
let mut terms = BTreeMap::new();
terms.insert(String::from("c"), 1.0);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
let fragments = search_fragments(&From::from(SimpleTokenizer), text, &terms, 3);
assert_eq!(fragments.len(), 1);
{
let first = fragments.iter().nth(0).unwrap();
let first = &fragments[0];
assert_eq!(first.score, 1.0);
assert_eq!(first.start_offset, 4);
assert_eq!(first.stop_offset, 7);
}
let snippet = select_best_fragment_combination(&fragments[..], &text);
let snippet = select_best_fragment_combination(&fragments[..], text);
assert_eq!(snippet.fragments, "c d");
assert_eq!(snippet.to_html(), "<b>c</b> d");
}
@@ -405,17 +404,17 @@ Survey in 2016, 2017, and 2018."#;
let mut terms = BTreeMap::new();
terms.insert(String::from("f"), 1.0);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
let fragments = search_fragments(&From::from(SimpleTokenizer), text, &terms, 3);
assert_eq!(fragments.len(), 2);
{
let first = fragments.iter().nth(0).unwrap();
let first = &fragments[0];
assert_eq!(first.score, 1.0);
assert_eq!(first.stop_offset, 11);
assert_eq!(first.start_offset, 8);
}
let snippet = select_best_fragment_combination(&fragments[..], &text);
let snippet = select_best_fragment_combination(&fragments[..], text);
assert_eq!(snippet.fragments, "e f");
assert_eq!(snippet.to_html(), "e <b>f</b>");
}
@@ -428,17 +427,17 @@ Survey in 2016, 2017, and 2018."#;
terms.insert(String::from("f"), 1.0);
terms.insert(String::from("a"), 0.9);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 7);
let fragments = search_fragments(&From::from(SimpleTokenizer), text, &terms, 7);
assert_eq!(fragments.len(), 2);
{
let first = fragments.iter().nth(0).unwrap();
let first = &fragments[0];
assert_eq!(first.score, 0.9);
assert_eq!(first.stop_offset, 7);
assert_eq!(first.start_offset, 0);
}
let snippet = select_best_fragment_combination(&fragments[..], &text);
let snippet = select_best_fragment_combination(&fragments[..], text);
assert_eq!(snippet.fragments, "e f g");
assert_eq!(snippet.to_html(), "e <b>f</b> g");
}
@@ -450,11 +449,11 @@ Survey in 2016, 2017, and 2018."#;
let mut terms = BTreeMap::new();
terms.insert(String::from("z"), 1.0);
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
let fragments = search_fragments(&From::from(SimpleTokenizer), text, &terms, 3);
assert_eq!(fragments.len(), 0);
let snippet = select_best_fragment_combination(&fragments[..], &text);
let snippet = select_best_fragment_combination(&fragments[..], text);
assert_eq!(snippet.fragments, "");
assert_eq!(snippet.to_html(), "");
}
@@ -464,10 +463,10 @@ Survey in 2016, 2017, and 2018."#;
let text = "a b c d";
let terms = BTreeMap::new();
let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
let fragments = search_fragments(&From::from(SimpleTokenizer), text, &terms, 3);
assert_eq!(fragments.len(), 0);
let snippet = select_best_fragment_combination(&fragments[..], &text);
let snippet = select_best_fragment_combination(&fragments[..], text);
assert_eq!(snippet.fragments, "");
assert_eq!(snippet.to_html(), "");
}

View File

@@ -296,7 +296,7 @@ mod test {
#[test]
fn test_empty() {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage().unwrap();
@@ -325,7 +325,7 @@ mod test {
let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", FAST | INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
@@ -362,7 +362,7 @@ mod test {
let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
@@ -401,7 +401,7 @@ mod test {
let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("name", STORED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests().unwrap();
@@ -439,7 +439,7 @@ mod test {
let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests()?;

View File

@@ -40,7 +40,7 @@ impl fmt::Debug for Checkpoint {
#[cfg(test)]
mod tests {
use std::{io, iter};
use std::io;
use futures::executor::block_on;
use proptest::strategy::{BoxedStrategy, Strategy};
@@ -134,9 +134,7 @@ mod tests {
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_for_tests()?;
index_writer.set_merge_policy(Box::new(NoMergePolicy));
let long_text: String = iter::repeat("abcdefghijklmnopqrstuvwxyz")
.take(1_000)
.collect();
let long_text: String = "abcdefghijklmnopqrstuvwxyz".repeat(1_000);
for _ in 0..20 {
index_writer.add_document(doc!(body=>long_text.clone()));
}
@@ -219,8 +217,7 @@ mod tests {
) -> Option<Checkpoint> {
checkpoints
.into_iter()
.filter(|checkpoint| checkpoint.doc_range.end > target)
.next()
.find(|checkpoint| checkpoint.doc_range.end > target)
}
fn test_skip_index_aux(skip_index: SkipIndex, checkpoints: &[Checkpoint]) {

View File

@@ -163,7 +163,7 @@ impl StoreReader {
let mut curr_checkpoint = checkpoint_block_iter.next();
let mut curr_block = curr_checkpoint
.as_ref()
.map(|checkpoint| self.read_block(&checkpoint).map_err(|e| e.kind())); // map error in order to enable cloning
.map(|checkpoint| self.read_block(checkpoint).map_err(|e| e.kind())); // map error in order to enable cloning
let mut block_start_pos = 0;
let mut num_skipped = 0;
let mut reset_block_pos = false;
@@ -177,7 +177,7 @@ impl StoreReader {
curr_checkpoint = checkpoint_block_iter.next();
curr_block = curr_checkpoint
.as_ref()
.map(|checkpoint| self.read_block(&checkpoint).map_err(|e| e.kind()));
.map(|checkpoint| self.read_block(checkpoint).map_err(|e| e.kind()));
reset_block_pos = true;
num_skipped = 0;
}

Some files were not shown because too many files have changed in this diff.