Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2025-12-28 21:12:54 +00:00.

Compare commits: `fix_estima...float` (43 commits)
| SHA1 |
|---|
| 45dbc46ef1 |
| c2f1c250f9 |
| 2063f1717f |
| d742275048 |
| b9f06bc287 |
| 8b42c4c126 |
| 7905965800 |
| f60a551890 |
| 7baa6e3ec5 |
| 2100ec5d26 |
| b3bf9a5716 |
| 0dc8c458e0 |
| 5945dbf0bd |
| 4cf911d56a |
| 0f5cff762f |
| 6d9a123cf2 |
| 0f4a47816a |
| b062ab2196 |
| a9d2f3db23 |
| 44e03791f9 |
| 2d23763e9f |
| a24ae8d924 |
| 927dff5262 |
| a695edcc95 |
| b4b4f3fa73 |
| b50e4b7c20 |
| f8686ab1ec |
| 2fe42719d8 |
| fadd784a25 |
| 0e94213af0 |
| 0da2a2e70d |
| 0bcdf3cbbf |
| 8f647b817f |
| a86b0df6f4 |
| f842da758c |
| 97ccd6d712 |
| cb252a42af |
| d9609dd6b6 |
| f03667d967 |
| 10f10a322f |
| f757471077 |
| 21e0adefda |
| ea8e6d7b1d |
**.gitattributes** (vendored, 1 change)

```diff
@@ -1 +0,0 @@
-cpp/* linguist-vendored
```
**.gitignore** (vendored, 1 change)

```diff
@@ -9,7 +9,6 @@ target/release
 Cargo.lock
 benchmark
 .DS_Store
-cpp/simdcomp/bitpackingbenchmark
 *.bk
 .idea
 trace.dat
```
**Cargo.toml**

```diff
@@ -11,6 +11,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
+rust-version = "1.62"
 
 [dependencies]
 oneshot = "0.1.3"
@@ -19,11 +20,11 @@ byteorder = "1.4.3"
 crc32fast = "1.3.2"
 once_cell = "1.10.0"
 regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
-tantivy-fst = "0.3.0"
+tantivy-fst = "0.4.0"
 memmap2 = { version = "0.5.3", optional = true }
 lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
 brotli = { version = "3.3.4", optional = true }
-zstd = { version = "0.11", optional = true }
+zstd = { version = "0.11", optional = true, default-features = false }
 snap = { version = "1.0.5", optional = true }
 tempfile = { version = "3.3.0", optional = true }
 log = "0.4.16"
```
**README.md**

````diff
@@ -58,7 +58,7 @@ Distributed search is out of the scope of Tantivy, but if you are looking for th
 
 # Getting started
 
-Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
+Tantivy works on stable Rust and supports Linux, macOS, and Windows.
 
 - [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
 - [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
@@ -81,9 +81,13 @@ There are many ways to support this project.
 
 We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
 
+## Minimum supported Rust version
+
+Tantivy currently requires at least Rust 1.62 or later to compile.
+
 ## Clone and build locally
 
-Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
+Tantivy compiles on stable Rust.
 To check out and run tests, you can simply run:
 
 ```bash
````
```diff
@@ -277,7 +277,7 @@ impl BitSet {
         self.tinyset(el / 64u32).contains(el % 64)
     }
 
-    /// Returns the first non-empty `TinySet` associated to a bucket lower
+    /// Returns the first non-empty `TinySet` associated with a bucket lower
     /// or greater than bucket.
     ///
     /// Reminder: the tiny set with the bucket `bucket`, represents the
```
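As context for the `el / 64` / `el % 64` addressing visible in this hunk, here is a self-contained sketch of the bucket scheme (the `ToyBitSet` type is ours for illustration, not tantivy's `BitSet`):

```rust
/// Each bucket is one 64-bit word, so element `el` lives in word `el / 64`
/// at bit `el % 64`. This is illustrative, not tantivy's implementation.
struct ToyBitSet {
    words: Vec<u64>,
}

impl ToyBitSet {
    fn with_max_value(max_value: u32) -> Self {
        ToyBitSet {
            words: vec![0u64; (max_value as usize + 63) / 64],
        }
    }

    fn insert(&mut self, el: u32) {
        self.words[(el / 64) as usize] |= 1u64 << (el % 64);
    }

    fn contains(&self, el: u32) -> bool {
        self.words[(el / 64) as usize] & (1u64 << (el % 64)) != 0
    }
}

fn main() {
    let mut set = ToyBitSet::with_max_value(1000);
    set.insert(130); // bucket 2, bit 2
    assert!(set.contains(130));
    assert!(!set.contains(131));
}
```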
```diff
@@ -34,7 +34,8 @@ impl<T: Deref<Target = [u8]>> HasLen for T {
     }
 }
 
-const HIGHEST_BIT: u64 = 1 << 63;
+const HIGHEST_BIT_64: u64 = 1 << 63;
+const HIGHEST_BIT_32: u32 = 1 << 31;
 
 /// Maps a `i64` to `u64`
 ///
@@ -58,13 +59,13 @@ const HIGHEST_BIT: u64 = 1 << 63;
 /// The reverse mapping is [`u64_to_i64()`].
 #[inline]
 pub fn i64_to_u64(val: i64) -> u64 {
-    (val as u64) ^ HIGHEST_BIT
+    (val as u64) ^ HIGHEST_BIT_64
 }
 
 /// Reverse the mapping given by [`i64_to_u64()`].
 #[inline]
 pub fn u64_to_i64(val: u64) -> i64 {
-    (val ^ HIGHEST_BIT) as i64
+    (val ^ HIGHEST_BIT_64) as i64
 }
 
 /// Maps a `f64` to `u64`
@@ -88,7 +89,7 @@ pub fn u64_to_i64(val: u64) -> i64 {
 pub fn f64_to_u64(val: f64) -> u64 {
     let bits = val.to_bits();
     if val.is_sign_positive() {
-        bits ^ HIGHEST_BIT
+        bits ^ HIGHEST_BIT_64
     } else {
         !bits
     }
@@ -97,26 +98,148 @@ pub fn f64_to_u64(val: f64) -> u64 {
 /// Reverse the mapping given by [`f64_to_u64()`].
 #[inline]
 pub fn u64_to_f64(val: u64) -> f64 {
-    f64::from_bits(if val & HIGHEST_BIT != 0 {
-        val ^ HIGHEST_BIT
+    f64::from_bits(if val & HIGHEST_BIT_64 != 0 {
+        val ^ HIGHEST_BIT_64
     } else {
         !val
     })
 }
 
+/// Maps a `f32` to `u64`
+///
+/// # See also
+/// Similar mapping for f64 [`u64_to_f64()`].
+#[inline]
+pub fn f32_to_u64(val: f32) -> u64 {
+    let bits = val.to_bits();
+    let res32 = if val.is_sign_positive() {
+        bits ^ HIGHEST_BIT_32
+    } else {
+        !bits
+    };
+    res32 as u64
+}
+
+/// Reverse the mapping given by [`f32_to_u64()`].
+#[inline]
+pub fn u64_to_f32(val: u64) -> f32 {
+    debug_assert!(val <= 1 << 32);
+    let val = val as u32;
+    f32::from_bits(if val & HIGHEST_BIT_32 != 0 {
+        val ^ HIGHEST_BIT_32
+    } else {
+        !val
+    })
+}
+
+/// Maps a `f64` to a fixed point representation.
+/// Lower bound is inclusive, upper bound is exclusive.
+/// `precision` is the number of bits used to represent the number.
+///
+/// This is a lossy, affine transformation. All provided values must be finite and non-NaN.
+/// Care should be taken to not provide values which would cause loss of precision such as values
+/// low enough to get sub-normal numbers, value high enough rounding would cause ±Inf to appear, or
+/// a precision larger than 50b.
+///
+/// # See also
+/// The reverse mapping is [`fixed_point_to_f64()`].
+#[inline]
+pub fn f64_to_fixed_point(val: f64, min: f64, max: f64, precision: u8) -> u64 {
+    debug_assert!((1..53).contains(&precision));
+    debug_assert!(min < max);
+
+    let delta = max - min;
+    let mult = (1u64 << precision) as f64;
+    let bucket_size = delta / mult;
+    let upper_bound = f64_next_down(max).min(max - bucket_size);
+
+    // due to different cases of rounding error, we need to enforce upper_bound to be
+    // max-bucket_size, but also that upper_bound < max, which is not given for small enough
+    // bucket_size.
+    let val = val.clamp(min, upper_bound);
+
+    let res = (val - min) / bucket_size;
+    if res.fract() == 0.5 {
+        res as u64
+    } else {
+        // round down when getting x.5
+        res.round() as u64
+    }
+}
+
+/// Reverse the mapping given by [`f64_to_fixed_point()`].
+#[inline]
+pub fn fixed_point_to_f64(val: u64, min: f64, max: f64, precision: u8) -> f64 {
+    let delta = max - min;
+    let mult = (1u64 << precision) as f64;
+    let bucket_size = delta / mult;
+
+    bucket_size.mul_add(val as f64, min)
+}
+
+// taken from rfc/3173-float-next-up-down, commented out part about nan in infinity as it is not
+// needed.
+fn f64_next_down(this: f64) -> f64 {
+    const NEG_TINY_BITS: u64 = 0x8000_0000_0000_0001;
+    const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
+
+    let bits = this.to_bits();
+    // if this.is_nan() || bits == f64::NEG_INFINITY.to_bits() {
+    //     return this;
+    // }
+    let abs = bits & CLEAR_SIGN_MASK;
+    let next_bits = if abs == 0 {
+        NEG_TINY_BITS
+    } else if bits == abs {
+        bits - 1
+    } else {
+        bits + 1
+    };
+    f64::from_bits(next_bits)
+}
+
 #[cfg(test)]
 pub mod test {
     use std::cmp::Ordering;
 
     use proptest::prelude::*;
 
-    use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
+    use super::{
+        f32_to_u64, f64_to_fixed_point, f64_to_u64, fixed_point_to_f64, i64_to_u64, u64_to_f32,
+        u64_to_f64, u64_to_i64, BinarySerializable, FixedSize,
+    };
 
     fn test_i64_converter_helper(val: i64) {
         assert_eq!(u64_to_i64(i64_to_u64(val)), val);
     }
 
     fn test_f64_converter_helper(val: f64) {
-        assert_eq!(u64_to_f64(f64_to_u64(val)), val);
+        assert_eq!(u64_to_f64(f64_to_u64(val)).total_cmp(&val), Ordering::Equal);
     }
 
+    fn test_f32_converter_helper(val: f32) {
+        assert_eq!(u64_to_f32(f32_to_u64(val)).total_cmp(&val), Ordering::Equal);
+    }
+
+    fn test_fixed_point_converter_helper(val: f64, min: f64, max: f64, precision: u8) {
+        let bucket_count = 1 << precision;
+
+        let packed = f64_to_fixed_point(val, min, max, precision);
+
+        assert!(packed < bucket_count, "used to much bits");
+
+        let depacked = fixed_point_to_f64(packed, min, max, precision);
+        let repacked = f64_to_fixed_point(depacked, min, max, precision);
+
+        assert_eq!(packed, repacked, "generational loss");
+
+        let error = (val.clamp(min, crate::f64_next_down(max)) - depacked).abs();
+
+        let expected = (max - min) / (bucket_count as f64);
+        assert!(
+            error <= (max - min) / (bucket_count as f64) * 2.0,
+            "error larger than expected"
+        );
+    }
+
     pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
@@ -125,12 +248,75 @@ pub mod test {
         assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
     }
 
+    fn fixed_point_bound() -> proptest::num::f64::Any {
+        proptest::num::f64::POSITIVE
+            | proptest::num::f64::NEGATIVE
+            | proptest::num::f64::NORMAL
+            | proptest::num::f64::ZERO
+    }
+
     proptest! {
         #[test]
-        fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::NORMAL, proptest::num::f64::NORMAL)) {
+        fn test_f64_converter_monotonicity_proptest((left, right) in (proptest::num::f64::ANY, proptest::num::f64::ANY)) {
             test_f64_converter_helper(left);
             test_f64_converter_helper(right);
 
             let left_u64 = f64_to_u64(left);
             let right_u64 = f64_to_u64(right);
-            assert_eq!(left_u64 < right_u64, left < right);
 
+            assert_eq!(left_u64.cmp(&right_u64), left.total_cmp(&right));
         }
 
+        #[test]
+        fn test_f32_converter_monotonicity_proptest((left, right) in (proptest::num::f32::ANY, proptest::num::f32::ANY)) {
+            test_f32_converter_helper(left);
+            test_f32_converter_helper(right);
+
+            let left_u64 = f32_to_u64(left);
+            let right_u64 = f32_to_u64(right);
+            assert_eq!(left_u64.cmp(&right_u64), left.total_cmp(&right));
+        }
+
+        #[test]
+        fn test_fixed_point_converter_proptest((left, right, min, max, precision) in
+            (fixed_point_bound(), fixed_point_bound(),
+             fixed_point_bound(), fixed_point_bound(),
+             proptest::num::u8::ANY)) {
+            // convert so all input are legal
+            let (min, max) = if min < max {
+                (min, max)
+            } else if min > max {
+                (max, min)
+            } else {
+                return Ok(()); // equals
+            };
+            if 1 > precision || precision >= 50 {
+                return Ok(());
+            }
+
+            let max_full_precision = 53.0 - precision as f64;
+            if (max / min).abs().log2().abs() > max_full_precision {
+                return Ok(());
+            }
+            // we will go in subnormal territories => loss of precision
+            if (((max - min).log2() - precision as f64) as i32) < f64::MIN_EXP {
+                return Ok(());
+            }
+
+            if (max - min).is_infinite() {
+                return Ok(());
+            }
+
+            test_fixed_point_converter_helper(left, min, max, precision);
+            test_fixed_point_converter_helper(right, min, max, precision);
+
+            let left_u64 = f64_to_fixed_point(left, min, max, precision);
+            let right_u64 = f64_to_fixed_point(right, min, max, precision);
+            if left < right {
+                assert!(left_u64 <= right_u64);
+            } else if left > right {
+                assert!(left_u64 >= right_u64)
+            }
+        }
     }
 
@@ -168,4 +354,27 @@ pub mod test {
         assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));
         assert!(f64_to_u64(-2.0) < f64_to_u64(-1.5));
     }
+
+    #[test]
+    fn test_f32_converter() {
+        test_f32_converter_helper(f32::INFINITY);
+        test_f32_converter_helper(f32::NEG_INFINITY);
+        test_f32_converter_helper(0.0);
+        test_f32_converter_helper(-0.0);
+        test_f32_converter_helper(1.0);
+        test_f32_converter_helper(-1.0);
+    }
+
+    #[test]
+    fn test_f32_order() {
+        assert!(!(f32_to_u64(f32::NEG_INFINITY)..f32_to_u64(f32::INFINITY))
+            .contains(&f32_to_u64(f32::NAN))); // nan is not a number
+        assert!(f32_to_u64(1.5) > f32_to_u64(1.0)); // same exponent, different mantissa
+        assert!(f32_to_u64(2.0) > f32_to_u64(1.0)); // same mantissa, different exponent
+        assert!(f32_to_u64(2.0) > f32_to_u64(1.5)); // different exponent and mantissa
+        assert!(f32_to_u64(1.0) > f32_to_u64(-1.0)); // pos > neg
+        assert!(f32_to_u64(-1.5) < f32_to_u64(-1.0));
+        assert!(f32_to_u64(-2.0) < f32_to_u64(1.0));
+        assert!(f32_to_u64(-2.0) < f32_to_u64(-1.5));
+    }
 }
```
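The hunks above split `HIGHEST_BIT` into 64- and 32-bit variants and add `f32` and fixed-point siblings of the order-preserving float mapping. A quick, self-contained illustration of the trick (the mapping body is copied from the diff; the driver and sample values are ours):

```rust
const HIGHEST_BIT_64: u64 = 1 << 63;

/// Same mapping as in the diff: positive floats get their sign bit flipped,
/// negative floats are fully inverted, so u64 order matches f64 order.
fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        bits ^ HIGHEST_BIT_64
    } else {
        !bits
    }
}

fn main() {
    let values = [f64::NEG_INFINITY, -2.0, -1.5, -0.0, 0.0, 1.0, 1.5, f64::INFINITY];
    // The mapped u64s are non-decreasing in the same order as the source f64s.
    let mapped: Vec<u64> = values.iter().map(|&v| f64_to_u64(v)).collect();
    assert!(mapped.windows(2).all(|w| w[0] <= w[1]));
    println!("{mapped:?}");
}
```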
```diff
@@ -50,7 +50,7 @@ to get tantivy to fit your use case:
 
 *Example 1* You could for instance use hadoop to build a very large search index in a timely manner, copy all of the resulting segment files in the same directory and edit the `meta.json` to get a functional index.[^2]
 
-*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
+*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated with segment `D-7`.
 
 ## Merging
 
```
```diff
@@ -113,7 +113,7 @@ fn main() -> tantivy::Result<()> {
     // on its id.
     //
     // Note that `tantivy` does nothing to enforce the idea that
-    // there is only one document associated to this id.
+    // there is only one document associated with this id.
     //
     // Also you might have noticed that we apply the delete before
     // having committed. This does not matter really...
```
```diff
@@ -44,7 +44,7 @@ fn main() -> tantivy::Result<()> {
     // A segment contains different data structure.
     // Inverted index stands for the combination of
     // - the term dictionary
-    // - the inverted lists associated to each terms and their positions
+    // - the inverted lists associated with each terms and their positions
     let inverted_index = segment_reader.inverted_index(title)?;
 
     // A `Term` is a text token associated with a field.
@@ -105,7 +105,7 @@ fn main() -> tantivy::Result<()> {
     // A segment contains different data structure.
     // Inverted index stands for the combination of
     // - the term dictionary
-    // - the inverted lists associated to each terms and their positions
+    // - the inverted lists associated with each terms and their positions
     let inverted_index = segment_reader.inverted_index(title)?;
 
     // This segment posting object is like a cursor over the documents matching the term.
```
```diff
@@ -4,7 +4,7 @@ use std::ops::RangeInclusive;
 use tantivy_bitpacker::minmax;
 
 pub trait Column<T: PartialOrd = u64>: Send + Sync {
-    /// Return the value associated to the given idx.
+    /// Return the value associated with the given idx.
     ///
     /// This accessor should return as fast as possible.
     ///
```
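For readers unfamiliar with the `Column` abstraction these codec patches revolve around, here is a minimal `Vec`-backed sketch (trait surface reduced to the methods appearing in these diffs; tantivy's real `VecColumn` differs in details):

```rust
/// Illustrative reduction of the fastfield `Column` trait.
pub trait Column<T: PartialOrd = u64>: Send + Sync {
    /// Return the value associated with the given idx.
    fn get_val(&self, idx: u64) -> T;
    fn min_value(&self) -> T;
    fn max_value(&self) -> T;
    fn num_vals(&self) -> u64;
    fn iter(&self) -> Box<dyn Iterator<Item = T> + '_>;
}

struct ToyVecColumn {
    values: Vec<u64>,
}

impl Column for ToyVecColumn {
    fn get_val(&self, idx: u64) -> u64 {
        self.values[idx as usize]
    }
    fn min_value(&self) -> u64 {
        self.values.iter().copied().min().unwrap_or(0)
    }
    fn max_value(&self) -> u64 {
        self.values.iter().copied().max().unwrap_or(0)
    }
    fn num_vals(&self) -> u64 {
        self.values.len() as u64
    }
    fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
        Box::new(self.values.iter().copied())
    }
}

fn main() {
    let col = ToyVecColumn { values: vec![3, 1, 4, 1, 5] };
    assert_eq!(col.get_val(2), 4);
    assert_eq!((col.min_value(), col.max_value(), col.num_vals()), (1, 5, 5));
}
```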
```diff
@@ -312,7 +312,7 @@ mod tests {
 
     #[test]
     fn estimation_test_bad_interpolation_case_monotonically_increasing() {
-        let mut data: Vec<u64> = (200..=20000_u64).collect();
+        let mut data: Vec<u64> = (201..=20000_u64).collect();
         data.push(1_000_000);
         let data: VecColumn = data.as_slice().into();
```
```diff
@@ -68,29 +68,37 @@ impl Line {
     }
 
     // Same as train, but the intercept is only estimated from provided sample positions
-    pub fn estimate(ys: &dyn Column, sample_positions: &[u64]) -> Self {
+    pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
+        let first_val = sample_positions_and_values[0].1;
+        let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
+        let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
         Self::train_from(
-            ys,
-            sample_positions
-                .iter()
-                .cloned()
-                .map(|pos| (pos, ys.get_val(pos))),
+            first_val,
+            last_val,
+            num_vals,
+            sample_positions_and_values.iter().cloned(),
         )
     }
 
     // Intercept is only computed from provided positions
-    fn train_from(ys: &dyn Column, positions_and_values: impl Iterator<Item = (u64, u64)>) -> Self {
-        let num_vals = if let Some(num_vals) = NonZeroU64::new(ys.num_vals() - 1) {
-            num_vals
+    fn train_from(
+        first_val: u64,
+        last_val: u64,
+        num_vals: u64,
+        positions_and_values: impl Iterator<Item = (u64, u64)>,
+    ) -> Self {
+        // TODO replace with let else
+        let idx_last_val = if let Some(idx_last_val) = NonZeroU64::new(num_vals - 1) {
+            idx_last_val
         } else {
             return Line::default();
         };
 
-        let y0 = ys.get_val(0);
-        let y1 = ys.get_val(num_vals.get());
+        let y0 = first_val;
+        let y1 = last_val;
 
         // We first independently pick our slope.
-        let slope = compute_slope(y0, y1, num_vals);
+        let slope = compute_slope(y0, y1, idx_last_val);
 
         // We picked our slope. Note that it does not have to be perfect.
         // Now we need to compute the best intercept.
@@ -138,8 +146,12 @@ impl Line {
     /// This function is only invariable by translation if all of the
     /// `ys` are packaged into half of the space. (See heuristic below)
     pub fn train(ys: &dyn Column) -> Self {
+        let first_val = ys.iter().next().unwrap();
+        let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
         Self::train_from(
-            ys,
+            first_val,
+            last_val,
+            ys.num_vals(),
            ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
         )
     }
```
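A toy version of the estimation above, to make the sampling idea concrete. Tantivy's real `train_from` chooses the intercept more carefully (it minimizes the wrapping offset, which is not shown in this diff), so treat this as a sketch of the slope-from-endpoints idea only:

```rust
/// Pick the slope from the first and last (position, value) sample, then
/// anchor the intercept at the first sample. Illustrative only.
#[derive(Debug, Default)]
struct ToyLine {
    slope: f64,
    intercept: f64,
}

impl ToyLine {
    fn estimate(samples: &[(u64, u64)]) -> ToyLine {
        let (first_pos, first_val) = samples[0];
        let (last_pos, last_val) = samples[samples.len() - 1];
        if last_pos == first_pos {
            return ToyLine::default();
        }
        let slope = (last_val as f64 - first_val as f64) / (last_pos - first_pos) as f64;
        ToyLine {
            slope,
            intercept: first_val as f64 - slope * first_pos as f64,
        }
    }

    fn eval(&self, pos: u64) -> f64 {
        self.slope * pos as f64 + self.intercept
    }
}

fn main() {
    // Samples taken at positions 0, 100, 200 of a roughly linear column.
    let samples = [(0u64, 10u64), (100, 1015), (200, 2010)];
    let line = ToyLine::estimate(&samples);
    // The interpolation error at the middle sample stays small.
    assert!((line.eval(100) - 1015.0).abs() < 10.0);
}
```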
```diff
@@ -126,18 +126,20 @@ impl FastFieldCodec for LinearCodec {
             return None; // disable compressor for this case
         }
 
-        // let's sample at 0%, 5%, 10% .. 95%, 100%
-        let num_vals = column.num_vals() as f32 / 100.0;
-        let sample_positions = (0..20)
-            .map(|pos| (num_vals * pos as f32 * 5.0) as u64)
-            .collect::<Vec<_>>();
+        let limit_num_vals = column.num_vals().min(100_000);
 
-        let line = Line::estimate(column, &sample_positions);
+        let num_samples = 100;
+        let step_size = (limit_num_vals / num_samples).max(1); // 20 samples
+        let mut sample_positions_and_values: Vec<_> = Vec::new();
+        for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
+            sample_positions_and_values.push((pos as u64, val));
+        }
 
-        let estimated_bit_width = sample_positions
+        let line = Line::estimate(&sample_positions_and_values);
+
+        let estimated_bit_width = sample_positions_and_values
             .into_iter()
-            .map(|pos| {
-                let actual_value = column.get_val(pos);
+            .map(|(pos, actual_value)| {
                 let interpolated_val = line.eval(pos as u64);
                 actual_value.wrapping_sub(interpolated_val)
             })
@@ -146,6 +148,7 @@ impl FastFieldCodec for LinearCodec {
             .max()
             .unwrap_or(0);
 
+        // Extrapolate to whole column
         let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
         let num_bits_uncompressed = 64 * column.num_vals();
         Some(num_bits as f32 / num_bits_uncompressed as f32)
```
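The estimate now samples in a single sequential pass with `step_by` instead of random access through `get_val`. A standalone sketch of that sampling (function and variable names are ours, mirroring the diff):

```rust
/// Keep every `step_size`-th (position, value) pair of a column,
/// capping the scan at 100_000 values as in the diff above.
fn sample_positions_and_values(values: &[u64], num_samples: u64) -> Vec<(u64, u64)> {
    let limit_num_vals = (values.len() as u64).min(100_000);
    let step_size = (limit_num_vals / num_samples).max(1);
    values
        .iter()
        .copied()
        .enumerate()
        .step_by(step_size as usize)
        .map(|(pos, val)| (pos as u64, val))
        .collect()
}

fn main() {
    let values: Vec<u64> = (0..1_000u64).map(|i| i * 3 + 7).collect();
    let samples = sample_positions_and_values(&values, 100);
    // 1000 values with step 10 yields 100 samples, starting at position 0.
    assert_eq!(samples.len(), 100);
    assert_eq!(samples[0], (0, 7));
    assert_eq!(samples[1], (10, 37));
}
```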
**fastfield_codecs/src/monotonic_mapping_u128.rs** (new file, 42 lines)

```diff
@@ -0,0 +1,42 @@
+use std::net::{IpAddr, Ipv6Addr};
+
+pub trait MonotonicallyMappableToU128: 'static + PartialOrd + Copy + Send + Sync {
+    /// Converts a value to u128.
+    ///
+    /// Internally all fast field values are encoded as u64.
+    fn to_u128(self) -> u128;
+
+    /// Converts a value from u128
+    ///
+    /// Internally all fast field values are encoded as u64.
+    /// **Note: To be used for converting encoded Term, Posting values.**
+    fn from_u128(val: u128) -> Self;
+}
+
+impl MonotonicallyMappableToU128 for u128 {
+    fn to_u128(self) -> u128 {
+        self
+    }
+
+    fn from_u128(val: u128) -> Self {
+        val
+    }
+}
+
+impl MonotonicallyMappableToU128 for IpAddr {
+    fn to_u128(self) -> u128 {
+        ip_to_u128(self)
+    }
+
+    fn from_u128(val: u128) -> Self {
+        IpAddr::from(val.to_be_bytes())
+    }
+}
+
+fn ip_to_u128(ip_addr: IpAddr) -> u128 {
+    let ip_addr_v6: Ipv6Addr = match ip_addr {
+        IpAddr::V4(v4) => v4.to_ipv6_mapped(),
+        IpAddr::V6(v6) => v6,
+    };
+    u128::from_be_bytes(ip_addr_v6.octets())
+}
```
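A short usage sketch of the new mapping (`ip_to_u128` is copied from the new file; the driver and assertions are ours):

```rust
use std::net::{IpAddr, Ipv6Addr};

// IPv4 addresses are first mapped into IPv6 space, then the 16 big-endian
// octets become a u128, which keeps the address order intact.
fn ip_to_u128(ip_addr: IpAddr) -> u128 {
    let ip_addr_v6: Ipv6Addr = match ip_addr {
        IpAddr::V4(v4) => v4.to_ipv6_mapped(),
        IpAddr::V6(v6) => v6,
    };
    u128::from_be_bytes(ip_addr_v6.octets())
}

fn main() {
    let a: IpAddr = "10.0.0.1".parse().unwrap();
    let b: IpAddr = "10.0.0.2".parse().unwrap();
    // Big-endian byte order makes the u128 order match the address order.
    assert!(ip_to_u128(a) < ip_to_u128(b));
    // Round-tripping goes through the IPv6-mapped form for IPv4 input.
    let back = IpAddr::from(ip_to_u128(a).to_be_bytes());
    assert_eq!(back, "::ffff:10.0.0.1".parse::<IpAddr>().unwrap());
}
```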
```diff
@@ -452,7 +452,7 @@ fn intermediate_buckets_to_final_buckets_fill_gaps(
     histogram_req: &HistogramAggregation,
     sub_aggregation: &AggregationsInternal,
 ) -> crate::Result<Vec<BucketEntry>> {
-    // Generate the the full list of buckets without gaps.
+    // Generate the full list of buckets without gaps.
     //
     // The bounds are the min max from the current buckets, optionally extended by
     // extended_bounds from the request
```
```diff
@@ -323,8 +323,8 @@ impl SegmentRangeCollector {
     /// Converts the user provided f64 range value to fast field value space.
     ///
     /// Internally fast field values are always stored as u64.
-    /// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
-    /// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
+    /// If the fast field has u64 `[1, 2, 5]`, these values are stored as is in the fast field.
+    /// A fast field with f64 `[1.0, 2.0, 5.0]` is converted to u64 space, using a
     /// monotonic mapping function, so the order is preserved.
     ///
     /// Consequently, a f64 user range 1.0..3.0 needs to be converted to fast field value space using
```
```diff
@@ -38,7 +38,7 @@ pub trait CustomSegmentScorer<TScore>: 'static {
 pub trait CustomScorer<TScore>: Sync {
     /// Type of the associated [`CustomSegmentScorer`].
     type Child: CustomSegmentScorer<TScore>;
-    /// Builds a child scorer for a specific segment. The child scorer is associated to
+    /// Builds a child scorer for a specific segment. The child scorer is associated with
     /// a specific segment.
     fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child>;
 }
```
```diff
@@ -91,7 +91,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// let index = Index::create_in_ram(schema);
 /// {
 ///     let mut index_writer = index.writer(3_000_000)?;
-///     // a document can be associated to any number of facets
+///     // a document can be associated with any number of facets
 ///     index_writer.add_document(doc!(
 ///         title => "The Name of the Wind",
 ///         facet => Facet::from("/lang/en"),
@@ -338,11 +338,7 @@ impl SegmentCollector for FacetSegmentCollector {
         let mut previous_collapsed_ord: usize = usize::MAX;
         for &facet_ord in &self.facet_ords_buf {
             let collapsed_ord = self.collapse_mapping[facet_ord as usize];
-            self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
-                0
-            } else {
-                1
-            };
+            self.counts[collapsed_ord] += u64::from(collapsed_ord != previous_collapsed_ord);
             previous_collapsed_ord = collapsed_ord;
         }
     }
```
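The second hunk replaces the `if`/`else` with `u64::from(bool)`, a branchless 0-or-1 increment. A minimal standalone illustration:

```rust
fn main() {
    // `u64::from(bool)` yields 0 or 1, so "add one only when the ord
    // changed" needs no branch in the hot loop.
    let ords = [3usize, 3, 5, 5, 5, 7];
    let mut counts = vec![0u64; 8];
    let mut previous = usize::MAX;
    for &ord in &ords {
        counts[ord] += u64::from(ord != previous);
        previous = ord;
    }
    assert_eq!((counts[3], counts[5], counts[7]), (1, 1, 1));
}
```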
```diff
@@ -37,7 +37,7 @@ impl HistogramCollector {
     /// The scale/range of the histogram is not dynamic. It is required to
     /// define it by supplying following parameter:
     /// - `min_value`: the minimum value that can be recorded in the histogram.
-    /// - `bucket_width`: the length of the interval that is associated to each buckets.
+    /// - `bucket_width`: the length of the interval that is associated with each buckets.
     /// - `num_buckets`: The overall number of buckets.
     ///
     /// Together, this parameters define a partition of `[min_value, min_value + num_buckets *
```
```diff
@@ -142,7 +142,7 @@ pub trait Collector: Sync + Send {
     /// e.g. `usize` for the `Count` collector.
     type Fruit: Fruit;
 
-    /// Type of the `SegmentCollector` associated to this collector.
+    /// Type of the `SegmentCollector` associated with this collector.
     type Child: SegmentCollector;
 
     /// `set_segment` is called before beginning to enumerate
@@ -156,7 +156,7 @@ pub trait Collector: Sync + Send {
     /// Returns true iff the collector requires to compute scores for documents.
     fn requires_scoring(&self) -> bool;
 
-    /// Combines the fruit associated to the collection of each segments
+    /// Combines the fruit associated with the collection of each segments
     /// into one fruit.
     fn merge_fruits(
         &self,
```
|
||||
}
|
||||
}
|
||||
|
||||
/// Segment Collector associated to `TopDocs`.
|
||||
/// Segment Collector associated with `TopDocs`.
|
||||
pub struct TopScoreSegmentCollector(TopSegmentCollector<Score>);
|
||||
|
||||
impl SegmentCollector for TopScoreSegmentCollector {
|
||||
|
||||
```diff
@@ -40,7 +40,7 @@ pub trait ScoreTweaker<TScore>: Sync {
     /// Type of the associated [`ScoreSegmentTweaker`].
     type Child: ScoreSegmentTweaker<TScore>;
 
-    /// Builds a child tweaker for a specific segment. The child scorer is associated to
+    /// Builds a child tweaker for a specific segment. The child scorer is associated with
     /// a specific segment.
     fn segment_tweaker(&self, segment_reader: &SegmentReader) -> Result<Self::Child>;
 }
```
```diff
@@ -19,7 +19,7 @@ use crate::error::{DataCorruption, TantivyError};
 use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
 use crate::indexer::segment_updater::save_metas;
 use crate::reader::{IndexReader, IndexReaderBuilder};
-use crate::schema::{Field, FieldType, Schema};
+use crate::schema::{Cardinality, Field, FieldType, Schema};
 use crate::tokenizer::{TextAnalyzer, TokenizerManager};
 use crate::IndexWriter;
 
@@ -152,9 +152,7 @@ impl IndexBuilder {
     /// This should only be used for unit tests.
     pub fn create_in_ram(self) -> Result<Index, TantivyError> {
         let ram_directory = RamDirectory::create();
-        Ok(self
-            .create(ram_directory)
-            .expect("Creating a RamDirectory should never fail"))
+        self.create(ram_directory)
     }
 
     /// Creates a new index in a given filepath.
@@ -228,10 +226,44 @@ impl IndexBuilder {
             ))
         }
     }
 
+    fn validate(&self) -> crate::Result<()> {
+        if let Some(schema) = self.schema.as_ref() {
+            if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
+                let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
+                    TantivyError::InvalidArgument(format!(
+                        "Field to sort index {} not found in schema",
+                        sort_by_field.field
+                    ))
+                })?;
+                let entry = schema.get_field_entry(schema_field);
+                if !entry.is_fast() {
+                    return Err(TantivyError::InvalidArgument(format!(
+                        "Field {} is no fast field. Field needs to be a single value fast field \
+                         to be used to sort an index",
+                        sort_by_field.field
+                    )));
+                }
+                if entry.field_type().fastfield_cardinality() != Some(Cardinality::SingleValue) {
+                    return Err(TantivyError::InvalidArgument(format!(
+                        "Only single value fast field Cardinality supported for sorting index {}",
+                        sort_by_field.field
+                    )));
+                }
+            }
+            Ok(())
+        } else {
+            Err(TantivyError::InvalidArgument(
+                "no schema passed".to_string(),
+            ))
+        }
+    }
+
     /// Creates a new index given an implementation of the trait `Directory`.
     ///
     /// If a directory previously existed, it will be erased.
     fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
+        self.validate()?;
         let dir = dir.into();
         let directory = ManagedDirectory::wrap(dir)?;
         save_new_metas(
```
```diff
@@ -130,7 +130,7 @@ impl SegmentMeta {
     /// Returns the relative path of a component of our segment.
     ///
     /// It just joins the segment id with the extension
-    /// associated to a segment component.
+    /// associated with a segment component.
     pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
         let mut path = self.id().uuid_string();
         path.push_str(&*match component {
@@ -326,13 +326,13 @@ pub struct IndexMeta {
     /// `IndexSettings` to configure index options.
     #[serde(default)]
     pub index_settings: IndexSettings,
-    /// List of `SegmentMeta` information associated to each finalized segment of the index.
+    /// List of `SegmentMeta` information associated with each finalized segment of the index.
     pub segments: Vec<SegmentMeta>,
     /// Index `Schema`
     pub schema: Schema,
-    /// Opstamp associated to the last `commit` operation.
+    /// Opstamp associated with the last `commit` operation.
     pub opstamp: Opstamp,
-    /// Payload associated to the last commit.
+    /// Payload associated with the last commit.
     ///
     /// Upon commit, clients can optionally add a small `String` payload to their commit
     /// to help identify this commit.
```
```diff
@@ -9,18 +9,17 @@ use crate::schema::{IndexRecordOption, Term};
 use crate::termdict::TermDictionary;
 
 /// The inverted index reader is in charge of accessing
-/// the inverted index associated to a specific field.
+/// the inverted index associated with a specific field.
 ///
 /// # Note
 ///
-/// It is safe to delete the segment associated to
+/// It is safe to delete the segment associated with
 /// an `InvertedIndexReader`. As long as it is open,
-/// the `FileSlice` it is relying on should
+/// the [`FileSlice`] it is relying on should
 /// stay available.
 ///
-///
 /// `InvertedIndexReader` are created by calling
-/// the `SegmentReader`'s [`.inverted_index(...)`] method
+/// [`SegmentReader::inverted_index()`](crate::SegmentReader::inverted_index).
 pub struct InvertedIndexReader {
     termdict: TermDictionary,
     postings_file_slice: FileSlice,
@@ -30,7 +29,7 @@ pub struct InvertedIndexReader {
 }
 
 impl InvertedIndexReader {
-    #[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))] // for symmetry
+    #[allow(clippy::needless_pass_by_value)] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
         postings_file_slice: FileSlice,
@@ -75,7 +74,7 @@ impl InvertedIndexReader {
     ///
     /// This is useful for enumerating through a list of terms,
     /// and consuming the associated posting lists while avoiding
-    /// reallocating a `BlockSegmentPostings`.
+    /// reallocating a [`BlockSegmentPostings`].
     ///
     /// # Warning
     ///
@@ -96,7 +95,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `Term`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub fn read_block_postings(
         &self,
         term: &Term,
@@ -110,7 +109,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `term_info`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub fn read_block_postings_from_terminfo(
         &self,
         term_info: &TermInfo,
@@ -130,7 +129,7 @@ impl InvertedIndexReader {
     /// Returns a posting object given a `term_info`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub fn read_postings_from_terminfo(
         &self,
         term_info: &TermInfo,
@@ -164,12 +163,12 @@ impl InvertedIndexReader {
     /// or `None` if the term has never been encountered and indexed.
     ///
     /// If the field was not indexed with the indexing options that cover
-    /// the requested options, the returned `SegmentPostings` the method does not fail
+    /// the requested options, the returned [`SegmentPostings`] the method does not fail
     /// and returns a `SegmentPostings` with as much information as possible.
     ///
-    /// For instance, requesting `IndexRecordOption::Freq` for a
-    /// `TextIndexingOptions` that does not index position will return a `SegmentPostings`
-    /// with `DocId`s and frequencies.
+    /// For instance, requesting [`IndexRecordOption::WithFreqs`] for a
+    /// [`TextOptions`](crate::schema::TextOptions) that does not index position
+    /// will return a [`SegmentPostings`] with `DocId`s and frequencies.
     pub fn read_postings(
         &self,
         term: &Term,
@@ -211,7 +210,7 @@ impl InvertedIndexReader {
     /// Returns a block postings given a `Term`.
     /// This method is for an advanced usage only.
     ///
-    /// Most user should prefer using `read_postings` instead.
+    /// Most users should prefer using [`Self::read_postings()`] instead.
     pub async fn warm_postings(
         &self,
         term: &Term,
```
```diff
@@ -69,7 +69,7 @@ pub struct Searcher {
 }
 
 impl Searcher {
-    /// Returns the `Index` associated to the `Searcher`
+    /// Returns the `Index` associated with the `Searcher`
     pub fn index(&self) -> &Index {
         &self.inner.index
     }
@@ -108,7 +108,7 @@ impl Searcher {
         store_reader.get_async(doc_address.doc_id).await
     }
 
-    /// Access the schema associated to the index of this searcher.
+    /// Access the schema associated with the index of this searcher.
     pub fn schema(&self) -> &Schema {
         &self.inner.schema
     }
@@ -161,11 +161,11 @@ impl Searcher {
     ///
     /// Search works as follows :
     ///
-    /// First the weight object associated to the query is created.
+    /// First the weight object associated with the query is created.
     ///
     /// Then, the query loops over the segments and for each segment :
     /// - setup the collector and informs it that the segment being processed has changed.
-    /// - creates a SegmentCollector for collecting documents associated to the segment
+    /// - creates a SegmentCollector for collecting documents associated with the segment
     /// - creates a `Scorer` object associated for this segment
     /// - iterate through the matched documents and push them to the segment collector.
     ///
```
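The search flow documented above (a weight for the query, then a per-segment scorer feeding a segment collector) in a minimal end-to-end sketch. This targets the tantivy API of roughly this era; the memory budget and field names are arbitrary:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();

    // Index a single document in RAM.
    let index = Index::create_in_ram(schema);
    let mut writer = index.writer(15_000_000)?;
    writer.add_document(doc!(title => "The Old Man and the Sea"))?;
    writer.commit()?;

    // `search` builds the query's weight, then iterates segments,
    // pushing matches into the collector's segment collectors.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("sea")?;
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    assert_eq!(top_docs.len(), 1);
    Ok(())
}
```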
```diff
@@ -70,7 +70,7 @@ impl Segment {
     /// Returns the relative path of a component of our segment.
     ///
     /// It just joins the segment id with the extension
-    /// associated to a segment component.
+    /// associated with a segment component.
     pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
         self.meta.relative_path(component)
     }
```
```diff
@@ -6,7 +6,7 @@ use std::slice;
 /// except the delete component that takes an `segment_uuid`.`delete_opstamp`.`component_extension`
 #[derive(Copy, Clone, Eq, PartialEq)]
 pub enum SegmentComponent {
-    /// Postings (or inverted list). Sorted lists of document ids, associated to terms
+    /// Postings (or inverted list). Sorted lists of document ids, associated with terms
     Postings,
     /// Positions of terms in each document.
     Positions,
```
```diff
@@ -89,7 +89,7 @@ impl SegmentReader {
         &self.fast_fields_readers
     }
 
-    /// Accessor to the `FacetReader` associated to a given `Field`.
+    /// Accessor to the `FacetReader` associated with a given `Field`.
     pub fn facet_reader(&self, field: Field) -> crate::Result<FacetReader> {
         let field_entry = self.schema.get_field_entry(field);
 
@@ -208,18 +208,18 @@ impl SegmentReader {
         })
     }
 
-    /// Returns a field reader associated to the field given in argument.
+    /// Returns a field reader associated with the field given in argument.
     /// If the field was not present in the index during indexing time,
     /// the InvertedIndexReader is empty.
     ///
     /// The field reader is in charge of iterating through the
-    /// term dictionary associated to a specific field,
-    /// and opening the posting list associated to any term.
+    /// term dictionary associated with a specific field,
+    /// and opening the posting list associated with any term.
     ///
-    /// If the field is not marked as index, a warn is logged and an empty `InvertedIndexReader`
+    /// If the field is not marked as index, a warning is logged and an empty `InvertedIndexReader`
     /// is returned.
-    /// Similarly if the field is marked as indexed but no term has been indexed for the given
-    /// index. an empty `InvertedIndexReader` is returned (but no warning is logged).
+    /// Similarly, if the field is marked as indexed but no term has been indexed for the given
+    /// index, an empty `InvertedIndexReader` is returned (but no warning is logged).
     pub fn inverted_index(&self, field: Field) -> crate::Result<Arc<InvertedIndexReader>> {
         if let Some(inv_idx_reader) = self
             .inv_idx_reader_cache
@@ -241,7 +241,7 @@ impl SegmentReader {
 
         if postings_file_opt.is_none() || record_option_opt.is_none() {
             // no documents in the segment contained this field.
-            // As a result, no data is associated to the inverted index.
+            // As a result, no data is associated with the inverted index.
             //
             // Returns an empty inverted index.
             let record_option = record_option_opt.unwrap_or(IndexRecordOption::Basic);
```
```diff
@@ -154,14 +154,14 @@ impl CompositeFile {
         }
     }
 
-    /// Returns the `FileSlice` associated
-    /// to a given `Field` and stored in a `CompositeFile`.
+    /// Returns the `FileSlice` associated with
+    /// a given `Field` and stored in a `CompositeFile`.
     pub fn open_read(&self, field: Field) -> Option<FileSlice> {
         self.open_read_with_idx(field, 0)
     }
 
-    /// Returns the `FileSlice` associated
-    /// to a given `Field` and stored in a `CompositeFile`.
+    /// Returns the `FileSlice` associated with
+    /// a given `Field` and stored in a `CompositeFile`.
     pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<FileSlice> {
         self.offsets_index
             .get(&FileAddr { field, idx })
```
```diff
@@ -39,7 +39,7 @@ impl RetryPolicy {
 
 /// The `DirectoryLock` is an object that represents a file lock.
 ///
-/// It is associated to a lock file, that gets deleted on `Drop.`
+/// It is associated with a lock file, that gets deleted on `Drop.`
 pub struct DirectoryLock(Box<dyn Send + Sync + 'static>);
 
 struct DirectoryLockGuard {
```
```diff
@@ -1,5 +1,5 @@
 use std::ops::{Deref, Range};
-use std::sync::{Arc, Weak};
+use std::sync::Arc;
 use std::{fmt, io};
 
 use async_trait::async_trait;
@@ -8,16 +8,13 @@ use stable_deref_trait::StableDeref;
 
 use crate::directory::OwnedBytes;
 
-pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
-
 /// Objects that represents files sections in tantivy.
 ///
 /// By contract, whatever happens to the directory file, as long as a FileHandle
 /// is alive, the data associated with it cannot be altered or destroyed.
 ///
-/// The underlying behavior is therefore specific to the `Directory` that created it.
-/// Despite its name, a `FileSlice` may or may not directly map to an actual file
+/// The underlying behavior is therefore specific to the [`Directory`](crate::Directory) that
+/// created it. Despite its name, a [`FileSlice`] may or may not directly map to an actual file
 /// on the filesystem.
 #[async_trait]
```
```diff
@@ -9,7 +9,7 @@ use crc32fast::Hasher;
 
 use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
 
-pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
+const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
 
 // Watches a file and executes registered callbacks when the file is modified.
 pub struct FileWatcher {
```
```diff
@@ -3,7 +3,7 @@ use std::fs::{self, File, OpenOptions};
 use std::io::{self, BufWriter, Read, Seek, Write};
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, RwLock, Weak};
 use std::{fmt, result};
 
 use fs2::FileExt;
@@ -18,10 +18,13 @@ use crate::directory::error::{
 };
 use crate::directory::file_watcher::FileWatcher;
 use crate::directory::{
-    AntiCallToken, ArcBytes, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes,
-    TerminatingWrite, WatchCallback, WatchHandle, WeakArcBytes, WritePtr,
+    AntiCallToken, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes, TerminatingWrite,
+    WatchCallback, WatchHandle, WritePtr,
 };
 
+pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
+
 /// Create a default io error given a string.
 pub(crate) fn make_io_err(msg: String) -> io::Error {
     io::Error::new(io::ErrorKind::Other, msg)
@@ -301,7 +304,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
             "Path {:?} does not have parent directory.",
         )
     })?;
-    let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
+    let mut tempfile = tempfile::Builder::new().tempfile_in(parent_path)?;
     tempfile.write_all(content)?;
     tempfile.flush()?;
     tempfile.as_file_mut().sync_data()?;
@@ -334,7 +337,7 @@ impl Directory for MmapDirectory {
         Ok(Arc::new(owned_bytes))
     }
 
-    /// Any entry associated to the path in the mmap will be
+    /// Any entry associated with the path in the mmap will be
     /// removed before the file is deleted.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
         let full_path = self.resolve_path(path);
@@ -607,9 +610,11 @@ mod tests {
         assert!(num_segments <= 4);
         let num_components_except_deletes_and_tempstore =
             crate::core::SegmentComponent::iterator().len() - 2;
-        assert_eq!(
-            num_segments * num_components_except_deletes_and_tempstore,
-            mmap_directory.get_cache_info().mmapped.len()
+        let num_mmapped = mmap_directory.get_cache_info().mmapped.len();
+        assert!(
+            num_mmapped <= num_segments * num_components_except_deletes_and_tempstore,
+            "Expected at most {} mmapped files, got {num_mmapped}",
+            num_segments * num_components_except_deletes_and_tempstore
         );
     }
     // This test failed on CI. The last Mmap is dropped from the merging thread so there might
```
```diff
@@ -26,7 +26,6 @@ pub use ownedbytes::OwnedBytes;
 pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
 pub use self::directory::{Directory, DirectoryClone, DirectoryLock};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
-pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
 pub use self::file_slice::{FileHandle, FileSlice};
 pub use self::ram_directory::RamDirectory;
 pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
```
```diff
@@ -1,3 +1,4 @@
+use std::ops::Range;
 use std::sync::Arc;
 
 use fastfield_codecs::Column;
@@ -31,36 +32,39 @@ impl BytesFastFieldReader {
         Ok(BytesFastFieldReader { idx_reader, values })
     }
 
-    fn range(&self, doc: DocId) -> (usize, usize) {
+    fn range(&self, doc: DocId) -> Range<u64> {
         let idx = doc as u64;
-        let start = self.idx_reader.get_val(idx) as usize;
-        let stop = self.idx_reader.get_val(idx + 1) as usize;
-        (start, stop)
+        let start = self.idx_reader.get_val(idx);
+        let end = self.idx_reader.get_val(idx + 1);
+        start..end
     }
 
-    /// Returns the bytes associated to the given `doc`
+    /// Returns the bytes associated with the given `doc`
     pub fn get_bytes(&self, doc: DocId) -> &[u8] {
-        let (start, stop) = self.range(doc);
-        &self.values.as_slice()[start..stop]
+        let range = self.range(doc);
+        &self.values.as_slice()[range.start as usize..range.end as usize]
    }
 
-    /// Returns the length of the bytes associated to the given `doc`
-    pub fn num_bytes(&self, doc: DocId) -> usize {
-        let (start, stop) = self.range(doc);
-        stop - start
+    /// Returns the length of the bytes associated with the given `doc`
+    pub fn num_bytes(&self, doc: DocId) -> u64 {
+        let range = self.range(doc);
+        range.end - range.start
     }
 
     /// Returns the overall number of bytes in this bytes fast field.
-    pub fn total_num_bytes(&self) -> usize {
-        self.values.len()
+    pub fn total_num_bytes(&self) -> u64 {
+        self.values.len() as u64
     }
 }
 
 impl MultiValueLength for BytesFastFieldReader {
+    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64> {
+        self.range(doc_id)
+    }
     fn get_len(&self, doc_id: DocId) -> u64 {
-        self.num_bytes(doc_id) as u64
+        self.num_bytes(doc_id)
    }
     fn get_total_len(&self) -> u64 {
-        self.total_num_bytes() as u64
+        self.total_num_bytes()
     }
 }
```
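Behind `range(doc)` is a standard offset index: `n + 1` monotonically increasing offsets delimit the per-document byte ranges. A self-contained sketch (types and names are ours, not tantivy's):

```rust
/// Offsets store one extra sentinel entry, so doc `i` owns
/// `values[offsets[i]..offsets[i + 1]]`.
struct ToyBytesColumn {
    offsets: Vec<u64>, // len = num_docs + 1, monotonically increasing
    values: Vec<u8>,
}

impl ToyBytesColumn {
    fn range(&self, doc: u32) -> std::ops::Range<u64> {
        self.offsets[doc as usize]..self.offsets[doc as usize + 1]
    }

    fn get_bytes(&self, doc: u32) -> &[u8] {
        let range = self.range(doc);
        &self.values[range.start as usize..range.end as usize]
    }
}

fn main() {
    let col = ToyBytesColumn {
        offsets: vec![0, 3, 3, 7], // doc 1 has no bytes
        values: b"fooquux".to_vec(),
    };
    assert_eq!(col.get_bytes(0), b"foo");
    assert_eq!(col.get_bytes(1), b"");
    assert_eq!(col.get_bytes(2), b"quux");
}
```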
```diff
@@ -24,7 +24,7 @@ use crate::DocId;
 ///
 /// Once acquired, writing is done by calling
 /// [`.add_document_val(&[u8])`](BytesFastFieldWriter::add_document_val)
-/// once per document, even if there are no bytes associated to it.
+/// once per document, even if there are no bytes associated with it.
 pub struct BytesFastFieldWriter {
     field: Field,
     vals: Vec<u8>,
@@ -45,7 +45,7 @@ impl BytesFastFieldWriter {
     pub fn mem_usage(&self) -> usize {
         self.vals.capacity() + self.doc_index.capacity() * std::mem::size_of::<u64>()
     }
-    /// Access the field associated to the `BytesFastFieldWriter`
+    /// Access the field associated with the `BytesFastFieldWriter`
     pub fn field(&self) -> Field {
         self.field
     }
@@ -67,7 +67,7 @@ impl BytesFastFieldWriter {
         }
     }
 
-    /// Register the bytes associated to a document.
+    /// Register the bytes associated with a document.
     ///
     /// The method returns the `DocId` of the document that was
     /// just written.
```
```diff
@@ -7,7 +7,7 @@ use crate::termdict::{TermDictionary, TermOrdinal};
 use crate::DocId;
 
 /// The facet reader makes it possible to access the list of
-/// facets associated to a given document in a specific
+/// facets associated with a given document in a specific
 /// segment.
 ///
 /// Rather than manipulating `Facet` object directly, the API
@@ -58,7 +58,7 @@ impl FacetReader {
         &self.term_dict
     }
 
-    /// Given a term ordinal returns the term associated to it.
+    /// Given a term ordinal returns the term associated with it.
     pub fn facet_from_ord(
         &mut self,
         facet_ord: TermOrdinal,
@@ -74,7 +74,7 @@ impl FacetReader {
         Ok(())
     }
 
-    /// Return the list of facet ordinals associated to a document.
+    /// Return the list of facet ordinals associated with a document.
     pub fn facet_ords(&self, doc: DocId, output: &mut Vec<u64>) {
         self.term_ords.get_vals(doc, output);
     }
```
```diff
@@ -26,7 +26,7 @@ pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveB
 pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
 pub use self::error::{FastFieldNotAvailableError, Result};
 pub use self::facet_reader::FacetReader;
-pub(crate) use self::multivalued::MultivalueStartIndex;
+pub(crate) use self::multivalued::{get_fastfield_codecs_for_multivalue, MultivalueStartIndex};
 pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
 pub use self::readers::FastFieldReaders;
 pub(crate) use self::readers::{type_and_cardinality, FastType};
@@ -47,7 +47,9 @@ mod writer;
 /// Trait for `BytesFastFieldReader` and `MultiValuedFastFieldReader` to return the length of data
 /// for a doc_id
 pub trait MultiValueLength {
-    /// returns the num of values associated to a doc_id
+    /// returns the positions for a docid
+    fn get_range(&self, doc_id: DocId) -> std::ops::Range<u64>;
+    /// returns the num of values associated with a doc_id
     fn get_len(&self, doc_id: DocId) -> u64;
     /// returns the sum of num values for all doc_ids
     fn get_total_len(&self) -> u64;
```
```diff
@@ -1,10 +1,22 @@
 mod reader;
 mod writer;
 
+use fastfield_codecs::FastFieldCodecType;
+
 pub use self::reader::MultiValuedFastFieldReader;
 pub use self::writer::MultiValuedFastFieldWriter;
 pub(crate) use self::writer::MultivalueStartIndex;
 
+/// The valid codecs for multivalue values excludes the linear interpolation codec.
+///
+/// This limitation is only valid for the values, not the offset index of the multivalue index.
+pub(crate) fn get_fastfield_codecs_for_multivalue() -> [FastFieldCodecType; 2] {
+    [
+        FastFieldCodecType::Bitpacked,
+        FastFieldCodecType::BlockwiseLinear,
+    ]
+}
+
 #[cfg(test)]
 mod tests {
     use proptest::strategy::Strategy;
```
```diff
@@ -30,8 +30,8 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
         }
     }
 
-    /// Returns `[start, end)`, such that the values associated
-    /// to the given document are `start..end`.
+    /// Returns `[start, end)`, such that the values associated with
+    /// the given document are `start..end`.
     #[inline]
     fn range(&self, doc: DocId) -> Range<u64> {
         let idx = doc as u64;
@@ -40,7 +40,7 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
         start..end
     }
 
-    /// Returns the array of values associated to the given `doc`.
+    /// Returns the array of values associated with the given `doc`.
     #[inline]
     fn get_vals_for_range(&self, range: Range<u64>, vals: &mut Vec<Item>) {
         let len = (range.end - range.start) as usize;
@@ -48,7 +48,7 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
         self.vals_reader.get_range(range.start, &mut vals[..]);
     }
 
-    /// Returns the array of values associated to the given `doc`.
+    /// Returns the array of values associated with the given `doc`.
     #[inline]
     pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
         let range = self.range(doc);
@@ -88,6 +88,9 @@ impl<Item: FastValue> MultiValuedFastFieldReader<Item> {
 }
 
 impl<Item: FastValue> MultiValueLength for MultiValuedFastFieldReader<Item> {
+    fn get_range(&self, doc_id: DocId) -> Range<u64> {
+        self.range(doc_id)
+    }
     fn get_len(&self, doc_id: DocId) -> u64 {
         self.num_vals(doc_id) as u64
     }
```
```diff
@@ -1,9 +1,9 @@
 use std::io;
-use std::sync::Mutex;
 
 use fastfield_codecs::{Column, MonotonicallyMappableToU64, VecColumn};
 use fnv::FnvHashMap;
 
+use super::get_fastfield_codecs_for_multivalue;
 use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer, FastFieldType};
 use crate::indexer::doc_id_mapping::DocIdMapping;
 use crate::postings::UnorderedTermId;
@@ -62,7 +62,7 @@ impl MultiValuedFastFieldWriter {
             + self.doc_index.capacity() * std::mem::size_of::<u64>()
     }
 
-    /// Access the field associated to the `MultiValuedFastFieldWriter`
+    /// Access the field associated with the `MultiValuedFastFieldWriter`
     pub fn field(&self) -> Field {
         self.field
     }
@@ -195,7 +195,12 @@ impl MultiValuedFastFieldWriter {
             }
         }
         let col = VecColumn::from(&values[..]);
-        serializer.create_auto_detect_u64_fast_field_with_idx(self.field, col, 1)?;
+        serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
+            self.field,
+            col,
+            1,
+            &get_fastfield_codecs_for_multivalue(),
+        )?;
     }
     Ok(())
 }
@@ -204,112 +209,59 @@ impl MultiValuedFastFieldWriter {
 pub(crate) struct MultivalueStartIndex<'a, C: Column> {
     column: &'a C,
     doc_id_map: &'a DocIdMapping,
-    min_max_opt: Mutex<Option<(u64, u64)>>,
-    random_seeker: Mutex<MultivalueStartIndexRandomSeeker<'a, C>>,
-}
-
-struct MultivalueStartIndexRandomSeeker<'a, C: Column> {
-    seek_head: MultivalueStartIndexIter<'a, C>,
-    seek_next_id: u64,
-}
-impl<'a, C: Column> MultivalueStartIndexRandomSeeker<'a, C> {
-    fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
-        Self {
-            seek_head: MultivalueStartIndexIter {
-                column,
-                doc_id_map,
-                new_doc_id: 0,
-                offset: 0u64,
-            },
-            seek_next_id: 0u64,
-        }
-    }
+    min: u64,
+    max: u64,
 }
 
 impl<'a, C: Column> MultivalueStartIndex<'a, C> {
     pub fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
         assert_eq!(column.num_vals(), doc_id_map.num_old_doc_ids() as u64 + 1);
+        let (min, max) =
+            tantivy_bitpacker::minmax(iter_remapped_multivalue_index(doc_id_map, column))
+                .unwrap_or((0u64, 0u64));
         MultivalueStartIndex {
             column,
             doc_id_map,
-            min_max_opt: Mutex::default(),
-            random_seeker: Mutex::new(MultivalueStartIndexRandomSeeker::new(column, doc_id_map)),
+            min,
+            max,
         }
     }
-
-    fn minmax(&self) -> (u64, u64) {
-        if let Some((min, max)) = *self.min_max_opt.lock().unwrap() {
-            return (min, max);
-        }
-        let (min, max) = tantivy_bitpacker::minmax(self.iter()).unwrap_or((0u64, 0u64));
-        *self.min_max_opt.lock().unwrap() = Some((min, max));
-        (min, max)
-    }
 }
 impl<'a, C: Column> Column for MultivalueStartIndex<'a, C> {
-    fn get_val(&self, idx: u64) -> u64 {
-        let mut random_seeker_lock = self.random_seeker.lock().unwrap();
-        if random_seeker_lock.seek_next_id > idx {
-            *random_seeker_lock =
-                MultivalueStartIndexRandomSeeker::new(self.column, self.doc_id_map);
-        }
-        let to_skip = idx - random_seeker_lock.seek_next_id;
-        random_seeker_lock.seek_next_id = idx + 1;
-        random_seeker_lock.seek_head.nth(to_skip as usize).unwrap()
+    fn get_val(&self, _idx: u64) -> u64 {
+        unimplemented!()
     }
 
     fn min_value(&self) -> u64 {
-        self.minmax().0
+        self.min
     }
 
     fn max_value(&self) -> u64 {
-        self.minmax().1
+        self.max
     }
 
     fn num_vals(&self) -> u64 {
         (self.doc_id_map.num_new_doc_ids() + 1) as u64
     }
 
-    fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
-        Box::new(MultivalueStartIndexIter::new(self.column, self.doc_id_map))
+    fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
+        Box::new(iter_remapped_multivalue_index(
+            self.doc_id_map,
+            &self.column,
+        ))
     }
 }
 
-struct MultivalueStartIndexIter<'a, C: Column> {
-    pub column: &'a C,
-    pub doc_id_map: &'a DocIdMapping,
-    pub new_doc_id: usize,
-    pub offset: u64,
-}
-
-impl<'a, C: Column> MultivalueStartIndexIter<'a, C> {
-    fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
-        Self {
-            column,
-            doc_id_map,
-            new_doc_id: 0,
-            offset: 0,
-        }
-    }
-}
-
-impl<'a, C: Column> Iterator for MultivalueStartIndexIter<'a, C> {
-    type Item = u64;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.new_doc_id > self.doc_id_map.num_new_doc_ids() {
-            return None;
-        }
-        let new_doc_id = self.new_doc_id;
-        self.new_doc_id += 1;
-        let start_offset = self.offset;
-        if new_doc_id < self.doc_id_map.num_new_doc_ids() {
-            let old_doc = self.doc_id_map.get_old_doc_id(new_doc_id as u32) as u64;
-            let num_vals_for_doc = self.column.get_val(old_doc + 1) - self.column.get_val(old_doc);
-            self.offset += num_vals_for_doc;
-        }
-        Some(start_offset)
-    }
+fn iter_remapped_multivalue_index<'a, C: Column>(
+    doc_id_map: &'a DocIdMapping,
+    column: &'a C,
+) -> impl Iterator<Item = u64> + 'a {
+    let mut offset = 0;
+    std::iter::once(0).chain(doc_id_map.iter_old_doc_ids().map(move |old_doc| {
+        let num_vals_for_doc = column.get_val(old_doc as u64 + 1) - column.get_val(old_doc as u64);
+        offset += num_vals_for_doc;
+        offset as u64
+    }))
 }
 
 #[cfg(test)]
@@ -344,11 +296,5 @@ mod tests {
             vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
         );
         assert_eq!(multivalue_start_index.num_vals(), 11);
```
|
||||
assert_eq!(multivalue_start_index.get_val(3), 2);
|
||||
assert_eq!(multivalue_start_index.get_val(5), 5);
|
||||
assert_eq!(multivalue_start_index.get_val(8), 21);
|
||||
assert_eq!(multivalue_start_index.get_val(4), 3);
|
||||
assert_eq!(multivalue_start_index.get_val(0), 0);
|
||||
assert_eq!(multivalue_start_index.get_val(10), 55);
|
||||
}
|
||||
}
|
||||
|
||||
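The `iter_remapped_multivalue_index` helper above replaces the old random-access seeker with a single forward pass over the doc id mapping. A minimal standalone sketch of the same prefix-sum idea, with a hypothetical `starts` column and new-to-old doc ordering standing in for tantivy's actual types:

/// Sketch: given the old start-offset column (one entry per old doc plus a
/// trailing sentinel) and the new->old doc ordering, emit the remapped start
/// offsets as a running prefix sum, mirroring `iter_remapped_multivalue_index`.
fn remapped_starts(starts: &[u64], old_doc_ids: &[usize]) -> Vec<u64> {
    let mut offset = 0u64;
    std::iter::once(0)
        .chain(old_doc_ids.iter().map(|&old_doc| {
            // number of values the old doc holds
            let num_vals = starts[old_doc + 1] - starts[old_doc];
            offset += num_vals;
            offset
        }))
        .collect()
}

fn main() {
    // Three docs holding 2, 0 and 3 values; the doc id mapping reverses them.
    let starts = [0u64, 2, 2, 5];
    assert_eq!(remapped_starts(&starts, &[2, 1, 0]), vec![0, 3, 3, 5]);
}

Because the output is produced strictly in order, the column only needs `iter()`, which is why `get_val` can now be left `unimplemented!()`.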
@@ -135,7 +135,7 @@ impl FastFieldReaders {
Ok(MultiValuedFastFieldReader::open(idx_reader, vals_reader))
}

/// Returns the `u64` fast field reader reader associated to `field`.
/// Returns the `u64` fast field reader reader associated with `field`.
///
/// If `field` is not a u64 fast field, this method returns an Error.
pub fn u64(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {

@@ -143,16 +143,16 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field)
}

/// Returns the `u64` fast field reader reader associated to `field`, regardless of whether the
/// given field is effectively of type `u64` or not.
/// Returns the `u64` fast field reader reader associated with `field`, regardless of whether
/// the given field is effectively of type `u64` or not.
///
/// If not, the fastfield reader will returns the u64-value associated to the original
/// If not, the fastfield reader will returns the u64-value associated with the original
/// FastValue.
pub fn u64_lenient(&self, field: Field) -> crate::Result<Arc<dyn Column<u64>>> {
self.typed_fast_field_reader(field)
}

/// Returns the `i64` fast field reader reader associated to `field`.
/// Returns the `i64` fast field reader reader associated with `field`.
///
/// If `field` is not a i64 fast field, this method returns an Error.
pub fn i64(&self, field: Field) -> crate::Result<Arc<dyn Column<i64>>> {

@@ -160,7 +160,7 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field)
}

/// Returns the `date` fast field reader reader associated to `field`.
/// Returns the `date` fast field reader reader associated with `field`.
///
/// If `field` is not a date fast field, this method returns an Error.
pub fn date(&self, field: Field) -> crate::Result<Arc<dyn Column<DateTime>>> {

@@ -168,7 +168,7 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field)
}

/// Returns the `f64` fast field reader reader associated to `field`.
/// Returns the `f64` fast field reader reader associated with `field`.
///
/// If `field` is not a f64 fast field, this method returns an Error.
pub fn f64(&self, field: Field) -> crate::Result<Arc<dyn Column<f64>>> {

@@ -176,7 +176,7 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field)
}

/// Returns the `bool` fast field reader reader associated to `field`.
/// Returns the `bool` fast field reader reader associated with `field`.
///
/// If `field` is not a bool fast field, this method returns an Error.
pub fn bool(&self, field: Field) -> crate::Result<Arc<dyn Column<bool>>> {

@@ -184,7 +184,7 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field)
}

/// Returns a `u64s` multi-valued fast field reader reader associated to `field`.
/// Returns a `u64s` multi-valued fast field reader reader associated with `field`.
///
/// If `field` is not a u64 multi-valued fast field, this method returns an Error.
pub fn u64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {

@@ -192,15 +192,15 @@ impl FastFieldReaders {
self.typed_fast_field_multi_reader(field)
}

/// Returns a `u64s` multi-valued fast field reader reader associated to `field`, regardless of
/// whether the given field is effectively of type `u64` or not.
/// Returns a `u64s` multi-valued fast field reader reader associated with `field`, regardless
/// of whether the given field is effectively of type `u64` or not.
///
/// If `field` is not a u64 multi-valued fast field, this method returns an Error.
pub fn u64s_lenient(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
self.typed_fast_field_multi_reader(field)
}

/// Returns a `i64s` multi-valued fast field reader reader associated to `field`.
/// Returns a `i64s` multi-valued fast field reader reader associated with `field`.
///
/// If `field` is not a i64 multi-valued fast field, this method returns an Error.
pub fn i64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<i64>> {

@@ -208,7 +208,7 @@ impl FastFieldReaders {
self.typed_fast_field_multi_reader(field)
}

/// Returns a `f64s` multi-valued fast field reader reader associated to `field`.
/// Returns a `f64s` multi-valued fast field reader reader associated with `field`.
///
/// If `field` is not a f64 multi-valued fast field, this method returns an Error.
pub fn f64s(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<f64>> {

@@ -216,7 +216,7 @@ impl FastFieldReaders {
self.typed_fast_field_multi_reader(field)
}

/// Returns a `bools` multi-valued fast field reader reader associated to `field`.
/// Returns a `bools` multi-valued fast field reader reader associated with `field`.
///
/// If `field` is not a bool multi-valued fast field, this method returns an Error.
pub fn bools(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<bool>> {

@@ -224,7 +224,7 @@ impl FastFieldReaders {
self.typed_fast_field_multi_reader(field)
}

/// Returns a `time::OffsetDateTime` multi-valued fast field reader reader associated to
/// Returns a `time::OffsetDateTime` multi-valued fast field reader reader associated with
/// `field`.
///
/// If `field` is not a `time::OffsetDateTime` multi-valued fast field, this method returns an

@@ -234,7 +234,7 @@ impl FastFieldReaders {
self.typed_fast_field_multi_reader(field)
}

/// Returns the `bytes` fast field reader associated to `field`.
/// Returns the `bytes` fast field reader associated with `field`.
///
/// If `field` is not a bytes fast field, returns an Error.
pub fn bytes(&self, field: Field) -> crate::Result<BytesFastFieldReader> {

@@ -70,6 +70,20 @@ impl CompositeFastFieldSerializer {
Ok(())
}

/// Serialize data into a new u64 fast field. The best compression codec of the the provided
/// will be chosen.
pub fn create_auto_detect_u64_fast_field_with_idx_and_codecs<T: MonotonicallyMappableToU64>(
&mut self,
field: Field,
fastfield_accessor: impl Column<T>,
idx: usize,
codec_types: &[FastFieldCodecType],
) -> io::Result<()> {
let field_write = self.composite_write.for_field_with_idx(field, idx);
fastfield_codecs::serialize(fastfield_accessor, field_write, codec_types)?;
Ok(())
}

/// Start serializing a new [u8] fast field. Use the returned writer to write data into the
/// bytes field. To associate the bytes with documents a seperate index must be created on
/// index 0. See bytes/writer.rs::serialize for an example.
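The new `create_auto_detect_u64_fast_field_with_idx_and_codecs` narrows the codec auto-detection to a caller-provided list. A rough sketch of that selection idea, with a hypothetical `estimate` function standing in for the real per-codec estimators in `fastfield_codecs`:

#[derive(Clone, Copy, Debug, PartialEq)]
enum CodecType { Bitpacked, Linear, MultiLinear }

/// Hypothetical size estimate per codec; the real estimators inspect the
/// column's min/max and value shape, these formulas are placeholders.
fn estimate(codec: CodecType, num_vals: u64, num_bits: u8) -> u64 {
    match codec {
        CodecType::Bitpacked => num_vals * num_bits as u64 / 8,
        CodecType::Linear => num_vals / 2 + 64,
        CodecType::MultiLinear => num_vals / 3 + 512,
    }
}

/// Pick the codec with the smallest estimated footprint among the allowed set.
fn pick_codec(allowed: &[CodecType], num_vals: u64, num_bits: u8) -> Option<CodecType> {
    allowed
        .iter()
        .copied()
        .min_by_key(|&codec| estimate(codec, num_vals, num_bits))
}

fn main() {
    // Restricting the allowed set to a single codec, as the multivalue index
    // column now does, makes the "detection" trivial and skips the other
    // estimators entirely.
    assert_eq!(pick_codec(&[CodecType::Bitpacked], 1_000, 12), Some(CodecType::Bitpacked));
}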
@@ -131,7 +131,7 @@ impl FastFieldsWriter {
.sum::<usize>()
}

/// Get the `FastFieldWriter` associated to a field.
/// Get the `FastFieldWriter` associated with a field.
pub fn get_term_id_writer(&self, field: Field) -> Option<&MultiValuedFastFieldWriter> {
// TODO optimize
self.term_id_writers

@@ -139,7 +139,7 @@ impl FastFieldsWriter {
.find(|field_writer| field_writer.field() == field)
}

/// Get the `FastFieldWriter` associated to a field.
/// Get the `FastFieldWriter` associated with a field.
pub fn get_field_writer(&self, field: Field) -> Option<&IntFastFieldWriter> {
// TODO optimize
self.single_value_writers

@@ -147,7 +147,7 @@ impl FastFieldsWriter {
.find(|field_writer| field_writer.field() == field)
}

/// Get the `FastFieldWriter` associated to a field.
/// Get the `FastFieldWriter` associated with a field.
pub fn get_field_writer_mut(&mut self, field: Field) -> Option<&mut IntFastFieldWriter> {
// TODO optimize
self.single_value_writers

@@ -155,7 +155,7 @@ impl FastFieldsWriter {
.find(|field_writer| field_writer.field() == field)
}

/// Get the `FastFieldWriter` associated to a field.
/// Get the `FastFieldWriter` associated with a field.
pub fn get_term_id_writer_mut(
&mut self,
field: Field,

@@ -294,7 +294,7 @@ impl IntFastFieldWriter {
/// Records a new value.
///
/// The n-th value being recorded is implicitly
/// associated to the document with the `DocId` n.
/// associated with the document with the `DocId` n.
/// (Well, `n-1` actually because of 0-indexing)
pub fn add_val(&mut self, val: u64) {
self.vals.add(val);

@@ -313,7 +313,7 @@ impl IntFastFieldWriter {
/// (or use the default value) and records it.
///
///
/// Extract the value associated to the fast field for
/// Extract the value associated with the fast field for
/// this document.
///
/// i64 and f64 are remapped to u64 using the logic

@@ -383,7 +383,7 @@ struct WriterFastFieldAccessProvider<'map, 'bitp> {
}

impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
/// Return the value associated to the given doc.
/// Return the value associated with the given doc.
///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance
/// reasons.

@@ -391,15 +391,8 @@ impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
/// # Panics
///
/// May panic if `doc` is greater than the index.
fn get_val(&self, doc: u64) -> u64 {
if let Some(doc_id_map) = self.doc_id_map {
self.vals
.get(doc_id_map.get_old_doc_id(doc as u32) as usize) // consider extra
// FastFieldReader wrapper for
// non doc_id_map
} else {
self.vals.get(doc as usize)
}
fn get_val(&self, _doc: u64) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {

@@ -1,4 +1,4 @@
//! The fieldnorm represents the length associated to
//! The fieldnorm represents the length associated with
//! a given Field of a given document.
//!
//! This metric is important to compute the score of a

@@ -47,9 +47,9 @@ impl FieldNormReaders {
}
}

/// Reads the fieldnorm associated to a document.
/// Reads the fieldnorm associated with a document.
///
/// The [fieldnorm](FieldNormReader::fieldnorm) represents the length associated to
/// The [fieldnorm](FieldNormReader::fieldnorm) represents the length associated with
/// a given Field of a given document.
#[derive(Clone)]
pub struct FieldNormReader(ReaderImplEnum);

@@ -104,7 +104,7 @@ impl FieldNormReader {
}
}

/// Returns the `fieldnorm` associated to a doc id.
/// Returns the `fieldnorm` associated with a doc id.
/// The fieldnorm is a value approximating the number
/// of tokens in a given field of the `doc_id`.
///

@@ -123,7 +123,7 @@ impl FieldNormReader {
}
}

/// Returns the `fieldnorm_id` associated to a document.
/// Returns the `fieldnorm_id` associated with a document.
#[inline]
pub fn fieldnorm_id(&self, doc_id: DocId) -> u8 {
match &self.0 {
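For context on the fieldnorm doc fixes above: the fieldnorm is stored as a single byte per document, so token counts are quantized through a small lookup table. A self-contained sketch of such an id/length round-trip (the table here is illustrative, not tantivy's exact one):

/// Illustrative quantization table: exact up to 16 tokens, then geometric steps.
const FIELD_NORMS_TABLE: [u32; 20] = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24, 36, 54,
];

/// Map a token count to the largest table entry not exceeding it.
fn fieldnorm_id(num_tokens: u32) -> u8 {
    match FIELD_NORMS_TABLE.binary_search(&num_tokens) {
        Ok(idx) => idx as u8,
        Err(idx) => (idx - 1) as u8,
    }
}

fn main() {
    assert_eq!(fieldnorm_id(9), 9); // exact for short fields
    assert_eq!(FIELD_NORMS_TABLE[fieldnorm_id(40) as usize], 36); // approximate beyond
}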
@@ -188,7 +188,7 @@ impl DeleteCursor {
}
}

#[cfg_attr(feature = "cargo-clippy", allow(clippy::wrong_self_convention))]
#[allow(clippy::wrong_self_convention)]
fn is_behind_opstamp(&mut self, target_opstamp: Opstamp) -> bool {
self.get()
.map(|operation| operation.opstamp < target_opstamp)

@@ -24,7 +24,7 @@ impl SegmentDocIdMapping {

/// Returns an iterator over the old document addresses, ordered by the new document ids.
///
/// In the returned `DocAddress`, the `segment_ord` is the ordinal of targetted segment
/// In the returned `DocAddress`, the `segment_ord` is the ordinal of targeted segment
/// in the list of merged segments.
pub(crate) fn iter_old_doc_addrs(&self) -> impl Iterator<Item = DocAddress> + '_ {
self.new_doc_id_to_old_doc_addr.iter().copied()

@@ -34,10 +34,6 @@ impl SegmentDocIdMapping {
self.new_doc_id_to_old_doc_addr.len()
}

pub(crate) fn get_old_doc_addr(&self, new_doc_id: DocId) -> DocAddress {
self.new_doc_id_to_old_doc_addr[new_doc_id as usize]
}

/// This flags means the segments are simply stacked in the order of their ordinal.
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
///

src/indexer/flat_map_with_buffer.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
pub struct FlatMapWithBuffer<T, F, Iter> {
buffer: Vec<T>,
fill_buffer: F,
underlying_it: Iter,
}

impl<T, F, Iter, I> Iterator for FlatMapWithBuffer<T, F, Iter>
where
Iter: Iterator<Item = I>,
F: Fn(I, &mut Vec<T>),
{
type Item = T;

fn next(&mut self) -> Option<Self::Item> {
while self.buffer.is_empty() {
let next_el = self.underlying_it.next()?;
(self.fill_buffer)(next_el, &mut self.buffer);
// We will pop elements, so we reverse the buffer first.
self.buffer.reverse();
}
self.buffer.pop()
}
}

pub trait FlatMapWithBufferIter: Iterator {
/// Function similar to `flat_map`, but allows reusing a shared `Vec`.
fn flat_map_with_buffer<F, T>(self, fill_buffer: F) -> FlatMapWithBuffer<T, F, Self>
where
F: Fn(Self::Item, &mut Vec<T>),
Self: Sized,
{
FlatMapWithBuffer {
buffer: Vec::with_capacity(10),
fill_buffer,
underlying_it: self,
}
}
}

impl<T: ?Sized> FlatMapWithBufferIter for T where T: Iterator {}

#[cfg(test)]
mod tests {
use crate::indexer::flat_map_with_buffer::FlatMapWithBufferIter;

#[test]
fn test_flat_map_with_buffer_empty() {
let mut empty_iter = std::iter::empty::<usize>()
.flat_map_with_buffer(|_val: usize, _buffer: &mut Vec<usize>| {});
assert!(empty_iter.next().is_none());
}

#[test]
fn test_flat_map_with_buffer_simple() {
let vals: Vec<usize> = (1..5)
.flat_map_with_buffer(|val: usize, buffer: &mut Vec<usize>| buffer.extend(0..val))
.collect();
assert_eq!(&[0, 0, 1, 0, 1, 2, 0, 1, 2, 3], &vals[..]);
}

#[test]
fn test_flat_map_filling_no_elements_does_not_stop_iterator() {
let vals: Vec<usize> = [2, 0, 0, 3]
.into_iter()
.flat_map_with_buffer(|val: usize, buffer: &mut Vec<usize>| buffer.extend(0..val))
.collect();
assert_eq!(&[0, 1, 0, 1, 2], &vals[..]);
}
}
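A quick usage sketch for the new adapter, assuming the `FlatMapWithBufferIter` trait above is in scope: it behaves like `flat_map`, but the closure pushes children into a single reused `Vec` instead of returning a fresh iterator per item:

// Equivalent outputs; `flat_map_with_buffer` avoids allocating a new
// collection per input element by filling the shared buffer instead.
fn demo() {
    let with_buffer: Vec<u32> = (1u32..4)
        .flat_map_with_buffer(|n, buffer: &mut Vec<u32>| buffer.extend(0..n))
        .collect();
    let with_flat_map: Vec<u32> = (1u32..4).flat_map(|n| 0..n).collect();
    assert_eq!(with_buffer, with_flat_map); // [0, 0, 1, 0, 1, 2]
}

The reverse-then-pop trick in `next` preserves the original order while always popping from the cheap end of the buffer.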
@@ -370,9 +370,9 @@ impl IndexWriter {
/// This method is useful only for users trying to do complex
/// operations, like converting an index format to another.
///
/// It is safe to start writing file associated to the new `Segment`.
/// It is safe to start writing file associated with the new `Segment`.
/// These will not be garbage collected as long as an instance object of
/// `SegmentMeta` object associated to the new `Segment` is "alive".
/// `SegmentMeta` object associated with the new `Segment` is "alive".
pub fn new_segment(&self) -> Segment {
self.index.new_segment()
}

@@ -1395,6 +1395,35 @@ mod tests {
assert!(commit_again.is_ok());
}

#[test]
fn test_sort_by_multivalue_field_error() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let options = NumericOptions::default().set_fast(Cardinality::MultiValues);
schema_builder.add_u64_field("id", options);
let schema = schema_builder.build();

let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
};

let err = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()
.unwrap_err();
assert_eq!(
err.to_string(),
"An invalid argument was passed: 'Only single value fast field Cardinality supported \
for sorting index id'"
);

Ok(())
}

#[test]
fn test_delete_with_sort_by_field() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();

@@ -6,16 +6,18 @@ use fastfield_codecs::VecColumn;
use itertools::Itertools;
use measure_time::debug_time;

use super::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueIndexColumn;
use crate::core::{Segment, SegmentReader};
use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{
AliveBitSet, Column, CompositeFastFieldSerializer, MultiValueLength, MultiValuedFastFieldReader,
get_fastfield_codecs_for_multivalue, AliveBitSet, Column, CompositeFastFieldSerializer,
MultiValueLength, MultiValuedFastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
use crate::indexer::sorted_doc_id_column::SortedDocIdColumn;
use crate::indexer::sorted_doc_id_multivalue_column::SortedDocIdMultiValueColumn;
use crate::indexer::sorted_doc_id_column::RemappedDocIdColumn;
use crate::indexer::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueColumn;
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{Cardinality, Field, FieldType, Schema};

@@ -310,7 +312,7 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let fast_field_accessor = SortedDocIdColumn::new(&self.readers, doc_id_mapping, field);
let fast_field_accessor = RemappedDocIdColumn::new(&self.readers, doc_id_mapping, field);
fast_field_serializer.create_auto_detect_u64_fast_field(field, fast_field_accessor)?;

Ok(())

@@ -423,33 +425,17 @@ impl IndexMerger {
// Creating the index file to point into the data, generic over `BytesFastFieldReader` and
// `MultiValuedFastFieldReader`
//
fn write_1_n_fast_field_idx_generic<T: MultiValueLength>(
fn write_1_n_fast_field_idx_generic<T: MultiValueLength + Send + Sync>(
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
reader_and_field_accessors: &[(&SegmentReader, T)],
) -> crate::Result<Vec<u64>> {
// We can now create our `idx` serializer, and in a second pass,
// can effectively push the different indexes.
segment_and_ff_readers: &[(&SegmentReader, T)],
) -> crate::Result<()> {
let column =
RemappedDocIdMultiValueIndexColumn::new(segment_and_ff_readers, doc_id_mapping);

// copying into a temp vec is not ideal, but the fast field codec api requires random
// access, which is used in the estimation. It's possible to 1. calculate random
// access on the fly or 2. change the codec api to make random access optional, but
// they both have also major drawbacks.

let mut offsets = Vec::with_capacity(doc_id_mapping.len());
let mut offset = 0;
for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
offsets.push(offset);
offset += reader.get_len(old_doc_addr.doc_id) as u64;
}
offsets.push(offset);

let fastfield_accessor = VecColumn::from(&offsets[..]);

fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
Ok(offsets)
fast_field_serializer.create_auto_detect_u64_fast_field(field, column)?;
Ok(())
}
/// Returns the fastfield index (index for the data, not the data).
fn write_multi_value_fast_field_idx(

@@ -457,8 +443,8 @@ impl IndexMerger {
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<Vec<u64>> {
let reader_ordinal_and_field_accessors = self
) -> crate::Result<()> {
let segment_and_ff_readers = self
.readers
.iter()
.map(|reader| {

@@ -477,7 +463,7 @@ impl IndexMerger {
field,
fast_field_serializer,
doc_id_mapping,
&reader_ordinal_and_field_accessors,
&segment_and_ff_readers,
)
}

@@ -526,7 +512,12 @@ impl IndexMerger {
}

let col = VecColumn::from(&vals[..]);
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(field, col, 1)?;
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
field,
col,
1,
&get_fastfield_codecs_for_multivalue(),
)?;
}
Ok(())
}

@@ -561,20 +552,21 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
// Multifastfield consists in 2 fastfields.
// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.

// First we merge the idx fast field.
let offsets =
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;

self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;

let fastfield_accessor =
SortedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, &offsets, field);
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
RemappedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, field);
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx_and_codecs(
field,
fastfield_accessor,
1,
&get_fastfield_codecs_for_multivalue(),
)?;

Ok(())

@@ -586,7 +578,7 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let reader_and_field_accessors = self
let segment_and_ff_readers = self
.readers
.iter()
.map(|reader| {

@@ -597,17 +589,17 @@ impl IndexMerger {
(reader, bytes_reader)
})
.collect::<Vec<_>>();

Self::write_1_n_fast_field_idx_generic(
field,
fast_field_serializer,
doc_id_mapping,
&reader_and_field_accessors,
&segment_and_ff_readers,
)?;

let mut serialize_vals = fast_field_serializer.new_bytes_fast_field(field);

for old_doc_addr in doc_id_mapping.iter_old_doc_addrs() {
let bytes_reader = &reader_and_field_accessors[old_doc_addr.segment_ord as usize].1;
let bytes_reader = &segment_and_ff_readers[old_doc_addr.segment_ord as usize].1;
let val = bytes_reader.get_bytes(old_doc_addr.doc_id);
serialize_vals.write_all(val)?;
}

@@ -3,6 +3,7 @@ pub mod delete_queue;
pub mod demuxer;
pub mod doc_id_mapping;
mod doc_opstamp_mapping;
mod flat_map_with_buffer;
pub mod index_writer;
mod index_writer_status;
mod json_term_writer;

@@ -17,7 +17,7 @@ impl<'a> PreparedCommit<'a> {
}
}

/// Returns the opstamp associated to the prepared commit.
/// Returns the opstamp associated with the prepared commit.
pub fn opstamp(&self) -> Opstamp {
self.opstamp
}

@@ -24,12 +24,25 @@ impl SegmentSerializer {
// In the merge case this is not necessary because we can kmerge the already sorted
// segments
let remapping_required = segment.index().settings().sort_by_field.is_some() && !is_in_merge;
let store_component = if remapping_required {
SegmentComponent::TempStore
let settings = segment.index().settings().clone();
let store_writer = if remapping_required {
let store_write = segment.open_write(SegmentComponent::TempStore)?;
StoreWriter::new(
store_write,
crate::store::Compressor::None,
0, // we want random access on the docs, so we choose a minimal block size. Every
// doc will get its own block.
settings.docstore_compress_dedicated_thread,
)?
} else {
SegmentComponent::Store
let store_write = segment.open_write(SegmentComponent::Store)?;
StoreWriter::new(
store_write,
settings.docstore_compression,
settings.docstore_blocksize,
settings.docstore_compress_dedicated_thread,
)?
};
let store_write = segment.open_write(store_component)?;

let fast_field_write = segment.open_write(SegmentComponent::FastFields)?;
let fast_field_serializer = CompositeFastFieldSerializer::from_write(fast_field_write)?;

@@ -38,13 +51,6 @@ impl SegmentSerializer {
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;

let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
let settings = segment.index().settings();
let store_writer = StoreWriter::new(
store_write,
settings.docstore_compression,
settings.docstore_blocksize,
settings.docstore_compress_dedicated_thread,
)?;
Ok(SegmentSerializer {
segment,
store_writer,
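The serializer above now routes sorted segments through an uncompressed temp store with a block size of 0, so each document lands in its own block. A toy model of why block size drives random-access cost (hypothetical sizes, not the real `StoreWriter` behavior):

/// With block-compressed storage, fetching one doc decodes its whole block.
/// One doc per block makes single-doc reads proportional to the doc itself,
/// which is what the doc-id remapping pass wants; larger blocks favor scans.
fn decoded_bytes_per_random_read(doc_size: usize, docs_per_block: usize) -> usize {
    doc_size * docs_per_block.max(1)
}

fn main() {
    let doc_size = 512;
    // Temp store used for remapping: one doc per block.
    assert_eq!(decoded_bytes_per_random_read(doc_size, 1), 512);
    // A store packing ~32 docs per block does ~32x the decode work per lookup.
    assert_eq!(decoded_bytes_per_random_read(doc_size, 32), 16_384);
}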
@@ -133,15 +133,15 @@ fn merge(
|
||||
|
||||
/// Advanced: Merges a list of segments from different indices in a new index.
|
||||
///
|
||||
/// Returns `TantivyError` if the the indices list is empty or their
|
||||
/// Returns `TantivyError` if the indices list is empty or their
|
||||
/// schemas don't match.
|
||||
///
|
||||
/// `output_directory`: is assumed to be empty.
|
||||
///
|
||||
/// # Warning
|
||||
/// This function does NOT check or take the `IndexWriter` is running. It is not
|
||||
/// meant to work if you have an IndexWriter running for the origin indices, or
|
||||
/// the destination Index.
|
||||
/// meant to work if you have an `IndexWriter` running for the origin indices, or
|
||||
/// the destination `Index`.
|
||||
#[doc(hidden)]
|
||||
pub fn merge_indices<T: Into<Box<dyn Directory>>>(
|
||||
indices: &[Index],
|
||||
@@ -179,15 +179,15 @@ pub fn merge_indices<T: Into<Box<dyn Directory>>>(
|
||||
/// Advanced: Merges a list of segments from different indices in a new index.
|
||||
/// Additional you can provide a delete bitset for each segment to ignore doc_ids.
|
||||
///
|
||||
/// Returns `TantivyError` if the the indices list is empty or their
|
||||
/// Returns `TantivyError` if the indices list is empty or their
|
||||
/// schemas don't match.
|
||||
///
|
||||
/// `output_directory`: is assumed to be empty.
|
||||
///
|
||||
/// # Warning
|
||||
/// This function does NOT check or take the `IndexWriter` is running. It is not
|
||||
/// meant to work if you have an IndexWriter running for the origin indices, or
|
||||
/// the destination Index.
|
||||
/// meant to work if you have an `IndexWriter` running for the origin indices, or
|
||||
/// the destination `Index`.
|
||||
#[doc(hidden)]
|
||||
pub fn merge_filtered_segments<T: Into<Box<dyn Directory>>>(
|
||||
segments: &[Segment],
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use fastfield_codecs::MonotonicallyMappableToU64;
|
||||
use itertools::Itertools;
|
||||
|
||||
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
|
||||
use super::operation::AddOperation;
|
||||
@@ -157,7 +158,13 @@ impl SegmentWriter {
|
||||
|
||||
fn index_document(&mut self, doc: &Document) -> crate::Result<()> {
|
||||
let doc_id = self.max_doc;
|
||||
for (field, values) in doc.get_sorted_field_values() {
|
||||
let vals_grouped_by_field = doc
|
||||
.field_values()
|
||||
.iter()
|
||||
.sorted_by_key(|el| el.field())
|
||||
.group_by(|el| el.field());
|
||||
for (field, field_values) in &vals_grouped_by_field {
|
||||
let values = field_values.map(|field_value| field_value.value());
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
let make_schema_error = || {
|
||||
crate::TantivyError::SchemaError(format!(
|
||||
@@ -198,24 +205,16 @@ impl SegmentWriter {
|
||||
}
|
||||
FieldType::Str(_) => {
|
||||
let mut token_streams: Vec<BoxTokenStream> = vec![];
|
||||
let mut offsets = vec![];
|
||||
let mut total_offset = 0;
|
||||
|
||||
for value in values {
|
||||
match value {
|
||||
Value::PreTokStr(tok_str) => {
|
||||
offsets.push(total_offset);
|
||||
if let Some(last_token) = tok_str.tokens.last() {
|
||||
total_offset += last_token.offset_to;
|
||||
}
|
||||
token_streams
|
||||
.push(PreTokenizedStream::from(tok_str.clone()).into());
|
||||
}
|
||||
Value::Str(ref text) => {
|
||||
let text_analyzer =
|
||||
&self.per_field_text_analyzers[field.field_id() as usize];
|
||||
offsets.push(total_offset);
|
||||
total_offset += text.len();
|
||||
token_streams.push(text_analyzer.token_stream(text));
|
||||
}
|
||||
_ => (),
|
||||
@@ -284,9 +283,8 @@ impl SegmentWriter {
|
||||
}
|
||||
FieldType::JsonObject(_) => {
|
||||
let text_analyzer = &self.per_field_text_analyzers[field.field_id() as usize];
|
||||
let json_values_it = values
|
||||
.iter()
|
||||
.map(|value| value.as_json().ok_or_else(make_schema_error));
|
||||
let json_values_it =
|
||||
values.map(|value| value.as_json().ok_or_else(make_schema_error));
|
||||
index_json_values(
|
||||
doc_id,
|
||||
json_values_it,
|
||||
@@ -374,9 +372,9 @@ fn remap_and_write(
|
||||
doc_id_map,
|
||||
)?;
|
||||
|
||||
debug!("resort-docstore");
|
||||
// finalize temp docstore and create version, which reflects the doc_id_map
|
||||
if let Some(doc_id_map) = doc_id_map {
|
||||
debug!("resort-docstore");
|
||||
let store_write = serializer
|
||||
.segment_mut()
|
||||
.open_write(SegmentComponent::Store)?;
|
||||
@@ -393,7 +391,8 @@ fn remap_and_write(
|
||||
serializer
|
||||
.segment()
|
||||
.open_read(SegmentComponent::TempStore)?,
|
||||
50,
|
||||
1, /* The docstore is configured to have one doc per block, and each doc is accessed
|
||||
* only once: we don't need caching. */
|
||||
)?;
|
||||
for old_doc_id in doc_id_map.iter_old_doc_ids() {
|
||||
let doc_bytes = store_read.get_document_bytes(old_doc_id)?;
|
||||
|
||||
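The `index_document` rewrite above replaces `get_sorted_field_values()` with an inline sort-then-group pass. A standalone sketch of that itertools pattern, using plain `(field_id, value)` pairs instead of tantivy's `FieldValue`:

use itertools::Itertools;

fn main() {
    let field_values = vec![(2u32, "b1"), (1, "a1"), (2, "b2"), (1, "a2")];
    // Sort by field id, then walk each field's values as one contiguous group.
    let groups = field_values
        .iter()
        .sorted_by_key(|(field, _)| *field)
        .group_by(|(field, _)| *field);
    let mut grouped: Vec<(u32, Vec<&str>)> = Vec::new();
    for (field, group) in &groups {
        grouped.push((field, group.map(|(_, value)| *value).collect()));
    }
    // All values of a field are visited together, in field order.
    assert_eq!(grouped, vec![(1, vec!["a1", "a2"]), (2, vec!["b1", "b2"])]);
}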
@@ -5,9 +5,9 @@ use itertools::Itertools;

use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocAddress, SegmentReader};
use crate::SegmentReader;

pub(crate) struct SortedDocIdColumn<'a> {
pub(crate) struct RemappedDocIdColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<Arc<dyn Column<u64>>>,
min_value: u64,

@@ -37,7 +37,7 @@ fn compute_min_max_val(
.into_option()
}

impl<'a> SortedDocIdColumn<'a> {
impl<'a> RemappedDocIdColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,

@@ -68,7 +68,7 @@ impl<'a> SortedDocIdColumn<'a> {
})
.collect::<Vec<_>>();

SortedDocIdColumn {
RemappedDocIdColumn {
doc_id_mapping,
fast_field_readers,
min_value,

@@ -78,13 +78,9 @@ impl<'a> SortedDocIdColumn<'a> {
}
}

impl<'a> Column for SortedDocIdColumn<'a> {
fn get_val(&self, doc: u64) -> u64 {
let DocAddress {
doc_id,
segment_ord,
} = self.doc_id_mapping.get_old_doc_addr(doc as u32);
self.fast_field_readers[segment_ord as usize].get_val(doc_id as u64)
impl<'a> Column for RemappedDocIdColumn<'a> {
fn get_val(&self, _doc: u64) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {

@@ -2,26 +2,24 @@ use std::cmp;

use fastfield_codecs::Column;

use super::flat_map_with_buffer::FlatMapWithBufferIter;
use crate::fastfield::{MultiValueLength, MultiValuedFastFieldReader};
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocId, SegmentReader};
use crate::{DocAddress, SegmentReader};

// We can now initialize our serializer, and push it the different values
pub(crate) struct SortedDocIdMultiValueColumn<'a> {
pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
offsets: &'a [u64],
min_value: u64,
max_value: u64,
num_vals: u64,
}

impl<'a> SortedDocIdMultiValueColumn<'a> {
impl<'a> RemappedDocIdMultiValueColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
offsets: &'a [u64],
field: Field,
) -> Self {
// Our values are bitpacked and we need to know what should be

@@ -58,10 +56,9 @@ impl<'a> SortedDocIdMultiValueColumn<'a> {
min_value = 0;
max_value = 0;
}
SortedDocIdMultiValueColumn {
RemappedDocIdMultiValueColumn {
doc_id_mapping,
fast_field_readers,
offsets,
min_value,
max_value,
num_vals: num_vals as u64,

@@ -69,41 +66,18 @@ impl<'a> SortedDocIdMultiValueColumn<'a> {
}
}

impl<'a> Column for SortedDocIdMultiValueColumn<'a> {
fn get_val(&self, pos: u64) -> u64 {
// use the offsets index to find the doc_id which will contain the position.
// the offsets are strictly increasing so we can do a simple search on it.
let new_doc_id: DocId = self
.offsets
.iter()
.position(|&offset| offset > pos)
.expect("pos is out of bounds") as DocId
- 1u32;

// now we need to find the position of `pos` in the multivalued bucket
let num_pos_covered_until_now = self.offsets[new_doc_id as usize];
let pos_in_values = pos - num_pos_covered_until_now;

let old_doc_addr = self.doc_id_mapping.get_old_doc_addr(new_doc_id);
let num_vals =
self.fast_field_readers[old_doc_addr.segment_ord as usize].get_len(old_doc_addr.doc_id);
assert!(num_vals >= pos_in_values);
let mut vals = Vec::new();
self.fast_field_readers[old_doc_addr.segment_ord as usize]
.get_vals(old_doc_addr.doc_id, &mut vals);

vals[pos_in_values as usize]
impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
fn get_val(&self, _pos: u64) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.doc_id_mapping
.iter_old_doc_addrs()
.flat_map(|old_doc_addr| {
.flat_map_with_buffer(|old_doc_addr: DocAddress, buffer| {
let ff_reader = &self.fast_field_readers[old_doc_addr.segment_ord as usize];
let mut vals = Vec::new();
ff_reader.get_vals(old_doc_addr.doc_id, &mut vals);
vals.into_iter()
ff_reader.get_vals(old_doc_addr.doc_id, buffer);
}),
)
}

@@ -119,3 +93,76 @@ impl<'a> Column for SortedDocIdMultiValueColumn<'a> {
self.num_vals
}
}

pub(crate) struct RemappedDocIdMultiValueIndexColumn<'a, T: MultiValueLength> {
doc_id_mapping: &'a SegmentDocIdMapping,
multi_value_length_readers: Vec<&'a T>,
min_value: u64,
max_value: u64,
num_vals: u64,
}

impl<'a, T: MultiValueLength> RemappedDocIdMultiValueIndexColumn<'a, T> {
pub(crate) fn new(
segment_and_ff_readers: &'a [(&'a SegmentReader, T)],
doc_id_mapping: &'a SegmentDocIdMapping,
) -> Self {
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Column.
let mut num_vals = 0;
let min_value = 0;
let mut max_value = 0;
let mut multi_value_length_readers = Vec::with_capacity(segment_and_ff_readers.len());
for segment_and_ff_reader in segment_and_ff_readers {
let segment_reader = segment_and_ff_reader.0;
let multi_value_length_reader = &segment_and_ff_reader.1;
if !segment_reader.has_deletes() {
max_value += multi_value_length_reader.get_total_len();
} else {
for doc in segment_reader.doc_ids_alive() {
max_value += multi_value_length_reader.get_len(doc);
}
}
num_vals += segment_reader.num_docs() as u64;
multi_value_length_readers.push(multi_value_length_reader);
}
Self {
doc_id_mapping,
multi_value_length_readers,
min_value,
max_value,
num_vals,
}
}
}

impl<'a, T: MultiValueLength + Send + Sync> Column for RemappedDocIdMultiValueIndexColumn<'a, T> {
fn get_val(&self, _pos: u64) -> u64 {
unimplemented!()
}

fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
let mut offset = 0;
Box::new(
std::iter::once(0).chain(self.doc_id_mapping.iter_old_doc_addrs().map(
move |old_doc_addr| {
let ff_reader =
&self.multi_value_length_readers[old_doc_addr.segment_ord as usize];
offset += ff_reader.get_len(old_doc_addr.doc_id);
offset
},
)),
)
}
fn min_value(&self) -> u64 {
self.min_value
}

fn max_value(&self) -> u64 {
self.max_value
}

fn num_vals(&self) -> u64 {
self.num_vals
}
}

@@ -106,7 +106,7 @@ impl BlockDecoder {

pub trait VIntEncoder {
/// Compresses an array of `u32` integers,
/// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_ encoding)
/// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
/// and variable bytes encoding.
///
/// The method takes an array of ints to compress, and returns
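The hunk above only fixes the broken link in the `VIntEncoder` docs; as a refresher, here is a self-contained sketch of the delta-plus-varint scheme it describes (not tantivy's actual implementation):

/// Delta-encode a sorted doc id list, then write each gap as a LEB128 varint.
fn compress_sorted(doc_ids: &[u32], offset: u32) -> Vec<u8> {
    let mut out = Vec::new();
    let mut previous = offset;
    for &doc in doc_ids {
        let mut gap = doc - previous; // small gaps => short varints
        previous = doc;
        loop {
            let byte = (gap & 0x7f) as u8;
            gap >>= 7;
            if gap == 0 {
                out.push(byte);
                break;
            }
            out.push(byte | 0x80); // continuation bit
        }
    }
    out
}

fn main() {
    // Dense id lists compress to roughly one byte per gap.
    assert_eq!(compress_sorted(&[3, 7, 300], 0), vec![3, 4, 0xA5, 0x02]);
}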
@@ -31,7 +31,7 @@ pub use self::term_info::TermInfo;

pub(crate) type UnorderedTermId = u64;

#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
#[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
pub(crate) enum FreqReadingOption {
NoFreq,

@@ -47,11 +47,11 @@ impl<'a> Iterator for VInt32Reader<'a> {
}
}

/// Recorder is in charge of recording relevant information about
/// `Recorder` is in charge of recording relevant information about
/// the presence of a term in a document.
///
/// Depending on the `TextIndexingOptions` associated to the
/// field, the recorder may records
/// Depending on the [`TextOptions`](crate::schema::TextOptions) associated
/// with the field, the recorder may record:
/// * the document frequency
/// * the document id
/// * the term frequency

@@ -7,7 +7,7 @@ use crate::postings::compression::COMPRESSION_BLOCK_SIZE;
use crate::postings::{branchless_binary_search, BlockSegmentPostings, Postings};
use crate::{DocId, TERMINATED};

/// `SegmentPostings` represents the inverted list or postings associated to
/// `SegmentPostings` represents the inverted list or postings associated with
/// a term in a `Segment`.
///
/// As we iterate through the `SegmentPostings`, the frequencies are optionally decoded.

@@ -216,7 +216,7 @@ impl HasLen for SegmentPostings {
}

impl Postings for SegmentPostings {
/// Returns the frequency associated to the current document.
/// Returns the frequency associated with the current document.
/// If the schema is set up so that no frequency have been encoded,
/// this method should always return 1.
///

@@ -199,7 +199,7 @@ impl TermHashMap {
/// `update` create a new entry for a given key if it does not exists
/// or updates the existing entry.
///
/// The actual logic for this update is define in the the `updater`
/// The actual logic for this update is define in the `updater`
/// argument.
///
/// If the key is not present, `updater` will receive `None` and

@@ -4,7 +4,7 @@ use std::ops::Range;

use common::{BinarySerializable, FixedSize};

/// `TermInfo` wraps the metadata associated to a Term.
/// `TermInfo` wraps the metadata associated with a Term.
/// It is segment-local.
#[derive(Debug, Default, Eq, PartialEq, Clone)]
pub struct TermInfo {

@@ -17,7 +17,7 @@ impl Query for AllQuery {
}
}

/// Weight associated to the `AllQuery` query.
/// Weight associated with the `AllQuery` query.
pub struct AllWeight;

impl Weight for AllWeight {

@@ -37,7 +37,7 @@ impl Weight for AllWeight {
}
}

/// Scorer associated to the `AllQuery` query.
/// Scorer associated with the `AllQuery` query.
pub struct AllScorer {
doc: DocId,
max_doc: DocId,

@@ -14,7 +14,7 @@ use crate::DocId;
/// when the bitset is sparse
pub struct BitSetDocSet {
docs: BitSet,
cursor_bucket: u32, //< index associated to the current tiny bitset
cursor_bucket: u32, //< index associated with the current tiny bitset
cursor_tinybitset: TinySet,
doc: u32,
}

@@ -144,7 +144,7 @@ fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>,

/// Implements the WAND (Weak AND) algorithm for dynamic pruning
/// described in the paper "Faster Top-k Document Retrieval Using Block-Max Indexes".
/// Link: http://engineering.nyu.edu/~suel/papers/bmw.pdf
/// Link: <http://engineering.nyu.edu/~suel/papers/bmw.pdf>
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: Score,
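The `block_wand` hunk only touches the paper link, but the pruning idea is worth a sketch: order the term scorers by current doc and find the first prefix whose summed score upper bounds can beat the threshold (hypothetical minimal types here, not the real `TermScorer`):

struct TermList { doc: u32, max_score: f32 }

/// Find the pivot: the first position (in doc order) where the accumulated
/// upper bounds exceed the threshold. Documents before the pivot cannot
/// qualify for the top-k and can be skipped wholesale.
fn find_pivot(lists: &mut [TermList], threshold: f32) -> Option<usize> {
    lists.sort_by_key(|list| list.doc);
    let mut upper_bound = 0.0f32;
    for (idx, list) in lists.iter().enumerate() {
        upper_bound += list.max_score;
        if upper_bound > threshold {
            return Some(idx);
        }
    }
    None // no document can beat the current top-k threshold
}

fn main() {
    let mut lists = vec![
        TermList { doc: 10, max_score: 1.0 },
        TermList { doc: 12, max_score: 2.5 },
        TermList { doc: 40, max_score: 3.0 },
    ];
    // 1.0 + 2.5 <= 4.0, so the pivot lands on the third list: doc 40.
    assert_eq!(find_pivot(&mut lists, 4.0), Some(2));
}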
@@ -174,9 +174,9 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
|
||||
into_box_scorer(specialized_scorer, &self.score_combiner_fn)
|
||||
})
|
||||
} else {
|
||||
self.complex_scorer(reader, boost, &DoNothingCombiner::default)
|
||||
self.complex_scorer(reader, boost, DoNothingCombiner::default)
|
||||
.map(|specialized_scorer| {
|
||||
into_box_scorer(specialized_scorer, &DoNothingCombiner::default)
|
||||
into_box_scorer(specialized_scorer, DoNothingCombiner::default)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@ impl Explanation {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the value associated to the current node.
|
||||
/// Returns the value associated with the current node.
|
||||
pub fn value(&self) -> Score {
|
||||
self.value
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ use crate::{DocId, Score};
|
||||
|
||||
/// Returns the intersection scorer.
|
||||
///
|
||||
/// The score associated to the documents is the sum of the
|
||||
/// The score associated with the documents is the sum of the
|
||||
/// score of the `Scorer`s given in argument.
|
||||
///
|
||||
/// For better performance, the function uses a
|
||||
|
||||
@@ -21,6 +21,7 @@ mod range_query;
|
||||
mod regex_query;
|
||||
mod reqopt_scorer;
|
||||
mod scorer;
|
||||
mod set_query;
|
||||
mod term_query;
|
||||
mod union;
|
||||
mod weight;
|
||||
@@ -58,6 +59,7 @@ pub use self::score_combiner::{
|
||||
DisjunctionMaxCombiner, ScoreCombiner, SumCombiner, SumWithCoordsCombiner,
|
||||
};
|
||||
pub use self::scorer::Scorer;
|
||||
pub use self::set_query::TermSetQuery;
|
||||
pub use self::term_query::TermQuery;
|
||||
pub use self::union::Union;
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -33,9 +33,9 @@ impl Ord for ScoreTerm {
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct used as helper to build [`MoreLikeThisQuery`]
|
||||
/// This more-like-this implementation is inspired by the Appache Lucene
|
||||
/// amd closely follows the same implementation with adaptabtion to Tantivy vocabulary and API.
|
||||
/// A struct used as helper to build [`MoreLikeThisQuery`](crate::query::MoreLikeThisQuery)
|
||||
/// This more-like-this implementation is inspired by the Apache Lucene
|
||||
/// and closely follows the same implementation with adaptation to Tantivy vocabulary and API.
|
||||
///
|
||||
/// [MoreLikeThis](https://github.com/apache/lucene/blob/main/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java#L147)
|
||||
/// [MoreLikeThisQuery](https://github.com/apache/lucene/blob/main/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThisQuery.java#L36)
|
||||
|
||||
@@ -119,7 +119,7 @@ impl PhraseQuery {
|
||||
}
|
||||
|
||||
impl Query for PhraseQuery {
|
||||
/// Create the weight associated to a query.
|
||||
/// Create the weight associated with a query.
|
||||
///
|
||||
/// See [`Weight`].
|
||||
fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> crate::Result<Box<dyn Weight>> {
|
||||
|
||||
@@ -42,7 +42,7 @@ use crate::{DocAddress, Term};
|
||||
/// [`Scorer`]: crate::query::Scorer
|
||||
/// [`SegmentReader`]: crate::SegmentReader
|
||||
pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
|
||||
/// Create the weight associated to a query.
|
||||
/// Create the weight associated with a query.
|
||||
///
|
||||
/// If scoring is not required, setting `scoring_enabled` to `false`
|
||||
/// can increase performances.
|
||||
@@ -67,7 +67,7 @@ pub trait Query: QueryClone + Send + Sync + downcast_rs::Downcast + fmt::Debug {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Extract all of the terms associated to the query and pass them to the
|
||||
/// Extract all of the terms associated with the query and pass them to the
|
||||
/// given closure.
|
||||
///
|
||||
/// Each term is associated with a boolean indicating whether
|
||||
|
||||
@@ -610,7 +610,7 @@ impl QueryParser {
|
||||
if let Some((field, path)) = self.split_full_path(full_path) {
|
||||
return Ok(vec![(field, path, literal.phrase.as_str())]);
|
||||
}
|
||||
// We need to add terms associated to json default fields.
|
||||
// We need to add terms associated with json default fields.
|
||||
let triplets: Vec<(Field, &str, &str)> = self
|
||||
.default_indexed_json_fields()
|
||||
.map(|json_field| (json_field, full_path.as_str(), literal.phrase.as_str()))
|
||||
|
||||
222
src/query/set_query.rs
Normal file
222
src/query/set_query.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use tantivy_fst::raw::CompiledAddr;
|
||||
use tantivy_fst::{Automaton, Map};
|
||||
|
||||
use crate::query::score_combiner::DoNothingCombiner;
|
||||
use crate::query::{AutomatonWeight, BooleanWeight, Occur, Query, Weight};
|
||||
use crate::schema::Field;
|
||||
use crate::{Searcher, Term};
|
||||
|
||||
/// A Term Set Query matches all of the documents containing any of the Term provided
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TermSetQuery {
|
||||
terms_map: HashMap<Field, Vec<Term>>,
|
||||
}
|
||||
|
||||
impl TermSetQuery {
|
||||
/// Create a Term Set Query
|
||||
pub fn new<T: IntoIterator<Item = Term>>(terms: T) -> Self {
|
||||
let mut terms_map: HashMap<_, Vec<_>> = HashMap::new();
|
||||
for term in terms {
|
||||
terms_map.entry(term.field()).or_default().push(term);
|
||||
}
|
||||
|
||||
for terms in terms_map.values_mut() {
|
||||
terms.sort_unstable();
|
||||
terms.dedup();
|
||||
}
|
||||
|
||||
TermSetQuery { terms_map }
|
||||
}
|
||||
|
||||
fn specialized_weight(
|
||||
&self,
|
||||
searcher: &Searcher,
|
||||
) -> crate::Result<BooleanWeight<DoNothingCombiner>> {
|
||||
let mut sub_queries: Vec<(_, Box<dyn Weight>)> = Vec::with_capacity(self.terms_map.len());
|
||||
|
||||
for (&field, sorted_terms) in self.terms_map.iter() {
|
||||
let field_entry = searcher.schema().get_field_entry(field);
|
||||
let field_type = field_entry.field_type();
|
||||
if !field_type.is_indexed() {
|
||||
let error_msg = format!("Field {:?} is not indexed.", field_entry.name());
|
||||
return Err(crate::TantivyError::SchemaError(error_msg));
|
||||
}
|
||||
|
||||
// In practice this won't fail because:
|
||||
// - we are writing to memory, so no IoError
|
||||
// - Terms are ordered
|
||||
let map = Map::from_iter(sorted_terms.iter().map(|key| (key.value_bytes(), 0)))
|
||||
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
|
||||
|
||||
sub_queries.push((
|
||||
Occur::Should,
|
||||
Box::new(AutomatonWeight::new(field, SetDfaWrapper(map))),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(BooleanWeight::new(
|
||||
sub_queries,
|
||||
false,
|
||||
Box::new(|| DoNothingCombiner),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl Query for TermSetQuery {
|
||||
fn weight(
|
||||
&self,
|
||||
searcher: &Searcher,
|
||||
_scoring_enabled: bool,
|
||||
) -> crate::Result<Box<dyn Weight>> {
|
||||
Ok(Box::new(self.specialized_weight(searcher)?))
|
||||
}
|
||||
}
|
||||
|
||||
struct SetDfaWrapper(Map<Vec<u8>>);
|
||||
|
||||
impl Automaton for SetDfaWrapper {
|
||||
type State = Option<CompiledAddr>;
|
||||
|
||||
fn start(&self) -> Option<CompiledAddr> {
|
||||
Some(self.0.as_ref().root().addr())
|
||||
}
|
||||
|
||||
fn is_match(&self, state_opt: &Option<CompiledAddr>) -> bool {
|
||||
if let Some(state) = state_opt {
|
||||
self.0.as_ref().node(*state).is_final()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&self, state_opt: &Option<CompiledAddr>, byte: u8) -> Option<CompiledAddr> {
|
||||
let state = state_opt.as_ref()?;
|
||||
let node = self.0.as_ref().node(*state);
|
||||
let transition = node.find_input(byte)?;
|
||||
Some(node.transition_addr(transition))
|
||||
}
|
||||
|
||||
fn can_match(&self, state: &Self::State) -> bool {
|
||||
state.is_some()
|
||||
}
|
||||
}

#[cfg(test)]
mod tests {
    use crate::collector::TopDocs;
    use crate::query::TermSetQuery;
    use crate::schema::{Schema, TEXT};
    use crate::{assert_nearly_equals, Index, Term};

    #[test]
    pub fn test_term_set_query() -> crate::Result<()> {
        let mut schema_builder = Schema::builder();
        let field1 = schema_builder.add_text_field("field1", TEXT);
        let field2 = schema_builder.add_text_field("field2", TEXT);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        {
            let mut index_writer = index.writer_for_tests()?;
            index_writer.add_document(doc!(
                field1 => "doc1",
                field2 => "val1",
            ))?;
            index_writer.add_document(doc!(
                field1 => "doc2",
                field2 => "val2",
            ))?;
            index_writer.add_document(doc!(
                field1 => "doc3",
                field2 => "val3",
            ))?;
            index_writer.add_document(doc!(
                field1 => "val3",
                field2 => "doc3",
            ))?;
            index_writer.commit()?;
        }
        let reader = index.reader()?;
        let searcher = reader.searcher();

        {
            // single element
            let terms = vec![Term::from_field_text(field1, "doc1")];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(2))?;
            assert_eq!(top_docs.len(), 1, "Expected 1 document");
            let (score, _) = top_docs[0];
            assert_nearly_equals!(1.0, score);
        }

        {
            // single element, absent
            let terms = vec![Term::from_field_text(field1, "doc4")];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(1))?;
            assert!(top_docs.is_empty(), "Expected 0 documents");
        }

        {
            // multiple elements
            let terms = vec![
                Term::from_field_text(field1, "doc1"),
                Term::from_field_text(field1, "doc2"),
            ];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(2))?;
            assert_eq!(top_docs.len(), 2, "Expected 2 documents");
            for (score, _) in top_docs {
                assert_nearly_equals!(1.0, score);
            }
        }

        {
            // multiple elements, mixed fields (with a duplicate term, which `new` dedups)
            let terms = vec![
                Term::from_field_text(field1, "doc1"),
                Term::from_field_text(field1, "doc1"),
                Term::from_field_text(field2, "val2"),
            ];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;

            assert_eq!(top_docs.len(), 2, "Expected 2 documents");
            for (score, _) in top_docs {
                assert_nearly_equals!(1.0, score);
            }
        }

        {
            // no field crosstalk
            let terms = vec![Term::from_field_text(field1, "doc3")];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
            assert_eq!(top_docs.len(), 1, "Expected 1 document");

            let terms = vec![Term::from_field_text(field2, "doc3")];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
            assert_eq!(top_docs.len(), 1, "Expected 1 document");

            let terms = vec![
                Term::from_field_text(field1, "doc3"),
                Term::from_field_text(field2, "doc3"),
            ];

            let term_set_query = TermSetQuery::new(terms);
            let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
            assert_eq!(top_docs.len(), 2, "Expected 2 documents");
        }

        Ok(())
    }
}

@@ -263,8 +263,7 @@ impl InnerIndexReader {
 /// It controls when a new version of the index should be loaded and lends
 /// you instances of `Searcher` for the last loaded version.
 ///
-/// `Clone` does not clone the different pool of searcher. `IndexReader`
-/// just wraps an `Arc`.
+/// `IndexReader` just wraps an `Arc`.
 #[derive(Clone)]
 pub struct IndexReader {
     inner: Arc<InnerIndexReader>,

@@ -294,9 +293,6 @@ impl IndexReader {
     ///
     /// This method should be called every single time a search
     /// query is performed.
-    /// The searchers are taken from a pool of `num_searchers` searchers.
-    /// If no searcher is available
-    /// this may block.
     ///
     /// The same searcher must be used for a given query, as it ensures
     /// the use of a consistent segment set.

@@ -3,7 +3,7 @@ use std::ops::BitOr;
 use serde::{Deserialize, Serialize};

 use super::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
-/// Define how an a bytes field should be handled by tantivy.
+/// Define how a bytes field should be handled by tantivy.
 #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(from = "BytesOptionsDeser")]
 pub struct BytesOptions {

@@ -83,7 +83,7 @@ impl BytesOptions {
     ///
     /// Fast fields are designed for random access.
     /// Access time are similar to a random lookup in an array.
-    /// If more than one value is associated to a fast field, only the last one is
+    /// If more than one value is associated with a fast field, only the last one is
     /// kept.
     #[must_use]
     pub fn set_fast(mut self) -> BytesOptions {

@@ -104,7 +104,7 @@ impl DateOptions {
     ///
     /// Fast fields are designed for random access.
     /// Access time are similar to a random lookup in an array.
-    /// If more than one value is associated to a fast field, only the last one is
+    /// If more than one value is associated with a fast field, only the last one is
     /// kept.
     #[must_use]
     pub fn set_fast(mut self, cardinality: Cardinality) -> DateOptions {

@@ -35,7 +35,7 @@ pub enum FacetParseError {
 /// For instance, an e-commerce website could
 /// have a `Facet` for `/electronics/tv_and_video/led_tv`.
 ///
-/// A document can be associated to any number of facets.
+/// A document can be associated with any number of facets.
 /// The hierarchy implicitly imply that a document
 /// belonging to a facet also belongs to the ancestor of
 /// its facet. In the example above, `/electronics/tv_and_video/`

@@ -2,6 +2,7 @@ use serde::{Deserialize, Serialize};
 use serde_json::Value as JsonValue;
 use thiserror::Error;

+use super::Cardinality;
 use crate::schema::bytes_options::BytesOptions;
 use crate::schema::facet_options::FacetOptions;
 use crate::schema::{

@@ -214,6 +215,26 @@ impl FieldType {
         }
     }

+    /// Returns the cardinality of the fast field, if the field is fast.
+    pub fn fastfield_cardinality(&self) -> Option<Cardinality> {
+        match *self {
+            FieldType::Bytes(ref bytes_options) if bytes_options.is_fast() => {
+                Some(Cardinality::SingleValue)
+            }
+            FieldType::Str(ref text_options) if text_options.is_fast() => {
+                Some(Cardinality::MultiValues)
+            }
+            FieldType::U64(ref int_options)
+            | FieldType::I64(ref int_options)
+            | FieldType::F64(ref int_options)
+            | FieldType::Bool(ref int_options) => int_options.get_fastfield_cardinality(),
+            FieldType::Date(ref date_options) => date_options.get_fastfield_cardinality(),
+            FieldType::Facet(_) => Some(Cardinality::MultiValues),
+            FieldType::JsonObject(_) => None,
+            _ => None,
+        }
+    }
+
     /// returns true if the field is normed (see [fieldnorms](crate::fieldnorm)).
     pub fn has_fieldnorms(&self) -> bool {
         match *self {

src/schema/ip_options.rs (new file, 109 lines)
@@ -0,0 +1,109 @@
+use std::ops::BitOr;
+
+use serde::{Deserialize, Serialize};
+
+use super::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
+use super::Cardinality;
+
+/// Define how an ip field should be handled by tantivy.
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
+pub struct IpOptions {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    fast: Option<Cardinality>,
+    stored: bool,
+}
+
+impl IpOptions {
+    /// Returns true iff the value is a fast field.
+    pub fn is_fast(&self) -> bool {
+        self.fast.is_some()
+    }
+
+    /// Returns `true` if the field should be stored.
+    pub fn is_stored(&self) -> bool {
+        self.stored
+    }
+
+    /// Returns the cardinality of the fastfield.
+    ///
+    /// If the field has not been declared as a fastfield, then
+    /// the method returns None.
+    pub fn get_fastfield_cardinality(&self) -> Option<Cardinality> {
+        self.fast
+    }
+
+    /// Sets the field as stored.
+    #[must_use]
+    pub fn set_stored(mut self) -> Self {
+        self.stored = true;
+        self
+    }
+
+    /// Set the field as a fast field.
+    ///
+    /// Fast fields are designed for random access.
+    /// Access time are similar to a random lookup in an array.
+    /// If more than one value is associated with a fast field, only the last one is
+    /// kept.
+    #[must_use]
+    pub fn set_fast(mut self, cardinality: Cardinality) -> Self {
+        self.fast = Some(cardinality);
+        self
+    }
+}
+
+impl From<()> for IpOptions {
+    fn from(_: ()) -> IpOptions {
+        IpOptions::default()
+    }
+}
+
+impl From<FastFlag> for IpOptions {
+    fn from(_: FastFlag) -> Self {
+        IpOptions {
+            stored: false,
+            fast: Some(Cardinality::SingleValue),
+        }
+    }
+}
+
+impl From<StoredFlag> for IpOptions {
+    fn from(_: StoredFlag) -> Self {
+        IpOptions {
+            stored: true,
+            fast: None,
+        }
+    }
+}
+
+impl From<IndexedFlag> for IpOptions {
+    fn from(_: IndexedFlag) -> Self {
+        IpOptions {
+            stored: false,
+            fast: None,
+        }
+    }
+}
+
+impl<T: Into<IpOptions>> BitOr<T> for IpOptions {
+    type Output = IpOptions;
+
+    fn bitor(self, other: T) -> IpOptions {
+        let other = other.into();
+        IpOptions {
+            stored: self.stored | other.stored,
+            fast: self.fast.or(other.fast),
+        }
+    }
+}
+
+impl<Head, Tail> From<SchemaFlagList<Head, Tail>> for IpOptions
+where
+    Head: Clone,
+    Tail: Clone,
+    Self: BitOr<Output = Self> + From<Head> + From<Tail>,
+{
+    fn from(head_tail: SchemaFlagList<Head, Tail>) -> Self {
+        Self::from(head_tail.head) | Self::from(head_tail.tail)
+    }
+}
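Like the other options types, the `From` and `BitOr` implementations above let an `IpOptions` be assembled from schema flags. A short usage sketch (assuming `IpOptions`, `Cardinality`, and the flags are re-exported from `tantivy::schema`, as the `mod.rs` hunks below indicate):

use tantivy::schema::{Cardinality, IpOptions, FAST, STORED};

fn main() {
    // Builder style:
    let opts = IpOptions::default()
        .set_stored()
        .set_fast(Cardinality::SingleValue);
    assert!(opts.is_stored() && opts.is_fast());

    // Equivalent flag composition via `|`, resolved through the
    // `From<SchemaFlagList<Head, Tail>>` impl above:
    let opts2: IpOptions = (FAST | STORED).into();
    assert_eq!(opts, opts2);
}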

@@ -28,10 +28,10 @@
 //! use tantivy::schema::*;
 //! let mut schema_builder = Schema::builder();
 //! let title_options = TextOptions::default()
 //!    .set_stored()
 //!    .set_indexing_options(TextFieldIndexing::default()
 //!        .set_tokenizer("default")
 //!        .set_index_option(IndexRecordOption::WithFreqsAndPositions));
 //! schema_builder.add_text_field("title", title_options);
 //! let schema = schema_builder.build();
 //! ```

@@ -45,8 +45,7 @@
 //! In the first phase, the ability to search for documents by the given field is determined by the
 //! [`IndexRecordOption`] of our [`TextOptions`].
 //!
-//! The effect of each possible setting is described more in detail
-//! [`TextIndexingOptions`](enum.TextIndexingOptions.html).
+//! The effect of each possible setting is described more in detail in [`TextOptions`].
 //!
 //! On the other hand setting the field as stored or not determines whether the field should be
 //! returned when [`Searcher::doc()`](crate::Searcher::doc) is called.

@@ -60,8 +59,8 @@
 //! use tantivy::schema::*;
 //! let mut schema_builder = Schema::builder();
 //! let num_stars_options = NumericOptions::default()
 //!    .set_stored()
 //!    .set_indexed();
 //! schema_builder.add_u64_field("num_stars", num_stars_options);
 //! let schema = schema_builder.build();
 //! ```

@@ -79,8 +78,8 @@
 //! For convenience, it is possible to define your field indexing options by combining different
 //! flags using the `|` operator.
 //!
-//! For instance, a schema containing the two fields defined in the example above could be rewritten
-//! :
+//! For instance, a schema containing the two fields defined in the example above could be
+//! rewritten:
 //!
 //! ```
 //! use tantivy::schema::*;

@@ -120,6 +119,7 @@ mod date_time_options;
 mod field;
 mod flags;
 mod index_record_option;
+mod ip_options;
 mod json_object_options;
 mod named_field_document;
 mod numeric_options;

@@ -138,6 +138,7 @@ pub use self::field_type::{FieldType, Type};
 pub use self::field_value::FieldValue;
 pub use self::flags::{FAST, INDEXED, STORED};
 pub use self::index_record_option::IndexRecordOption;
+pub use self::ip_options::IpOptions;
 pub use self::json_object_options::JsonObjectOptions;
 pub use self::named_field_document::NamedFieldDocument;
 pub use self::numeric_options::NumericOptions;

@@ -7,10 +7,10 @@ use crate::schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
 /// Express whether a field is single-value or multi-valued.
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize, Deserialize)]
 pub enum Cardinality {
-    /// The document must have exactly one value associated to the document.
+    /// The document must have exactly one value associated with the document.
     #[serde(rename = "single")]
     SingleValue,
-    /// The document can have any number of values associated to the document.
+    /// The document can have any number of values associated with the document.
     /// This is more memory and CPU expensive than the `SingleValue` solution.
     #[serde(rename = "multi")]
     MultiValues,

@@ -124,7 +124,7 @@ impl NumericOptions {
     ///
     /// Fast fields are designed for random access.
     /// Access time are similar to a random lookup in an array.
-    /// If more than one value is associated to a fast field, only the last one is
+    /// If more than one value is associated with a fast field, only the last one is
     /// kept.
     #[must_use]
     pub fn set_fast(mut self, cardinality: Cardinality) -> NumericOptions {

@@ -258,7 +258,7 @@ impl Eq for InnerSchema {}
 pub struct Schema(Arc<InnerSchema>);

 impl Schema {
-    /// Return the `FieldEntry` associated to a `Field`.
+    /// Return the `FieldEntry` associated with a `Field`.
     pub fn get_field_entry(&self, field: Field) -> &FieldEntry {
         &self.0.fields[field.field_id() as usize]
     }

@@ -422,12 +422,8 @@ pub enum DocParsingError {
 impl DocParsingError {
     /// Builds a NotJson DocParsingError
     fn invalid_json(invalid_json: &str) -> Self {
-        let sample_json: String = if invalid_json.len() < 20 {
-            invalid_json.to_string()
-        } else {
-            format!("{:?}...", &invalid_json[0..20])
-        };
-        DocParsingError::InvalidJson(sample_json)
+        let sample = invalid_json.chars().take(20).collect();
+        DocParsingError::InvalidJson(sample)
     }
 }

@@ -793,6 +789,11 @@ mod tests {
                 ))
             );
         }
+        {
+            // Short JSON, under the 20 char take.
+            let json_err = schema.parse_document(r#"{"count": 50,}"#);
+            assert_matches!(json_err, Err(InvalidJson(_)));
+        }
         {
             let json_err = schema.parse_document(
                 r#"{

@@ -54,27 +54,27 @@ impl Term {
         term
     }

-    /// Builds a term given a field, and a u64-value
+    /// Builds a term given a field, and a `u64`-value
     pub fn from_field_u64(field: Field, val: u64) -> Term {
         Term::from_fast_value(field, &val)
     }

-    /// Builds a term given a field, and a i64-value
+    /// Builds a term given a field, and a `i64`-value
     pub fn from_field_i64(field: Field, val: i64) -> Term {
         Term::from_fast_value(field, &val)
     }

-    /// Builds a term given a field, and a f64-value
+    /// Builds a term given a field, and a `f64`-value
     pub fn from_field_f64(field: Field, val: f64) -> Term {
         Term::from_fast_value(field, &val)
     }

-    /// Builds a term given a field, and a f64-value
+    /// Builds a term given a field, and a `bool`-value
     pub fn from_field_bool(field: Field, val: bool) -> Term {
         Term::from_fast_value(field, &val)
     }

-    /// Builds a term given a field, and a DateTime value
+    /// Builds a term given a field, and a `DateTime` value
     pub fn from_field_date(field: Field, val: DateTime) -> Term {
         Term::from_fast_value(field, &val.truncate(DatePrecision::Seconds))
     }

@@ -130,7 +130,7 @@ impl Term {
         self.set_fast_value(val);
     }

-    /// Sets a `i64` value in the term.
+    /// Sets a `DateTime` value in the term.
     pub fn set_date(&mut self, date: DateTime) {
         self.set_fast_value(date);
     }

@@ -47,7 +47,9 @@ impl TextOptions {
     /// unchanged. The "default" tokenizer will store the terms as lower case and this will be
     /// reflected in the dictionary.
     ///
-    /// The original text can be retrieved via `ord_to_term` from the dictionary.
+    /// The original text can be retrieved via
+    /// [`TermDictionary::ord_to_term()`](crate::termdict::TermDictionary::ord_to_term)
+    /// from the dictionary.
     #[must_use]
     pub fn set_fast(mut self) -> TextOptions {
         self.fast = true;

@@ -161,7 +163,7 @@ impl TextFieldIndexing {
         self
     }

-    /// Returns the indexing options associated to this field.
+    /// Returns the indexing options associated with this field.
     ///
     /// See [`IndexRecordOption`] for more detail.
     pub fn index_option(&self) -> IndexRecordOption {

@@ -342,7 +342,7 @@ impl SnippetGenerator {

     /// Generates a snippet for the given `Document`.
     ///
-    /// This method extract the text associated to the `SnippetGenerator`'s field
+    /// This method extracts the text associated with the `SnippetGenerator`'s field
     /// and computes a snippet.
     pub fn snippet_from_doc(&self, doc: &Document) -> Snippet {
        let text: String = doc
@@ -104,6 +104,13 @@ impl ZstdCompressor {
                     value, opt_name, err
                 )
             })?;
+            if value >= 15 {
+                warn!(
+                    "High zstd compression level detected: {:?}. High compression levels \
+                     (>=15) are slow and will limit indexing speed.",
+                    value
+                )
+            }
             compressor.compression_level = Some(value);
         }
         _ => {

@@ -1,10 +1,10 @@
 use std::io;
 use std::iter::Sum;
-use std::ops::AddAssign;
+use std::ops::{AddAssign, Range};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};

-use common::{BinarySerializable, HasLen, VInt};
+use common::{BinarySerializable, HasLen};
 use lru::LruCache;
 use ownedbytes::OwnedBytes;

@@ -140,10 +140,10 @@ impl StoreReader {
         self.cache.stats()
     }

-    /// Get checkpoint for DocId. The checkpoint can be used to load a block containing the
+    /// Get checkpoint for `DocId`. The checkpoint can be used to load a block containing the
     /// document.
     ///
-    /// Advanced API. In most cases use [get](Self::get).
+    /// Advanced API. In most cases use [`get`](Self::get).
     fn block_checkpoint(&self, doc_id: DocId) -> crate::Result<Checkpoint> {
         self.skip_index.seek(doc_id).ok_or_else(|| {
             crate::TantivyError::InvalidArgument(format!("Failed to lookup Doc #{}.", doc_id))

@@ -160,7 +160,7 @@

     /// Loads and decompresses a block.
     ///
-    /// Advanced API. In most cases use [get](Self::get).
+    /// Advanced API. In most cases use [`get`](Self::get).
     fn read_block(&self, checkpoint: &Checkpoint) -> io::Result<Block> {
         let cache_key = checkpoint.byte_range.start;
         if let Some(block) = self.cache.get_from_cache(cache_key) {

@@ -205,28 +205,21 @@ impl StoreReader {

     /// Advanced API.
     ///
-    /// In most cases use [get_document_bytes](Self::get_document_bytes).
+    /// In most cases use [`get_document_bytes`](Self::get_document_bytes).
     fn get_document_bytes_from_block(
         block: OwnedBytes,
         doc_id: DocId,
         checkpoint: &Checkpoint,
     ) -> crate::Result<OwnedBytes> {
-        let mut cursor = &block[..];
-        let cursor_len_before = cursor.len();
-        for _ in checkpoint.doc_range.start..doc_id {
-            let doc_length = VInt::deserialize(&mut cursor)?.val() as usize;
-            cursor = &cursor[doc_length..];
-        }
+        let doc_pos = doc_id - checkpoint.doc_range.start;

-        let doc_length = VInt::deserialize(&mut cursor)?.val() as usize;
-        let start_pos = cursor_len_before - cursor.len();
-        let end_pos = cursor_len_before - cursor.len() + doc_length;
-        Ok(block.slice(start_pos..end_pos))
+        let range = block_read_index(&block, doc_pos)?;
+        Ok(block.slice(range))
     }

     /// Iterator over all Documents in their order as they are stored in the doc store.
     /// Use this, if you want to extract all Documents from the doc store.
-    /// The alive_bitset has to be forwarded from the `SegmentReader` or the results maybe wrong.
+    /// The `alive_bitset` has to be forwarded from the `SegmentReader` or the results may be wrong.
     pub fn iter<'a: 'b, 'b>(
         &'b self,
         alive_bitset: Option<&'a AliveBitSet>,

@@ -237,9 +230,9 @@ impl StoreReader {
         })
     }

-    /// Iterator over all RawDocuments in their order as they are stored in the doc store.
+    /// Iterator over all raw Documents in their order as they are stored in the doc store.
     /// Use this, if you want to extract all Documents from the doc store.
-    /// The alive_bitset has to be forwarded from the `SegmentReader` or the results maybe wrong.
+    /// The `alive_bitset` has to be forwarded from the `SegmentReader` or the results may be wrong.
     pub(crate) fn iter_raw<'a: 'b, 'b>(
         &'b self,
         alive_bitset: Option<&'a AliveBitSet>,

@@ -254,9 +247,7 @@
         let mut curr_block = curr_checkpoint
             .as_ref()
             .map(|checkpoint| self.read_block(checkpoint).map_err(|e| e.kind())); // map error in order to enable cloning
-        let mut block_start_pos = 0;
-        let mut num_skipped = 0;
-        let mut reset_block_pos = false;
+        let mut doc_pos = 0;
         (0..last_doc_id)
             .filter_map(move |doc_id| {
                 // filter_map is only used to resolve lifetime issues between the two closures on

@@ -268,24 +259,19 @@
                     curr_block = curr_checkpoint
                         .as_ref()
                         .map(|checkpoint| self.read_block(checkpoint).map_err(|e| e.kind()));
-                    reset_block_pos = true;
-                    num_skipped = 0;
+                    doc_pos = 0;
                 }

                 let alive = alive_bitset.map_or(true, |bitset| bitset.is_alive(doc_id));
-                if alive {
-                    let ret = Some((curr_block.clone(), num_skipped, reset_block_pos));
-                    // the map block will move over the num_skipped, so we reset to 0
-                    num_skipped = 0;
-                    reset_block_pos = false;
-                    ret
+                let res = if alive {
+                    Some((curr_block.clone(), doc_pos))
                 } else {
-                    // we keep the number of skipped documents to move forward in the map block
-                    num_skipped += 1;
                     None
-                }
+                };
+                doc_pos += 1;
+                res
             })
-            .map(move |(block, num_skipped, reset_block_pos)| {
+            .map(move |(block, doc_pos)| {
                 let block = block
                     .ok_or_else(|| {
                         DataCorruption::comment_only(

@@ -296,30 +282,9 @@
                     .map_err(|error_kind| {
                         std::io::Error::new(error_kind, "error when reading block in doc store")
                     })?;
-                // this flag is set, when filter_map moved to the next block
-                if reset_block_pos {
-                    block_start_pos = 0;
-                }
-                let mut cursor = &block[block_start_pos..];
-                let mut pos = 0;
-                // move forward 1 doc + num_skipped in block and return length of current doc
-                let doc_length = loop {
-                    let doc_length = VInt::deserialize(&mut cursor)?.val() as usize;
-                    let num_bytes_read = block[block_start_pos..].len() - cursor.len();
-                    block_start_pos += num_bytes_read;
-
-                    pos += 1;
-                    if pos == num_skipped + 1 {
-                        break doc_length;
-                    } else {
-                        block_start_pos += doc_length;
-                        cursor = &block[block_start_pos..];
-                    }
-                };
-                let end_pos = block_start_pos + doc_length;
-                let doc_bytes = block.slice(block_start_pos..end_pos);
-                block_start_pos = end_pos;
-                Ok(doc_bytes)
+                let range = block_read_index(&block, doc_pos)?;
+                Ok(block.slice(range))
             })
     }

@@ -329,11 +294,33 @@
     }
 }

+fn block_read_index(block: &[u8], doc_pos: u32) -> crate::Result<Range<usize>> {
+    let doc_pos = doc_pos as usize;
+    let size_of_u32 = std::mem::size_of::<u32>();
+
+    let index_len_pos = block.len() - size_of_u32;
+    let index_len = u32::deserialize(&mut &block[index_len_pos..])? as usize;
+
+    if doc_pos > index_len {
+        return Err(crate::TantivyError::InternalError(
+            "Attempted to read doc from wrong block".to_owned(),
+        ));
+    }
+
+    let index_start = block.len() - (index_len + 1) * size_of_u32;
+    let index = &block[index_start..index_start + index_len * size_of_u32];
+
+    let start_offset = u32::deserialize(&mut &index[doc_pos * size_of_u32..])? as usize;
+    let end_offset = u32::deserialize(&mut &index[(doc_pos + 1) * size_of_u32..])
+        .unwrap_or(index_start as u32) as usize;
+    Ok(start_offset..end_offset)
+}
+
 #[cfg(feature = "quickwit")]
 impl StoreReader {
     /// Advanced API.
     ///
-    /// In most cases use [get_async](Self::get_async)
+    /// In most cases use [`get_async`](Self::get_async)
     ///
     /// Loads and decompresses a block asynchronously.
     async fn read_block_async(&self, checkpoint: &Checkpoint) -> crate::AsyncIoResult<Block> {
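The store hunks above and below replace the old VInt-prefixed document framing with a positional offset index appended to the end of each uncompressed block: the writer records the start offset of every document in `doc_pos` and serializes those offsets, followed by their count, behind the document bytes. A standalone sketch of the decoding that `block_read_index` performs (little-endian `u32`s are an assumption here, matching `BinarySerializable`'s integer encoding; `read_index` is a hypothetical helper, not part of tantivy):

// Block layout: [doc 0 bytes]...[doc n-1 bytes][start_0: u32]...[start_n-1: u32][n: u32]
fn read_index(block: &[u8], doc_pos: usize) -> Option<std::ops::Range<usize>> {
    let u32_at = |pos: usize| -> Option<usize> {
        Some(u32::from_le_bytes(block.get(pos..pos + 4)?.try_into().ok()?) as usize)
    };
    let index_len = u32_at(block.len().checked_sub(4)?)?;
    let index_start = block.len() - (index_len + 1) * 4;
    let start = u32_at(index_start + doc_pos * 4)?;
    let end = if doc_pos + 1 < index_len {
        u32_at(index_start + (doc_pos + 1) * 4)? // start of the next doc
    } else {
        index_start // the last doc ends where the offset index begins
    };
    Some(start..end)
}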

@@ -357,14 +344,14 @@
         Ok(decompressed_block)
     }

-    /// Fetches a document asynchronously.
+    /// Reads raw bytes of a given document asynchronously.
     pub async fn get_document_bytes_async(&self, doc_id: DocId) -> crate::Result<OwnedBytes> {
         let checkpoint = self.block_checkpoint(doc_id)?;
         let block = self.read_block_async(&checkpoint).await?;
         Self::get_document_bytes_from_block(block, doc_id, &checkpoint)
     }

-    /// Reads raw bytes of a given document. Async version of [get](Self::get).
+    /// Fetches a document asynchronously. Async version of [`get`](Self::get).
     pub async fn get_async(&self, doc_id: DocId) -> crate::Result<Document> {
         let mut doc_bytes = self.get_document_bytes_async(doc_id).await?;
         Ok(Document::deserialize(&mut doc_bytes)?)

@@ -427,7 +414,7 @@ mod tests {
         assert_eq!(store.cache_stats().cache_hits, 1);
         assert_eq!(store.cache_stats().cache_misses, 2);

-        assert_eq!(store.cache.peek_lru(), Some(9210));
+        assert_eq!(store.cache.peek_lru(), Some(11163));

         Ok(())
     }

@@ -1,6 +1,6 @@
 use std::io::{self, Write};

-use common::{BinarySerializable, VInt};
+use common::BinarySerializable;

 use super::compressors::Compressor;
 use super::StoreReader;

@@ -22,6 +22,7 @@ pub struct StoreWriter {
     num_docs_in_current_block: DocId,
     intermediary_buffer: Vec<u8>,
     current_block: Vec<u8>,
+    doc_pos: Vec<u32>,
     block_compressor: BlockCompressor,
 }

@@ -42,6 +43,7 @@ impl StoreWriter {
             block_size,
             num_docs_in_current_block: 0,
             intermediary_buffer: Vec::new(),
+            doc_pos: Vec::new(),
             current_block: Vec::new(),
             block_compressor,
         })

@@ -53,12 +55,17 @@ impl StoreWriter {

     /// The memory used (inclusive childs)
     pub fn mem_usage(&self) -> usize {
-        self.intermediary_buffer.capacity() + self.current_block.capacity()
+        self.intermediary_buffer.capacity()
+            + self.current_block.capacity()
+            + self.doc_pos.capacity() * std::mem::size_of::<u32>()
     }

     /// Checks if the current block is full, and if so, compresses and flushes it.
     fn check_flush_block(&mut self) -> io::Result<()> {
-        if self.current_block.len() > self.block_size {
+        // this does not count the u32 storing the index length itself, but it is negligible in
+        // front of everything else.
+        let index_len = self.doc_pos.len() * std::mem::size_of::<usize>();
+        if self.current_block.len() + index_len > self.block_size {
             self.send_current_block_to_compressor()?;
         }
         Ok(())

@@ -70,8 +77,19 @@ impl StoreWriter {
         if self.current_block.is_empty() {
             return Ok(());
         }

+        let size_of_u32 = std::mem::size_of::<u32>();
+        self.current_block
+            .reserve((self.doc_pos.len() + 1) * size_of_u32);
+
+        for pos in self.doc_pos.iter() {
+            pos.serialize(&mut self.current_block)?;
+        }
+        (self.doc_pos.len() as u32).serialize(&mut self.current_block)?;
+
         self.block_compressor
             .compress_block_and_write(&self.current_block, self.num_docs_in_current_block)?;
+        self.doc_pos.clear();
         self.current_block.clear();
         self.num_docs_in_current_block = 0;
         Ok(())

@@ -87,8 +105,7 @@ impl StoreWriter {
         // calling store bytes would be preferable for code reuse, but then we can't use
         // intermediary_buffer due to the borrow checker
         // a new buffer costs ~1% indexing performance
-        let doc_num_bytes = self.intermediary_buffer.len();
-        VInt(doc_num_bytes as u64).serialize_into_vec(&mut self.current_block);
+        self.doc_pos.push(self.current_block.len() as u32);
         self.current_block
             .write_all(&self.intermediary_buffer[..])?;
         self.num_docs_in_current_block += 1;

@@ -101,8 +118,7 @@ impl StoreWriter {
     /// The document id is implicitly the current number
     /// of documents.
     pub fn store_bytes(&mut self, serialized_document: &[u8]) -> io::Result<()> {
-        let doc_num_bytes = serialized_document.len();
-        VInt(doc_num_bytes as u64).serialize_into_vec(&mut self.current_block);
+        self.doc_pos.push(self.current_block.len() as u32);
         self.current_block.extend_from_slice(serialized_document);
         self.num_docs_in_current_block += 1;
         self.check_flush_block()?;

@@ -136,7 +136,7 @@ where A: Automaton
     }

     /// Return the next `(key, value)` pair.
-    #[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
+    #[allow(clippy::should_implement_trait)]
     pub fn next(&mut self) -> Option<(&[u8], &TermInfo)> {
         if self.advance() {
             Some((self.key(), self.value()))

@@ -138,12 +138,12 @@ impl TermDictionary {
         self.term_info_store.num_terms()
     }

-    /// Returns the ordinal associated to a given term.
+    /// Returns the ordinal associated with a given term.
     pub fn term_ord<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TermOrdinal>> {
         Ok(self.fst_index.get(key))
     }

-    /// Stores the term associated to a given term ordinal in
+    /// Stores the term associated with a given term ordinal in
     /// a `bytes` buffer.
     ///
     /// Term ordinals are defined as the position of the term in

@@ -179,7 +179,7 @@ where
     }

     /// Return the next `(key, value)` pair.
-    #[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
+    #[allow(clippy::should_implement_trait)]
     pub fn next(&mut self) -> Option<(&[u8], &TermInfo)> {
         if self.advance() {
             Some((self.key(), self.value()))

@@ -52,7 +52,7 @@ impl<W: io::Write> TermDictionaryBuilder<W> {
     /// to insert_key and insert_value.
     ///
     /// Prefer using `.insert(key, value)`
-    #[allow(clippy::clippy::clippy::unnecessary_wraps)]
+    #[allow(clippy::unnecessary_wraps)]
     pub(crate) fn insert_key(&mut self, key: &[u8]) -> io::Result<()> {
         self.sstable_writer.write_key(key);
         Ok(())

@@ -153,7 +153,7 @@ impl TermDictionary {
         self.num_terms as usize
     }

-    /// Returns the ordinal associated to a given term.
+    /// Returns the ordinal associated with a given term.
     pub fn term_ord<K: AsRef<[u8]>>(&self, key: K) -> io::Result<Option<TermOrdinal>> {
         let mut term_ord = 0u64;
         let key_bytes = key.as_ref();

@@ -167,7 +167,7 @@ impl TermDictionary {
         Ok(None)
     }

-    /// Returns the term associated to a given term ordinal.
+    /// Returns the term associated with a given term ordinal.
     ///
     /// Term ordinals are defined as the position of the term in
     /// the sorted list of terms.

@@ -255,7 +255,7 @@ where T: Iterator<Item = usize>
 /// Emits all of the offsets where a codepoint starts
 /// or a codepoint ends.
 ///
-/// By convention, we emit [0] for the empty string.
+/// By convention, we emit `[0]` for the empty string.
 struct CodepointFrontiers<'a> {
     s: &'a str,
     next_el: Option<usize>,