Compare commits

...

22 Commits

Author SHA1 Message Date
Paul Masurel
fb6d5acb82 Simplify code 2022-10-04 15:44:38 +09:00
PSeitz
4cf911d56a Merge pull request #1587 from quickwit-oss/no_get_val_in_serialize
remove get_val in serialization
2022-10-04 12:56:48 +08:00
Pascal Seitz
0f5cff762f move enumerate and remove computation 2022-10-04 12:30:19 +08:00
Pascal Seitz
6d9a123cf2 remove get_val in serialization
remove get_val in serialization and mark as unimplemented!()
replace get_val with iter in linear codec
remove MultivalueStartIndexRandomSeeker
replace MultivalueStartIndexIter with closure
Sample 100 values in linear codec
2022-10-04 12:01:25 +08:00
PSeitz
0f4a47816a Merge pull request #1582 from quickwit-oss/faster_sorted_field_values
use groupby instead of vec allocation
2022-10-04 09:36:24 +08:00
Pascal Seitz
b062ab2196 use groupby instead of vec allocation 2022-10-04 09:26:26 +08:00
Bruce Mitchener
a9d2f3db23 Tantivy requires Rust 1.62 or later. (#1583)
Tantivy needs the `total_cmp` feature to compile, which was stabilized
in Rust 1.62.
2022-10-03 18:31:07 +09:00
Bruce Mitchener
44e03791f9 Fix warnings when doc'ing private items. (#1579)
This also fixes a couple of typos, but plenty remain!
2022-10-03 14:24:00 +09:00
Bruce Mitchener
2d23763e9f Use u64::from boolean more. (#1580)
This case is inverted from the previous cases fixed.

This is from nightly clippy.
2022-10-03 14:17:50 +09:00
Bruce Mitchener
a24ae8d924 clippy: Fix needless-borrow warnings. (#1581)
These show on nightly clippy.
2022-10-03 14:15:09 +09:00
PSeitz
927dff5262 Merge pull request #1578 from quickwit-oss/dead_code
remove dead indexing code
2022-10-03 11:25:10 +08:00
Pascal Seitz
a695edcc95 remove dead indexing code 2022-10-03 09:44:02 +08:00
Paul Masurel
b4b4f3fa73 Removing default features for zstd (#1574) 2022-09-30 13:02:46 +09:00
PSeitz
b50e4b7c20 Merge pull request #1566 from quickwit-oss/fix_docstore_sorting
fix docstore settings for temp docstore
2022-09-30 10:10:36 +08:00
PSeitz
f8686ab1ec improve comments
Co-authored-by: Paul Masurel <paul@quickwit.io>
2022-09-30 10:06:34 +08:00
PSeitz
2fe42719d8 Merge pull request #1570 from quickwit-oss/no_sort_on_multi
validate index settings on create
2022-09-30 09:17:03 +08:00
PSeitz
fadd784a25 log improvements (#1564) 2022-09-30 09:39:26 +09:00
Pascal Seitz
0e94213af0 validate index settings on create 2022-09-29 18:58:09 +08:00
PSeitz
0da2a2e70d Merge pull request #1567 from quickwit-oss/dependabot/cargo/tantivy-fst-0.4.0
Update tantivy-fst requirement from 0.3.0 to 0.4.0
2022-09-29 10:00:16 +08:00
dependabot[bot]
0bcdf3cbbf Update tantivy-fst requirement from 0.3.0 to 0.4.0
Updates the requirements on [tantivy-fst](https://github.com/tantivy-search/fst) to permit the latest version.
- [Release notes](https://github.com/tantivy-search/fst/releases)
- [Commits](https://github.com/tantivy-search/fst/commits)

---
updated-dependencies:
- dependency-name: tantivy-fst
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-09-28 20:50:43 +00:00
Pascal Seitz
8f647b817f fix docstore settings for temp docstore
fixes #1565
2022-09-28 17:53:59 +08:00
trinity-1686a
a86b0df6f4 Add query matching terms in a set (#1539) 2022-09-28 09:43:18 +02:00
27 changed files with 464 additions and 228 deletions

View File

@@ -11,6 +11,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2021"
rust-version = "1.62"
[dependencies]
oneshot = "0.1.3"
@@ -19,11 +20,11 @@ byteorder = "1.4.3"
crc32fast = "1.3.2"
once_cell = "1.10.0"
regex = { version = "1.5.5", default-features = false, features = ["std", "unicode"] }
tantivy-fst = "0.3.0"
tantivy-fst = "0.4.0"
memmap2 = { version = "0.5.3", optional = true }
lz4_flex = { version = "0.9.2", default-features = false, features = ["checked-decode"], optional = true }
brotli = { version = "3.3.4", optional = true }
zstd = { version = "0.11", optional = true }
zstd = { version = "0.11", optional = true, default-features = false }
snap = { version = "1.0.5", optional = true }
tempfile = { version = "3.3.0", optional = true }
log = "0.4.16"

View File

@@ -58,7 +58,7 @@ Distributed search is out of the scope of Tantivy, but if you are looking for th
# Getting started
Tantivy works on stable Rust (>= 1.27) and supports Linux, macOS, and Windows.
Tantivy works on stable Rust and supports Linux, macOS, and Windows.
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command-line interface that makes it easy for you to create a search engine,
@@ -81,9 +81,13 @@ There are many ways to support this project.
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
## Minimum supported Rust version
Tantivy currently requires at least Rust 1.62 or later to compile.
## Clone and build locally
Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
Tantivy compiles on stable Rust.
To check out and run tests, you can simply run:
```bash

View File

@@ -312,7 +312,7 @@ mod tests {
#[test]
fn estimation_test_bad_interpolation_case_monotonically_increasing() {
let mut data: Vec<u64> = (200..=20000_u64).collect();
let mut data: Vec<u64> = (201..=20000_u64).collect();
data.push(1_000_000);
let data: VecColumn = data.as_slice().into();

View File

@@ -68,29 +68,37 @@ impl Line {
}
// Same as train, but the intercept is only estimated from provided sample positions
pub fn estimate(ys: &dyn Column, sample_positions: &[u64]) -> Self {
pub fn estimate(sample_positions_and_values: &[(u64, u64)]) -> Self {
let first_val = sample_positions_and_values[0].1;
let last_val = sample_positions_and_values[sample_positions_and_values.len() - 1].1;
let num_vals = sample_positions_and_values[sample_positions_and_values.len() - 1].0 + 1;
Self::train_from(
ys,
sample_positions
.iter()
.cloned()
.map(|pos| (pos, ys.get_val(pos))),
first_val,
last_val,
num_vals,
sample_positions_and_values.iter().cloned(),
)
}
// Intercept is only computed from provided positions
fn train_from(ys: &dyn Column, positions_and_values: impl Iterator<Item = (u64, u64)>) -> Self {
let num_vals = if let Some(num_vals) = NonZeroU64::new(ys.num_vals() - 1) {
num_vals
fn train_from(
first_val: u64,
last_val: u64,
num_vals: u64,
positions_and_values: impl Iterator<Item = (u64, u64)>,
) -> Self {
// TODO replace with let else
let idx_last_val = if let Some(idx_last_val) = NonZeroU64::new(num_vals - 1) {
idx_last_val
} else {
return Line::default();
};
let y0 = ys.get_val(0);
let y1 = ys.get_val(num_vals.get());
let y0 = first_val;
let y1 = last_val;
// We first independently pick our slope.
let slope = compute_slope(y0, y1, num_vals);
let slope = compute_slope(y0, y1, idx_last_val);
// We picked our slope. Note that it does not have to be perfect.
// Now we need to compute the best intercept.
@@ -138,8 +146,12 @@ impl Line {
/// This function is only invariable by translation if all of the
/// `ys` are packaged into half of the space. (See heuristic below)
pub fn train(ys: &dyn Column) -> Self {
let first_val = ys.iter().next().unwrap();
let last_val = ys.iter().nth(ys.num_vals() as usize - 1).unwrap();
Self::train_from(
ys,
first_val,
last_val,
ys.num_vals(),
ys.iter().enumerate().map(|(pos, val)| (pos as u64, val)),
)
}
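
The hunk above removes the `Column` dependency from `Line::train_from`: the slope now comes only from the first value, the last value, and the index of the last value, while the intercept is tuned from the streamed `(position, value)` pairs. A simplified standalone sketch of that endpoint-slope idea (f64 math and a plain min-intercept heuristic; the real codec works in wrapping u64 space with a more careful intercept choice):

```rust
/// A line y = slope * x + intercept fitted over (position, value) samples.
struct Line {
    slope: f64,
    intercept: f64,
}

impl Line {
    /// Mirror the shape of the new `Line::estimate`: the endpoints fix the
    /// slope, then all samples fix the intercept.
    fn estimate(samples: &[(u64, u64)]) -> Line {
        let (first_pos, first_val) = samples[0];
        let (last_pos, last_val) = samples[samples.len() - 1];
        let dx = (last_pos - first_pos).max(1) as f64;
        let slope = (last_val as f64 - first_val as f64) / dx;
        // Choose the intercept so that no sample falls below the line,
        // keeping residuals non-negative and narrow to bitpack.
        let intercept = samples
            .iter()
            .map(|&(pos, val)| val as f64 - slope * pos as f64)
            .fold(f64::INFINITY, f64::min);
        Line { slope, intercept }
    }

    fn eval(&self, pos: u64) -> f64 {
        self.slope * pos as f64 + self.intercept
    }
}

fn main() {
    let samples: Vec<(u64, u64)> = (0..100u64).map(|i| (i, 3 * i + 7)).collect();
    let line = Line::estimate(&samples);
    assert!((line.eval(50) - 157.0).abs() < 1e-6);
}
```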

View File

@@ -126,18 +126,20 @@ impl FastFieldCodec for LinearCodec {
return None; // disable compressor for this case
}
// let's sample at 0%, 5%, 10% .. 95%, 100%
let num_vals = column.num_vals() as f32 / 100.0;
let sample_positions = (0..20)
.map(|pos| (num_vals * pos as f32 * 5.0) as u64)
.collect::<Vec<_>>();
let limit_num_vals = column.num_vals().min(100_000);
let line = Line::estimate(column, &sample_positions);
let num_samples = 100;
let step_size = (limit_num_vals / num_samples).max(1); // 20 samples
let mut sample_positions_and_values: Vec<_> = Vec::new();
for (pos, val) in column.iter().enumerate().step_by(step_size as usize) {
sample_positions_and_values.push((pos as u64, val));
}
let estimated_bit_width = sample_positions
let line = Line::estimate(&sample_positions_and_values);
let estimated_bit_width = sample_positions_and_values
.into_iter()
.map(|pos| {
let actual_value = column.get_val(pos);
.map(|(pos, actual_value)| {
let interpolated_val = line.eval(pos as u64);
actual_value.wrapping_sub(interpolated_val)
})
@@ -146,6 +148,7 @@ impl FastFieldCodec for LinearCodec {
.max()
.unwrap_or(0);
// Extrapolate to whole column
let num_bits = (estimated_bit_width as u64 * column.num_vals() as u64) + 64;
let num_bits_uncompressed = 64 * column.num_vals();
Some(num_bits as f32 / num_bits_uncompressed as f32)
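
The estimate path above no longer calls `get_val`: it stride-samples up to 100 `(position, value)` pairs straight off the column iterator, then sizes the codec by the bit width of the worst residual against the fitted line. A hedged sketch of that cost model, with a plain slice standing in for the `Column` trait and a two-point line in place of the trained `Line`:

```rust
/// Estimate the compression ratio of a linear-interpolation codec over `vals`:
/// stride-sample the column, measure the widest residual against an
/// interpolating line, and extrapolate to the full column.
fn estimate_ratio(vals: &[u64]) -> f32 {
    let num_samples = 100;
    let step_size = (vals.len().min(100_000) / num_samples).max(1);
    let samples: Vec<(u64, u64)> = vals
        .iter()
        .copied()
        .enumerate()
        .step_by(step_size)
        .map(|(pos, val)| (pos as u64, val))
        .collect();

    // Interpolate between the first and last sample (a two-point line is
    // enough for a sketch; assumes a roughly increasing column, which is
    // the natural candidate for this codec anyway).
    let (first_pos, first_val) = samples[0];
    let (last_pos, last_val) = samples[samples.len() - 1];
    let dx = (last_pos - first_pos).max(1);
    let eval = |pos: u64| first_val + (last_val - first_val) * (pos - first_pos) / dx;

    // Widest residual over the samples, in bits.
    let estimated_bit_width = samples
        .iter()
        .map(|&(pos, actual)| actual.wrapping_sub(eval(pos)))
        .map(|residual| 64 - u64::from(residual.leading_zeros()))
        .max()
        .unwrap_or(0);

    // Extrapolate to the whole column, plus 64 bits of header, as in the diff.
    let num_bits = estimated_bit_width * vals.len() as u64 + 64;
    num_bits as f32 / (64 * vals.len() as u64) as f32
}

fn main() {
    let vals: Vec<u64> = (0..10_000u64).map(|i| 5 * i + 3).collect();
    // A perfectly linear column compresses to nearly nothing.
    assert!(estimate_ratio(&vals) < 0.01);
}
```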

View File

@@ -323,8 +323,8 @@ impl SegmentRangeCollector {
/// Converts the user provided f64 range value to fast field value space.
///
/// Internally fast field values are always stored as u64.
/// If the fast field has u64 [1,2,5], these values are stored as is in the fast field.
/// A fast field with f64 [1.0, 2.0, 5.0] is converted to u64 space, using a
/// If the fast field has u64 `[1, 2, 5]`, these values are stored as is in the fast field.
/// A fast field with f64 `[1.0, 2.0, 5.0]` is converted to u64 space, using a
/// monotonic mapping function, so the order is preserved.
///
/// Consequently, a f64 user range 1.0..3.0 needs to be converted to fast field value space using
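
The doc fix above concerns how f64 fast field values live in u64 space. For context, a sketch of the standard order-preserving bit trick (Tantivy's `MonotonicallyMappableToU64` follows the same idea, though the exact implementation lives in the codebase):

```rust
/// Map an f64 to a u64 so that u64 ordering matches f64 ordering for non-NaN
/// values: flip the sign bit for positives, flip all bits for negatives.
fn f64_to_u64(val: f64) -> u64 {
    let bits = val.to_bits();
    if val.is_sign_positive() {
        bits ^ (1 << 63)
    } else {
        !bits
    }
}

/// Inverse mapping, for reading values back out of the fast field.
fn u64_to_f64(val: u64) -> f64 {
    f64::from_bits(if val & (1 << 63) != 0 {
        val ^ (1 << 63)
    } else {
        !val
    })
}

fn main() {
    let xs = [-5.0_f64, -0.5, 0.0, 1.0, 2.5];
    let mapped: Vec<u64> = xs.iter().map(|&x| f64_to_u64(x)).collect();
    // Order is preserved, so u64 range queries match f64 semantics...
    assert!(mapped.windows(2).all(|w| w[0] < w[1]));
    // ...and the mapping round-trips.
    assert!(xs.iter().all(|&x| u64_to_f64(f64_to_u64(x)) == x));
}
```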

View File

@@ -338,11 +338,7 @@ impl SegmentCollector for FacetSegmentCollector {
let mut previous_collapsed_ord: usize = usize::MAX;
for &facet_ord in &self.facet_ords_buf {
let collapsed_ord = self.collapse_mapping[facet_ord as usize];
self.counts[collapsed_ord] += if collapsed_ord == previous_collapsed_ord {
0
} else {
1
};
self.counts[collapsed_ord] += u64::from(collapsed_ord != previous_collapsed_ord);
previous_collapsed_ord = collapsed_ord;
}
}
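
The hunk above collapses a 0/1 `if` into `u64::from(bool)` (commit 2d23763e9f notes the condition is inverted relative to the earlier cleanups). The idiom in isolation:

```rust
fn main() {
    let ords = [3usize, 3, 5, 5, 5, 7];
    let mut counts = [0u64; 8];
    let mut previous = usize::MAX;
    for &ord in &ords {
        // Count each distinct run once: a bool converts losslessly to 0 or 1,
        // so no explicit branch is needed.
        counts[ord] += u64::from(ord != previous);
        previous = ord;
    }
    assert_eq!((counts[3], counts[5], counts[7]), (1, 1, 1));
}
```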

View File

@@ -19,7 +19,7 @@ use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_metas;
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Field, FieldType, Schema};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;
@@ -152,9 +152,7 @@ impl IndexBuilder {
/// This should only be used for unit tests.
pub fn create_in_ram(self) -> Result<Index, TantivyError> {
let ram_directory = RamDirectory::create();
Ok(self
.create(ram_directory)
.expect("Creating a RamDirectory should never fail"))
self.create(ram_directory)
}
/// Creates a new index in a given filepath.
@@ -228,10 +226,44 @@ impl IndexBuilder {
))
}
}
fn validate(&self) -> crate::Result<()> {
if let Some(schema) = self.schema.as_ref() {
if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref() {
let schema_field = schema.get_field(&sort_by_field.field).ok_or_else(|| {
TantivyError::InvalidArgument(format!(
"Field to sort index {} not found in schema",
sort_by_field.field
))
})?;
let entry = schema.get_field_entry(schema_field);
if !entry.is_fast() {
return Err(TantivyError::InvalidArgument(format!(
"Field {} is no fast field. Field needs to be a single value fast field \
to be used to sort an index",
sort_by_field.field
)));
}
if entry.field_type().fastfield_cardinality() != Some(Cardinality::SingleValue) {
return Err(TantivyError::InvalidArgument(format!(
"Only single value fast field Cardinality supported for sorting index {}",
sort_by_field.field
)));
}
}
Ok(())
} else {
Err(TantivyError::InvalidArgument(
"no schema passed".to_string(),
))
}
}
/// Creates a new index given an implementation of the trait `Directory`.
///
/// If a directory previously existed, it will be erased.
fn create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
self.validate()?;
let dir = dir.into();
let directory = ManagedDirectory::wrap(dir)?;
save_new_metas(

View File

@@ -304,7 +304,7 @@ pub(crate) fn atomic_write(path: &Path, content: &[u8]) -> io::Result<()> {
"Path {:?} does not have parent directory.",
)
})?;
let mut tempfile = tempfile::Builder::new().tempfile_in(&parent_path)?;
let mut tempfile = tempfile::Builder::new().tempfile_in(parent_path)?;
tempfile.write_all(content)?;
tempfile.flush()?;
tempfile.as_file_mut().sync_data()?;

View File

@@ -1,5 +1,4 @@
use std::io;
use std::sync::Mutex;
use fastfield_codecs::{Column, MonotonicallyMappableToU64, VecColumn};
use fnv::FnvHashMap;
@@ -204,112 +203,63 @@ impl MultiValuedFastFieldWriter {
pub(crate) struct MultivalueStartIndex<'a, C: Column> {
column: &'a C,
doc_id_map: &'a DocIdMapping,
min_max_opt: Mutex<Option<(u64, u64)>>,
random_seeker: Mutex<MultivalueStartIndexRandomSeeker<'a, C>>,
}
struct MultivalueStartIndexRandomSeeker<'a, C: Column> {
seek_head: MultivalueStartIndexIter<'a, C>,
seek_next_id: u64,
}
impl<'a, C: Column> MultivalueStartIndexRandomSeeker<'a, C> {
fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
Self {
seek_head: MultivalueStartIndexIter {
column,
doc_id_map,
new_doc_id: 0,
offset: 0u64,
},
seek_next_id: 0u64,
}
}
min: u64,
max: u64,
}
impl<'a, C: Column> MultivalueStartIndex<'a, C> {
pub fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
assert_eq!(column.num_vals(), doc_id_map.num_old_doc_ids() as u64 + 1);
let (min, max) =
tantivy_bitpacker::minmax(iter_remapped_multivalue_index(doc_id_map, column))
.unwrap_or((0u64, 0u64));
MultivalueStartIndex {
column,
doc_id_map,
min_max_opt: Mutex::default(),
random_seeker: Mutex::new(MultivalueStartIndexRandomSeeker::new(column, doc_id_map)),
min,
max,
}
}
fn minmax(&self) -> (u64, u64) {
if let Some((min, max)) = *self.min_max_opt.lock().unwrap() {
return (min, max);
}
let (min, max) = tantivy_bitpacker::minmax(self.iter()).unwrap_or((0u64, 0u64));
*self.min_max_opt.lock().unwrap() = Some((min, max));
(min, max)
}
}
impl<'a, C: Column> Column for MultivalueStartIndex<'a, C> {
fn get_val(&self, idx: u64) -> u64 {
let mut random_seeker_lock = self.random_seeker.lock().unwrap();
if random_seeker_lock.seek_next_id > idx {
*random_seeker_lock =
MultivalueStartIndexRandomSeeker::new(self.column, self.doc_id_map);
}
let to_skip = idx - random_seeker_lock.seek_next_id;
random_seeker_lock.seek_next_id = idx + 1;
random_seeker_lock.seek_head.nth(to_skip as usize).unwrap()
fn get_val(&self, _idx: u64) -> u64 {
unimplemented!()
}
fn min_value(&self) -> u64 {
self.minmax().0
self.min
}
fn max_value(&self) -> u64 {
self.minmax().1
self.max
}
fn num_vals(&self) -> u64 {
(self.doc_id_map.num_new_doc_ids() + 1) as u64
}
fn iter<'b>(&'b self) -> Box<dyn Iterator<Item = u64> + 'b> {
Box::new(MultivalueStartIndexIter::new(self.column, self.doc_id_map))
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(iter_remapped_multivalue_index(
self.doc_id_map,
&self.column,
))
}
}
struct MultivalueStartIndexIter<'a, C: Column> {
pub column: &'a C,
pub doc_id_map: &'a DocIdMapping,
pub new_doc_id: usize,
pub offset: u64,
}
impl<'a, C: Column> MultivalueStartIndexIter<'a, C> {
fn new(column: &'a C, doc_id_map: &'a DocIdMapping) -> Self {
Self {
column,
doc_id_map,
new_doc_id: 0,
offset: 0,
}
}
}
impl<'a, C: Column> Iterator for MultivalueStartIndexIter<'a, C> {
type Item = u64;
fn next(&mut self) -> Option<Self::Item> {
if self.new_doc_id > self.doc_id_map.num_new_doc_ids() {
return None;
}
let new_doc_id = self.new_doc_id;
self.new_doc_id += 1;
let start_offset = self.offset;
if new_doc_id < self.doc_id_map.num_new_doc_ids() {
let old_doc = self.doc_id_map.get_old_doc_id(new_doc_id as u32) as u64;
let num_vals_for_doc = self.column.get_val(old_doc + 1) - self.column.get_val(old_doc);
self.offset += num_vals_for_doc;
}
Some(start_offset)
}
fn iter_remapped_multivalue_index<'a, C: Column>(
doc_id_map: &'a DocIdMapping,
column: &'a C,
) -> impl Iterator<Item = u64> + 'a {
let mut offset = 0;
let offsets = doc_id_map
.iter_old_doc_ids()
.map(move |old_doc| {
let num_vals_for_doc =
column.get_val(old_doc as u64 + 1) - column.get_val(old_doc as u64);
offset += num_vals_for_doc;
offset
});
std::iter::once(0u64).chain(offsets)
}
#[cfg(test)]
@@ -344,11 +294,5 @@ mod tests {
vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
);
assert_eq!(multivalue_start_index.num_vals(), 11);
assert_eq!(multivalue_start_index.get_val(3), 2);
assert_eq!(multivalue_start_index.get_val(5), 5);
assert_eq!(multivalue_start_index.get_val(8), 21);
assert_eq!(multivalue_start_index.get_val(4), 3);
assert_eq!(multivalue_start_index.get_val(0), 0);
assert_eq!(multivalue_start_index.get_val(10), 55);
}
}
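
The rewritten `iter_remapped_multivalue_index` above replaces the stateful iterator and the mutex-guarded random seeker with a plain scan: a leading `0` chained with a running prefix sum of per-document value counts, in remapped doc order. The same shape in isolation, with plain slices standing in for the `Column` and `DocIdMapping` (both assumptions of this sketch):

```rust
/// Build the start-offset index of a multivalued fast field after doc-id
/// remapping: entry i is where doc i's values begin, plus one trailing
/// end marker, mirroring the `once(0).chain(..)` shape of the diff.
fn remapped_start_offsets(old_offsets: &[u64], new_to_old: &[usize]) -> Vec<u64> {
    let mut offset = 0u64;
    std::iter::once(0u64)
        .chain(new_to_old.iter().map(|&old_doc| {
            // Number of values the doc held, read from the old offset index.
            offset += old_offsets[old_doc + 1] - old_offsets[old_doc];
            offset
        }))
        .collect()
}

fn main() {
    // Old index: doc0 has 2 values, doc1 has 0, doc2 has 3.
    let old_offsets = [0u64, 2, 2, 5];
    // New order after remapping: doc2, doc0, doc1.
    let new_to_old = [2usize, 0, 1];
    assert_eq!(remapped_start_offsets(&old_offsets, &new_to_old), vec![0, 3, 5, 5]);
}
```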

View File

@@ -391,15 +391,8 @@ impl<'map, 'bitp> Column for WriterFastFieldAccessProvider<'map, 'bitp> {
/// # Panics
///
/// May panic if `doc` is greater than the index.
fn get_val(&self, doc: u64) -> u64 {
if let Some(doc_id_map) = self.doc_id_map {
self.vals
.get(doc_id_map.get_old_doc_id(doc as u32) as usize) // consider extra
// FastFieldReader wrapper for
// non doc_id_map
} else {
self.vals.get(doc as usize)
}
fn get_val(&self, _doc: u64) -> u64 {
unimplemented!()
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {

View File

@@ -34,10 +34,6 @@ impl SegmentDocIdMapping {
self.new_doc_id_to_old_doc_addr.len()
}
pub(crate) fn get_old_doc_addr(&self, new_doc_id: DocId) -> DocAddress {
self.new_doc_id_to_old_doc_addr[new_doc_id as usize]
}
/// This flags means the segments are simply stacked in the order of their ordinal.
/// e.g. [(0, 1), .. (n, 1), (0, 2)..., (m, 2)]
///

View File

@@ -1395,6 +1395,35 @@ mod tests {
assert!(commit_again.is_ok());
}
#[test]
fn test_sort_by_multivalue_field_error() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let options = NumericOptions::default().set_fast(Cardinality::MultiValues);
schema_builder.add_u64_field("id", options);
let schema = schema_builder.build();
let settings = IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "id".to_string(),
order: Order::Desc,
}),
..Default::default()
};
let err = Index::builder()
.schema(schema)
.settings(settings)
.create_in_ram()
.unwrap_err();
assert_eq!(
err.to_string(),
"An invalid argument was passed: 'Only single value fast field Cardinality supported \
for sorting index id'"
);
Ok(())
}
#[test]
fn test_delete_with_sort_by_field() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();

View File

@@ -14,8 +14,8 @@ use crate::fastfield::{
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
use crate::indexer::sorted_doc_id_column::SortedDocIdColumn;
use crate::indexer::sorted_doc_id_multivalue_column::SortedDocIdMultiValueColumn;
use crate::indexer::sorted_doc_id_column::RemappedDocIdColumn;
use crate::indexer::sorted_doc_id_multivalue_column::RemappedDocIdMultiValueColumn;
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{Cardinality, Field, FieldType, Schema};
@@ -310,7 +310,7 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let fast_field_accessor = SortedDocIdColumn::new(&self.readers, doc_id_mapping, field);
let fast_field_accessor = RemappedDocIdColumn::new(&self.readers, doc_id_mapping, field);
fast_field_serializer.create_auto_detect_u64_fast_field(field, fast_field_accessor)?;
Ok(())
@@ -428,14 +428,8 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
reader_and_field_accessors: &[(&SegmentReader, T)],
) -> crate::Result<Vec<u64>> {
// We can now create our `idx` serializer, and in a second pass,
// can effectively push the different indexes.
// copying into a temp vec is not ideal, but the fast field codec api requires random
// access, which is used in the estimation. It's possible to 1. calculate random
// access on the fly or 2. change the codec api to make random access optional, but
// they both have also major drawbacks.
) -> crate::Result<()> {
// TODO Use `Column` implementation instead
let mut offsets = Vec::with_capacity(doc_id_mapping.len());
let mut offset = 0;
@@ -449,7 +443,7 @@ impl IndexMerger {
let fastfield_accessor = VecColumn::from(&offsets[..]);
fast_field_serializer.create_auto_detect_u64_fast_field(field, fastfield_accessor)?;
Ok(offsets)
Ok(())
}
/// Returns the fastfield index (index for the data, not the data).
fn write_multi_value_fast_field_idx(
@@ -457,7 +451,7 @@ impl IndexMerger {
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<Vec<u64>> {
) -> crate::Result<()> {
let reader_ordinal_and_field_accessors = self
.readers
.iter()
@@ -561,16 +555,16 @@ impl IndexMerger {
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
// Multifastfield consists in 2 fastfields.
// Multifastfield consists of 2 fastfields.
// The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
// First we merge the idx fast field.
let offsets =
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
let fastfield_accessor =
SortedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, &offsets, field);
RemappedDocIdMultiValueColumn::new(&self.readers, doc_id_mapping, field);
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
field,
fastfield_accessor,

View File

@@ -24,12 +24,25 @@ impl SegmentSerializer {
// In the merge case this is not necessary because we can kmerge the already sorted
// segments
let remapping_required = segment.index().settings().sort_by_field.is_some() && !is_in_merge;
let store_component = if remapping_required {
SegmentComponent::TempStore
let settings = segment.index().settings().clone();
let store_writer = if remapping_required {
let store_write = segment.open_write(SegmentComponent::TempStore)?;
StoreWriter::new(
store_write,
crate::store::Compressor::None,
0, // we want random access on the docs, so we choose a minimal block size. Every
// doc will get its own block.
settings.docstore_compress_dedicated_thread,
)?
} else {
SegmentComponent::Store
let store_write = segment.open_write(SegmentComponent::Store)?;
StoreWriter::new(
store_write,
settings.docstore_compression,
settings.docstore_blocksize,
settings.docstore_compress_dedicated_thread,
)?
};
let store_write = segment.open_write(store_component)?;
let fast_field_write = segment.open_write(SegmentComponent::FastFields)?;
let fast_field_serializer = CompositeFastFieldSerializer::from_write(fast_field_write)?;
@@ -38,13 +51,6 @@ impl SegmentSerializer {
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
let postings_serializer = InvertedIndexSerializer::open(&mut segment)?;
let settings = segment.index().settings();
let store_writer = StoreWriter::new(
store_write,
settings.docstore_compression,
settings.docstore_blocksize,
settings.docstore_compress_dedicated_thread,
)?;
Ok(SegmentSerializer {
segment,
store_writer,

View File

@@ -1,4 +1,5 @@
use fastfield_codecs::MonotonicallyMappableToU64;
use itertools::Itertools;
use super::doc_id_mapping::{get_doc_id_mapping_from_field, DocIdMapping};
use super::operation::AddOperation;
@@ -157,7 +158,13 @@ impl SegmentWriter {
fn index_document(&mut self, doc: &Document) -> crate::Result<()> {
let doc_id = self.max_doc;
for (field, values) in doc.get_sorted_field_values() {
let vals_grouped_by_field = doc
.field_values()
.iter()
.sorted_by_key(|el| el.field())
.group_by(|el| el.field());
for (field, field_values) in &vals_grouped_by_field {
let values = field_values.map(|field_value| field_value.value());
let field_entry = self.schema.get_field_entry(field);
let make_schema_error = || {
crate::TantivyError::SchemaError(format!(
@@ -198,24 +205,16 @@ impl SegmentWriter {
}
FieldType::Str(_) => {
let mut token_streams: Vec<BoxTokenStream> = vec![];
let mut offsets = vec![];
let mut total_offset = 0;
for value in values {
match value {
Value::PreTokStr(tok_str) => {
offsets.push(total_offset);
if let Some(last_token) = tok_str.tokens.last() {
total_offset += last_token.offset_to;
}
token_streams
.push(PreTokenizedStream::from(tok_str.clone()).into());
}
Value::Str(ref text) => {
let text_analyzer =
&self.per_field_text_analyzers[field.field_id() as usize];
offsets.push(total_offset);
total_offset += text.len();
token_streams.push(text_analyzer.token_stream(text));
}
_ => (),
@@ -284,9 +283,8 @@ impl SegmentWriter {
}
FieldType::JsonObject(_) => {
let text_analyzer = &self.per_field_text_analyzers[field.field_id() as usize];
let json_values_it = values
.iter()
.map(|value| value.as_json().ok_or_else(make_schema_error));
let json_values_it =
values.map(|value| value.as_json().ok_or_else(make_schema_error));
index_json_values(
doc_id,
json_values_it,
@@ -374,9 +372,9 @@ fn remap_and_write(
doc_id_map,
)?;
debug!("resort-docstore");
// finalize temp docstore and create version, which reflects the doc_id_map
if let Some(doc_id_map) = doc_id_map {
debug!("resort-docstore");
let store_write = serializer
.segment_mut()
.open_write(SegmentComponent::Store)?;
@@ -393,7 +391,8 @@ fn remap_and_write(
serializer
.segment()
.open_read(SegmentComponent::TempStore)?,
50,
1, /* The docstore is configured to have one doc per block, and each doc is accessed
* only once: we don't need caching. */
)?;
for old_doc_id in doc_id_map.iter_old_doc_ids() {
let doc_bytes = store_read.get_document_bytes(old_doc_id)?;
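
The `index_document` change at the top of this file drops the allocated `Vec` from `get_sorted_field_values` in favor of a lazy `sorted_by_key` + `group_by` chain (commit b062ab2196). The itertools pattern in isolation, using the 2022-era 0.10 API where `group_by` yields `(key, subiterator)` pairs over consecutive runs:

```rust
use itertools::Itertools;

fn main() {
    // (field_id, value) pairs as a document might carry them, unsorted.
    let field_values = [(2u32, "b"), (0, "x"), (2, "c"), (0, "y")];

    // `sorted_by_key` buffers and sorts; `group_by` then groups consecutive
    // equal keys, so each field comes out exactly once with all its values.
    let grouped = field_values
        .iter()
        .sorted_by_key(|(field, _)| *field)
        .group_by(|(field, _)| *field);

    let mut result = Vec::new();
    for (field, values) in &grouped {
        result.push((field, values.map(|(_, v)| *v).collect::<Vec<_>>()));
    }
    assert_eq!(result, vec![(0, vec!["x", "y"]), (2, vec!["b", "c"])]);
}
```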

View File

@@ -5,9 +5,9 @@ use itertools::Itertools;
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocAddress, SegmentReader};
use crate::SegmentReader;
pub(crate) struct SortedDocIdColumn<'a> {
pub(crate) struct RemappedDocIdColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<Arc<dyn Column<u64>>>,
min_value: u64,
@@ -37,7 +37,7 @@ fn compute_min_max_val(
.into_option()
}
impl<'a> SortedDocIdColumn<'a> {
impl<'a> RemappedDocIdColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
@@ -68,7 +68,7 @@ impl<'a> SortedDocIdColumn<'a> {
})
.collect::<Vec<_>>();
SortedDocIdColumn {
RemappedDocIdColumn {
doc_id_mapping,
fast_field_readers,
min_value,
@@ -78,13 +78,9 @@ impl<'a> SortedDocIdColumn<'a> {
}
}
impl<'a> Column for SortedDocIdColumn<'a> {
fn get_val(&self, doc: u64) -> u64 {
let DocAddress {
doc_id,
segment_ord,
} = self.doc_id_mapping.get_old_doc_addr(doc as u32);
self.fast_field_readers[segment_ord as usize].get_val(doc_id as u64)
impl<'a> Column for RemappedDocIdColumn<'a> {
fn get_val(&self, _doc: u64) -> u64 {
unimplemented!()
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {

View File

@@ -2,26 +2,23 @@ use std::cmp;
use fastfield_codecs::Column;
use crate::fastfield::{MultiValueLength, MultiValuedFastFieldReader};
use crate::fastfield::MultiValuedFastFieldReader;
use crate::indexer::doc_id_mapping::SegmentDocIdMapping;
use crate::schema::Field;
use crate::{DocId, SegmentReader};
use crate::SegmentReader;
// We can now initialize our serializer, and push it the different values
pub(crate) struct SortedDocIdMultiValueColumn<'a> {
pub(crate) struct RemappedDocIdMultiValueColumn<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: Vec<MultiValuedFastFieldReader<u64>>,
offsets: &'a [u64],
min_value: u64,
max_value: u64,
num_vals: u64,
}
impl<'a> SortedDocIdMultiValueColumn<'a> {
impl<'a> RemappedDocIdMultiValueColumn<'a> {
pub(crate) fn new(
readers: &'a [SegmentReader],
doc_id_mapping: &'a SegmentDocIdMapping,
offsets: &'a [u64],
field: Field,
) -> Self {
// Our values are bitpacked and we need to know what should be
@@ -58,10 +55,9 @@ impl<'a> SortedDocIdMultiValueColumn<'a> {
min_value = 0;
max_value = 0;
}
SortedDocIdMultiValueColumn {
RemappedDocIdMultiValueColumn {
doc_id_mapping,
fast_field_readers,
offsets,
min_value,
max_value,
num_vals: num_vals as u64,
@@ -69,26 +65,9 @@ impl<'a> SortedDocIdMultiValueColumn<'a> {
}
}
impl<'a> Column for SortedDocIdMultiValueColumn<'a> {
fn get_val(&self, pos: u64) -> u64 {
// use the offsets index to find the doc_id which will contain the position.
// the offsets are strictly increasing so we can do a binary search on it.
let new_doc_id: DocId = self.offsets.partition_point(|&offset| offset <= pos) as DocId - 1; // Offsets start at 0, so -1 is safe
// now we need to find the position of `pos` in the multivalued bucket
let num_pos_covered_until_now = self.offsets[new_doc_id as usize];
let pos_in_values = pos - num_pos_covered_until_now;
let old_doc_addr = self.doc_id_mapping.get_old_doc_addr(new_doc_id);
let num_vals =
self.fast_field_readers[old_doc_addr.segment_ord as usize].get_len(old_doc_addr.doc_id);
assert!(num_vals >= pos_in_values);
let mut vals = Vec::new();
self.fast_field_readers[old_doc_addr.segment_ord as usize]
.get_vals(old_doc_addr.doc_id, &mut vals);
vals[pos_in_values as usize]
impl<'a> Column for RemappedDocIdMultiValueColumn<'a> {
fn get_val(&self, _pos: u64) -> u64 {
unimplemented!()
}
fn iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {

View File

@@ -106,7 +106,7 @@ impl BlockDecoder {
pub trait VIntEncoder {
/// Compresses an array of `u32` integers,
/// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_ encoding)
/// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)
/// and variable bytes encoding.
///
/// The method takes an array of ints to compress, and returns
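
The link fix above sits on `VIntEncoder`, whose contract is exactly what the doc comment says: delta-encode a block of increasing `u32` values, then variable-byte encode the (small) gaps. A self-contained sketch of that two-step scheme, using LEB128-style varints (Tantivy's exact byte layout may differ):

```rust
/// Delta-encode sorted doc ids, emitting each gap as a little-endian
/// base-128 varint: 7 payload bits per byte, high bit set means "more bytes".
fn delta_vint_encode(doc_ids: &[u32]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut prev = 0u32;
    for &doc in doc_ids {
        let mut gap = doc - prev;
        prev = doc;
        loop {
            let byte = (gap & 0x7F) as u8;
            gap >>= 7;
            if gap == 0 {
                out.push(byte);
                break;
            }
            out.push(byte | 0x80);
        }
    }
    out
}

fn delta_vint_decode(bytes: &[u8]) -> Vec<u32> {
    let (mut out, mut prev, mut gap, mut shift) = (Vec::new(), 0u32, 0u32, 0);
    for &byte in bytes {
        gap |= u32::from(byte & 0x7F) << shift;
        if byte & 0x80 == 0 {
            prev += gap;
            out.push(prev);
            gap = 0;
            shift = 0;
        } else {
            shift += 7;
        }
    }
    out
}

fn main() {
    let doc_ids = [3u32, 7, 1000, 1001];
    let bytes = delta_vint_encode(&doc_ids);
    // Gaps are 3, 4, 993, 1: small numbers, so most fit in a single byte.
    assert_eq!(delta_vint_decode(&bytes), doc_ids);
}
```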

View File

@@ -144,7 +144,7 @@ fn advance_all_scorers_on_pivot(term_scorers: &mut Vec<TermScorerWithMaxScore>,
/// Implements the WAND (Weak AND) algorithm for dynamic pruning
/// described in the paper "Faster Top-k Document Retrieval Using Block-Max Indexes".
/// Link: http://engineering.nyu.edu/~suel/papers/bmw.pdf
/// Link: <http://engineering.nyu.edu/~suel/papers/bmw.pdf>
pub fn block_wand(
mut scorers: Vec<TermScorer>,
mut threshold: Score,

View File

@@ -174,9 +174,9 @@ impl<TScoreCombiner: ScoreCombiner + Sync> Weight for BooleanWeight<TScoreCombin
into_box_scorer(specialized_scorer, &self.score_combiner_fn)
})
} else {
self.complex_scorer(reader, boost, &DoNothingCombiner::default)
self.complex_scorer(reader, boost, DoNothingCombiner::default)
.map(|specialized_scorer| {
into_box_scorer(specialized_scorer, &DoNothingCombiner::default)
into_box_scorer(specialized_scorer, DoNothingCombiner::default)
})
}
}

View File

@@ -21,6 +21,7 @@ mod range_query;
mod regex_query;
mod reqopt_scorer;
mod scorer;
mod set_query;
mod term_query;
mod union;
mod weight;
@@ -58,6 +59,7 @@ pub use self::score_combiner::{
DisjunctionMaxCombiner, ScoreCombiner, SumCombiner, SumWithCoordsCombiner,
};
pub use self::scorer::Scorer;
pub use self::set_query::TermSetQuery;
pub use self::term_query::TermQuery;
pub use self::union::Union;
#[cfg(test)]

View File

@@ -33,9 +33,9 @@ impl Ord for ScoreTerm {
}
}
/// A struct used as helper to build [`MoreLikeThisQuery`]
/// This more-like-this implementation is inspired by the Appache Lucene
/// amd closely follows the same implementation with adaptabtion to Tantivy vocabulary and API.
/// A struct used as helper to build [`MoreLikeThisQuery`](crate::query::MoreLikeThisQuery)
/// This more-like-this implementation is inspired by the Apache Lucene
/// and closely follows the same implementation with adaptation to Tantivy vocabulary and API.
///
/// [MoreLikeThis](https://github.com/apache/lucene/blob/main/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java#L147)
/// [MoreLikeThisQuery](https://github.com/apache/lucene/blob/main/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThisQuery.java#L36)

src/query/set_query.rs (new file, 222 lines added)
View File

@@ -0,0 +1,222 @@
use std::collections::HashMap;
use tantivy_fst::raw::CompiledAddr;
use tantivy_fst::{Automaton, Map};
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{AutomatonWeight, BooleanWeight, Occur, Query, Weight};
use crate::schema::Field;
use crate::{Searcher, Term};
/// A Term Set Query matches all of the documents containing any of the Term provided
#[derive(Debug, Clone)]
pub struct TermSetQuery {
terms_map: HashMap<Field, Vec<Term>>,
}
impl TermSetQuery {
/// Create a Term Set Query
pub fn new<T: IntoIterator<Item = Term>>(terms: T) -> Self {
let mut terms_map: HashMap<_, Vec<_>> = HashMap::new();
for term in terms {
terms_map.entry(term.field()).or_default().push(term);
}
for terms in terms_map.values_mut() {
terms.sort_unstable();
terms.dedup();
}
TermSetQuery { terms_map }
}
fn specialized_weight(
&self,
searcher: &Searcher,
) -> crate::Result<BooleanWeight<DoNothingCombiner>> {
let mut sub_queries: Vec<(_, Box<dyn Weight>)> = Vec::with_capacity(self.terms_map.len());
for (&field, sorted_terms) in self.terms_map.iter() {
let field_entry = searcher.schema().get_field_entry(field);
let field_type = field_entry.field_type();
if !field_type.is_indexed() {
let error_msg = format!("Field {:?} is not indexed.", field_entry.name());
return Err(crate::TantivyError::SchemaError(error_msg));
}
// In practice this won't fail because:
// - we are writing to memory, so no IoError
// - Terms are ordered
let map = Map::from_iter(sorted_terms.iter().map(|key| (key.value_bytes(), 0)))
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
sub_queries.push((
Occur::Should,
Box::new(AutomatonWeight::new(field, SetDfaWrapper(map))),
));
}
Ok(BooleanWeight::new(
sub_queries,
false,
Box::new(|| DoNothingCombiner),
))
}
}
impl Query for TermSetQuery {
fn weight(
&self,
searcher: &Searcher,
_scoring_enabled: bool,
) -> crate::Result<Box<dyn Weight>> {
Ok(Box::new(self.specialized_weight(searcher)?))
}
}
struct SetDfaWrapper(Map<Vec<u8>>);
impl Automaton for SetDfaWrapper {
type State = Option<CompiledAddr>;
fn start(&self) -> Option<CompiledAddr> {
Some(self.0.as_ref().root().addr())
}
fn is_match(&self, state_opt: &Option<CompiledAddr>) -> bool {
if let Some(state) = state_opt {
self.0.as_ref().node(*state).is_final()
} else {
false
}
}
fn accept(&self, state_opt: &Option<CompiledAddr>, byte: u8) -> Option<CompiledAddr> {
let state = state_opt.as_ref()?;
let node = self.0.as_ref().node(*state);
let transition = node.find_input(byte)?;
Some(node.transition_addr(transition))
}
fn can_match(&self, state: &Self::State) -> bool {
state.is_some()
}
}
#[cfg(test)]
mod tests {
use crate::collector::TopDocs;
use crate::query::TermSetQuery;
use crate::schema::{Schema, TEXT};
use crate::{assert_nearly_equals, Index, Term};
#[test]
pub fn test_term_set_query() -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field1 = schema_builder.add_text_field("field1", TEXT);
let field2 = schema_builder.add_text_field("field1", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_for_tests()?;
index_writer.add_document(doc!(
field1 => "doc1",
field2 => "val1",
))?;
index_writer.add_document(doc!(
field1 => "doc2",
field2 => "val2",
))?;
index_writer.add_document(doc!(
field1 => "doc3",
field2 => "val3",
))?;
index_writer.add_document(doc!(
field1 => "val3",
field2 => "doc3",
))?;
index_writer.commit()?;
}
let reader = index.reader()?;
let searcher = reader.searcher();
{
// single element
let terms = vec![Term::from_field_text(field1, "doc1")];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(2))?;
assert_eq!(top_docs.len(), 1, "Expected 1 document");
let (score, _) = top_docs[0];
assert_nearly_equals!(1.0, score);
}
{
// single element, absent
let terms = vec![Term::from_field_text(field1, "doc4")];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(1))?;
assert!(top_docs.is_empty(), "Expected 0 document");
}
{
// multiple elements
let terms = vec![
Term::from_field_text(field1, "doc1"),
Term::from_field_text(field1, "doc2"),
];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(2))?;
assert_eq!(top_docs.len(), 2, "Expected 2 documents");
for (score, _) in top_docs {
assert_nearly_equals!(1.0, score);
}
}
{
// multiple elements, mixed fields
let terms = vec![
Term::from_field_text(field1, "doc1"),
Term::from_field_text(field1, "doc1"),
Term::from_field_text(field2, "val2"),
];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
assert_eq!(top_docs.len(), 2, "Expected 2 document");
for (score, _) in top_docs {
assert_nearly_equals!(1.0, score);
}
}
{
// no field crosstalk
let terms = vec![Term::from_field_text(field1, "doc3")];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
assert_eq!(top_docs.len(), 1, "Expected 1 document");
let terms = vec![Term::from_field_text(field2, "doc3")];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
assert_eq!(top_docs.len(), 1, "Expected 1 document");
let terms = vec![
Term::from_field_text(field1, "doc3"),
Term::from_field_text(field2, "doc3"),
];
let term_set_query = TermSetQuery::new(terms);
let top_docs = searcher.search(&term_set_query, &TopDocs::with_limit(3))?;
assert_eq!(top_docs.len(), 2, "Expected 2 document");
}
Ok(())
}
}

View File

@@ -2,6 +2,7 @@ use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
use thiserror::Error;
use super::Cardinality;
use crate::schema::bytes_options::BytesOptions;
use crate::schema::facet_options::FacetOptions;
use crate::schema::{
@@ -214,6 +215,26 @@ impl FieldType {
}
}
/// returns true if the field is fast.
pub fn fastfield_cardinality(&self) -> Option<Cardinality> {
match *self {
FieldType::Bytes(ref bytes_options) if bytes_options.is_fast() => {
Some(Cardinality::SingleValue)
}
FieldType::Str(ref text_options) if text_options.is_fast() => {
Some(Cardinality::MultiValues)
}
FieldType::U64(ref int_options)
| FieldType::I64(ref int_options)
| FieldType::F64(ref int_options)
| FieldType::Bool(ref int_options) => int_options.get_fastfield_cardinality(),
FieldType::Date(ref date_options) => date_options.get_fastfield_cardinality(),
FieldType::Facet(_) => Some(Cardinality::MultiValues),
FieldType::JsonObject(_) => None,
_ => None,
}
}
/// returns true if the field is normed (see [fieldnorms](crate::fieldnorm)).
pub fn has_fieldnorms(&self) -> bool {
match *self {

View File

@@ -104,6 +104,13 @@ impl ZstdCompressor {
value, opt_name, err
)
})?;
if value >= 15 {
warn!(
"High zstd compression level detected: {:?}. High compression levels \
(>=15) are slow and will limit indexing speed.",
value
)
}
compressor.compression_level = Some(value);
}
_ => {

View File

@@ -255,7 +255,7 @@ where T: Iterator<Item = usize>
/// Emits all of the offsets where a codepoint starts
/// or a codepoint ends.
///
/// By convention, we emit [0] for the empty string.
/// By convention, we emit `[0]` for the empty string.
struct CodepointFrontiers<'a> {
s: &'a str,
next_el: Option<usize>,
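
The escaping fix above (backticks around `[0]` so rustdoc doesn't parse a broken intra-doc link) documents `CodepointFrontiers`, which emits every byte offset where a codepoint starts or ends. A sketch of the same enumeration over `char_indices` (a hypothetical standalone helper, not Tantivy's implementation):

```rust
/// Byte offsets at which a codepoint starts or ends; `[0]` for the empty string.
fn codepoint_frontiers(s: &str) -> Vec<usize> {
    if s.is_empty() {
        return vec![0];
    }
    // Each char start is a frontier, and for contiguous codepoints every end
    // coincides with the next start, so start offsets plus the final string
    // length cover all frontiers exactly once.
    s.char_indices()
        .map(|(offset, _)| offset)
        .chain(std::iter::once(s.len()))
        .collect()
}

fn main() {
    assert_eq!(codepoint_frontiers(""), vec![0]);
    // 'é' is 2 bytes in UTF-8, so frontiers fall at 0, 1, 3, 4.
    assert_eq!(codepoint_frontiers("aéb"), vec![0, 1, 3, 4]);
}
```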