Compare commits

..

1 Commits

Author SHA1 Message Date
Paul Masurel
d4e2d2e40e Searcher Warming API (#1258)
Adds an API to register Warmers in the IndexReader.


Co-authored-by: shikhar <shikhar@schmizz.net>
2022-01-20 14:32:42 +09:00
222 changed files with 2872 additions and 2655 deletions

View File

@@ -1,4 +1,4 @@
name: Long running tests name: Rust
on: on:
push: push:

View File

@@ -1,4 +1,4 @@
name: Unit tests name: Rust
on: on:
push: push:
@@ -18,25 +18,13 @@ jobs:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Build - name: Build
run: cargo build --verbose --workspace run: cargo build --verbose --workspace
- name: Install latest nightly to test also against unstable feature flag
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
components: rustfmt
- name: Install latest nightly to test also against unstable feature flag - name: Install latest nightly to test also against unstable feature flag
uses: actions-rs/toolchain@v1 uses: actions-rs/toolchain@v1
with: with:
toolchain: stable toolchain: stable
override: true override: true
components: rustfmt, clippy components: rustfmt
- name: Run tests - name: Run tests
run: cargo +stable test --features mmap,brotli-compression,lz4-compression,snappy-compression,failpoints --verbose --workspace run: cargo test --features mmap,brotli-compression,lz4-compression,snappy-compression,failpoints --verbose --workspace
- name: Check Formatting - name: Check Formatting
run: cargo +nightly fmt --all -- --check run: cargo fmt --all -- --check
- uses: actions-rs/clippy-check@v1
with:
toolchain: stable
token: ${{ secrets.GITHUB_TOKEN }}
args: --tests

View File

@@ -1,12 +1,12 @@
Tantivy 0.17 Tantivy 0.17
================================ ================================
- LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar @fulmicoton) [#115](https://github.com/quickwit-oss/tantivy/issues/115) - LogMergePolicy now triggers merges if the ratio of deleted documents reaches a threshold (@shikhar) [#115](https://github.com/quickwit-inc/tantivy/issues/115)
- Adds a searcher Warmer API (@shikhar @fulmicoton) - Adds a searcher Warmer API (@shikhar)
- Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211 - Change to non-strict schema. Ignore fields in data which are not defined in schema. Previously this returned an error. #1211
- Facets are necessarily indexed. Existing index with indexed facets should work out of the box. Index without facets that are marked with index: false should be broken (but they were already broken in a sense). (@fulmicoton) #1195 . - Facets are necessarily indexed. Existing index with indexed facets should work out of the box. Index without facets that are marked with index: false should be broken (but they were already broken in a sense). (@fulmicoton) #1195 .
- Bugfix that could in theory impact durability in theory on some filesystems [#1224](https://github.com/quickwit-oss/tantivy/issues/1224) - Bugfix that could in theory impact durability in theory on some filesystems [#1224](https://github.com/quickwit-inc/tantivy/issues/1224)
- Schema now offers not indexing fieldnorms (@lpouget) [#922](https://github.com/quickwit-oss/tantivy/issues/922) - Schema now offers not indexing fieldnorms (@lpouget) [#922](https://github.com/quickwit-inc/tantivy/issues/922)
- Reduce the number of fsync calls [#1225](https://github.com/quickwit-oss/tantivy/issues/1225) - Reduce the number of fsync calls [#1225](https://github.com/quickwit-inc/tantivy/issues/1225)
Tantivy 0.16.2 Tantivy 0.16.2
================================ ================================
@@ -128,7 +128,7 @@ Tantivy 0.12.0
## How to update? ## How to update?
Crates relying on custom tokenizer, or registering tokenizer in the manager will require some Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
minor changes. Check https://github.com/quickwit-oss/tantivy/blob/main/examples/custom_tokenizer.rs minor changes. Check https://github.com/quickwit-inc/tantivy/blob/main/examples/custom_tokenizer.rs
to check for some code sample. to check for some code sample.
Tantivy 0.11.3 Tantivy 0.11.3

View File

@@ -6,8 +6,8 @@ license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
description = """Search engine library""" description = """Search engine library"""
documentation = "https://docs.rs/tantivy/" documentation = "https://docs.rs/tantivy/"
homepage = "https://github.com/quickwit-oss/tantivy" homepage = "https://github.com/quickwit-inc/tantivy"
repository = "https://github.com/quickwit-oss/tantivy" repository = "https://github.com/quickwit-inc/tantivy"
readme = "README.md" readme = "README.md"
keywords = ["search", "information", "retrieval"] keywords = ["search", "information", "retrieval"]
edition = "2018" edition = "2018"
@@ -52,7 +52,7 @@ chrono = "0.4.19"
smallvec = "1.6.1" smallvec = "1.6.1"
rayon = "1.5" rayon = "1.5"
lru = "0.7.0" lru = "0.7.0"
fastdivide = "0.4" fastdivide = "0.3"
itertools = "0.10.0" itertools = "0.10.0"
measure_time = "0.8.0" measure_time = "0.8.0"
@@ -95,6 +95,9 @@ unstable = [] # useful for benches.
[workspace] [workspace]
members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"] members = ["query-grammar", "bitpacker", "common", "fastfield_codecs", "ownedbytes"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }
# Following the "fail" crate best practices, we isolate # Following the "fail" crate best practices, we isolate
# tests that define specific behavior in fail check points # tests that define specific behavior in fail check points
# in a different binary. # in a different binary.

View File

@@ -1,6 +1,3 @@
test: test:
echo "Run test only... No examples." echo "Run test only... No examples."
cargo test --tests --lib cargo test --tests --lib
fmt:
cargo +nightly fmt --all

View File

@@ -1,13 +1,22 @@
[![Docs](https://docs.rs/tantivy/badge.svg)](https://docs.rs/crate/tantivy/) [![Docs](https://docs.rs/tantivy/badge.svg)](https://docs.rs/crate/tantivy/)
[![Build Status](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml/badge.svg)](https://github.com/quickwit-oss/tantivy/actions/workflows/test.yml) [![Build Status](https://github.com/quickwit-inc/tantivy/actions/workflows/test.yml/badge.svg)](https://github.com/quickwit-inc/tantivy/actions/workflows/test.yml)
[![codecov](https://codecov.io/gh/quickwit-oss/tantivy/branch/main/graph/badge.svg)](https://codecov.io/gh/quickwit-oss/tantivy) [![codecov](https://codecov.io/gh/quickwit-inc/tantivy/branch/main/graph/badge.svg)](https://codecov.io/gh/quickwit-inc/tantivy)
[![Join the chat at https://discord.gg/MT27AG5EVE](https://shields.io/discord/908281611840282624?label=chat%20on%20discord)](https://discord.gg/MT27AG5EVE) [![Join the chat at https://discord.gg/MT27AG5EVE](https://shields.io/discord/908281611840282624?label=chat%20on%20discord)](https://discord.gg/MT27AG5EVE)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy) [![Crates.io](https://img.shields.io/crates/v/tantivy.svg)](https://crates.io/crates/tantivy)
![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png) ![Tantivy](https://tantivy-search.github.io/logo/tantivy-logo.png)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/0)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/0)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/1)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/1)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/2)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/2)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/3)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/3)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/4)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/4)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/5)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/5)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/6)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
[![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/7)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)
**Tantivy** is a **full text search engine library** written in Rust. **Tantivy** is a **full text search engine library** written in Rust.
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
@@ -26,7 +35,7 @@ Your mileage WILL vary depending on the nature of queries and their load.
# Features # Features
- Full-text search - Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy), [Vaporetto](https://crates.io/crates/vaporetto_tantivy), and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder)) - Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)), Japanese ([lindera](https://github.com/lindera-morphology/lindera-tantivy) and [tantivy-tokenizer-tiny-segmenter](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) and Korean ([lindera](https://github.com/lindera-morphology/lindera-tantivy) + [lindera-ko-dic-builder](https://github.com/lindera-morphology/lindera-ko-dic-builder))
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:) - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools - Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as Lucene) - BM25 scoring (the same as Lucene)
@@ -57,7 +66,7 @@ are within the scope of Tantivy.
Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows. Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html) - [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/quickwit-oss/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine, - [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
index documents, and search via the CLI or a small server with a REST API. index documents, and search via the CLI or a small server with a REST API.
It walks you through getting a wikipedia search engine up and running in a few minutes. It walks you through getting a wikipedia search engine up and running in a few minutes.
- [Reference doc for the last released version](https://docs.rs/tantivy/) - [Reference doc for the last released version](https://docs.rs/tantivy/)
@@ -83,7 +92,7 @@ Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run: To check out and run tests, you can simply run:
```bash ```bash
git clone https://github.com/quickwit-oss/tantivy.git git clone https://github.com/quickwit-inc/tantivy.git
cd tantivy cd tantivy
cargo build cargo build
``` ```

View File

@@ -6,7 +6,7 @@ authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = [] categories = []
description = """Tantivy-sub crate: bitpacking""" description = """Tantivy-sub crate: bitpacking"""
repository = "https://github.com/quickwit-oss/tantivy" repository = "https://github.com/quickwit-inc/tantivy"
keywords = [] keywords = []

View File

@@ -1,5 +1,4 @@
use std::convert::TryInto; use std::{convert::TryInto, io};
use std::io;
pub struct BitPacker { pub struct BitPacker {
mini_buffer: u64, mini_buffer: u64,

View File

@@ -1,11 +1,12 @@
use super::bitpacker::BitPacker;
use super::compute_num_bits;
use crate::{minmax, BitUnpacker}; use crate::{minmax, BitUnpacker};
use super::{bitpacker::BitPacker, compute_num_bits};
const BLOCK_SIZE: usize = 128; const BLOCK_SIZE: usize = 128;
/// `BlockedBitpacker` compresses data in blocks of /// `BlockedBitpacker` compresses data in blocks of
/// 128 elements, while keeping an index on it /// 128 elements, while keeping an index on it
///
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct BlockedBitpacker { pub struct BlockedBitpacker {
// bitpacked blocks // bitpacked blocks

View File

@@ -1,7 +1,8 @@
mod bitpacker; mod bitpacker;
mod blocked_bitpacker; mod blocked_bitpacker;
pub use crate::bitpacker::{BitPacker, BitUnpacker}; pub use crate::bitpacker::BitPacker;
pub use crate::bitpacker::BitUnpacker;
pub use crate::blocked_bitpacker::BlockedBitpacker; pub use crate::blocked_bitpacker::BlockedBitpacker;
/// Computes the number of bits that will be used for bitpacking. /// Computes the number of bits that will be used for bitpacking.

View File

@@ -1,8 +1,8 @@
use ownedbytes::OwnedBytes;
use std::convert::TryInto; use std::convert::TryInto;
use std::io::Write; use std::io::Write;
use std::{fmt, io, u64}; use std::u64;
use std::{fmt, io};
use ownedbytes::OwnedBytes;
#[derive(Clone, Copy, Eq, PartialEq)] #[derive(Clone, Copy, Eq, PartialEq)]
pub struct TinySet(u64); pub struct TinySet(u64);
@@ -187,6 +187,7 @@ fn num_buckets(max_val: u32) -> u32 {
impl BitSet { impl BitSet {
/// serialize a `BitSet`. /// serialize a `BitSet`.
///
pub fn serialize<T: Write>(&self, writer: &mut T) -> io::Result<()> { pub fn serialize<T: Write>(&self, writer: &mut T) -> io::Result<()> {
writer.write_all(self.max_value.to_le_bytes().as_ref())?; writer.write_all(self.max_value.to_le_bytes().as_ref())?;
for tinyset in self.tinysets.iter().cloned() { for tinyset in self.tinysets.iter().cloned() {
@@ -352,6 +353,7 @@ impl ReadOnlyBitSet {
} }
/// Iterate the tinyset on the fly from serialized data. /// Iterate the tinyset on the fly from serialized data.
///
#[inline] #[inline]
fn iter_tinysets(&self) -> impl Iterator<Item = TinySet> + '_ { fn iter_tinysets(&self) -> impl Iterator<Item = TinySet> + '_ {
self.data.chunks_exact(8).map(move |chunk| { self.data.chunks_exact(8).map(move |chunk| {
@@ -361,6 +363,7 @@ impl ReadOnlyBitSet {
} }
/// Iterate over the positions of the elements. /// Iterate over the positions of the elements.
///
#[inline] #[inline]
pub fn iter(&self) -> impl Iterator<Item = u32> + '_ { pub fn iter(&self) -> impl Iterator<Item = u32> + '_ {
self.iter_tinysets() self.iter_tinysets()
@@ -412,14 +415,14 @@ impl<'a> From<&'a BitSet> for ReadOnlyBitSet {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashSet; use super::BitSet;
use super::ReadOnlyBitSet;
use super::TinySet;
use ownedbytes::OwnedBytes; use ownedbytes::OwnedBytes;
use rand::distributions::Bernoulli; use rand::distributions::Bernoulli;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use std::collections::HashSet;
use super::{BitSet, ReadOnlyBitSet, TinySet};
#[test] #[test]
fn test_read_serialized_bitset_full_multi() { fn test_read_serialized_bitset_full_multi() {
@@ -440,7 +443,7 @@ mod tests {
bitset.serialize(&mut out).unwrap(); bitset.serialize(&mut out).unwrap();
let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out)); let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
assert_eq!(bitset.len() as usize, 64); assert_eq!(bitset.len() as usize, 64 as usize);
} }
#[test] #[test]
@@ -707,10 +710,10 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use super::BitSet;
use super::TinySet;
use test; use test;
use super::{BitSet, TinySet};
#[bench] #[bench]
fn bench_tinyset_pop(b: &mut test::Bencher) { fn bench_tinyset_pop(b: &mut test::Bencher) {
b.iter(|| { b.iter(|| {

View File

@@ -104,11 +104,10 @@ pub fn u64_to_f64(val: u64) -> f64 {
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {
use std::f64; use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
use super::{BinarySerializable, FixedSize};
use proptest::prelude::*; use proptest::prelude::*;
use std::f64;
use super::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64, BinarySerializable, FixedSize};
fn test_i64_converter_helper(val: i64) { fn test_i64_converter_helper(val: i64) {
assert_eq!(u64_to_i64(i64_to_u64(val)), val); assert_eq!(u64_to_i64(i64_to_u64(val)), val);
@@ -158,10 +157,10 @@ pub mod test {
#[test] #[test]
fn test_f64_order() { fn test_f64_order() {
assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY)) assert!(!(f64_to_u64(f64::NEG_INFINITY)..f64_to_u64(f64::INFINITY))
.contains(&f64_to_u64(f64::NAN))); // nan is not a number .contains(&f64_to_u64(f64::NAN))); //nan is not a number
assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); // same exponent, different mantissa assert!(f64_to_u64(1.5) > f64_to_u64(1.0)); //same exponent, different mantissa
assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); // same mantissa, different exponent assert!(f64_to_u64(2.0) > f64_to_u64(1.0)); //same mantissa, different exponent
assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); // different exponent and mantissa assert!(f64_to_u64(2.0) > f64_to_u64(1.5)); //different exponent and mantissa
assert!(f64_to_u64(1.0) > f64_to_u64(-1.0)); // pos > neg assert!(f64_to_u64(1.0) > f64_to_u64(-1.0)); // pos > neg
assert!(f64_to_u64(-1.5) < f64_to_u64(-1.0)); assert!(f64_to_u64(-1.5) < f64_to_u64(-1.0));
assert!(f64_to_u64(-2.0) < f64_to_u64(1.0)); assert!(f64_to_u64(-2.0) < f64_to_u64(1.0));

View File

@@ -1,9 +1,10 @@
use std::io::{Read, Write}; use crate::Endianness;
use std::{fmt, io}; use crate::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt}; use byteorder::{ReadBytesExt, WriteBytesExt};
use std::fmt;
use crate::{Endianness, VInt}; use std::io;
use std::io::Read;
use std::io::Write;
/// Trait for a simple binary serialization. /// Trait for a simple binary serialization.
pub trait BinarySerializable: fmt::Debug + Sized { pub trait BinarySerializable: fmt::Debug + Sized {
@@ -201,7 +202,8 @@ impl BinarySerializable for String {
#[cfg(test)] #[cfg(test)]
pub mod test { pub mod test {
use super::{VInt, *}; use super::VInt;
use super::*;
use crate::serialize::BinarySerializable; use crate::serialize::BinarySerializable;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() { pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new(); let mut buffer = Vec::new();

View File

@@ -1,9 +1,8 @@
use std::io;
use std::io::{Read, Write};
use byteorder::{ByteOrder, LittleEndian};
use super::BinarySerializable; use super::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
use std::io;
use std::io::Read;
use std::io::Write;
/// Wrapper over a `u64` that serializes as a variable int. /// Wrapper over a `u64` that serializes as a variable int.
#[derive(Clone, Copy, Debug, Eq, PartialEq)] #[derive(Clone, Copy, Debug, Eq, PartialEq)]
@@ -175,7 +174,9 @@ impl BinarySerializable for VInt {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{serialize_vint_u32, BinarySerializable, VInt}; use super::serialize_vint_u32;
use super::BinarySerializable;
use super::VInt;
fn aux_test_vint(val: u64) { fn aux_test_vint(val: u64) {
let mut v = [14u8; 10]; let mut v = [14u8; 10];

View File

@@ -54,8 +54,7 @@ impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
} }
} }
/// Struct used to prevent from calling /// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite.html#tymethod.terminate_ref) directly
/// [`terminate_ref`](trait.TerminatingWrite.html#tymethod.terminate_ref) directly
/// ///
/// The point is that while the type is public, it cannot be built by anyone /// The point is that while the type is public, it cannot be built by anyone
/// outside of this module. /// outside of this module.
@@ -65,7 +64,9 @@ pub struct AntiCallToken(());
pub trait TerminatingWrite: Write { pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref. /// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()> fn terminate(mut self) -> io::Result<()>
where Self: Sized { where
Self: Sized,
{
self.terminate_ref(AntiCallToken(())) self.terminate_ref(AntiCallToken(()))
} }
@@ -96,9 +97,8 @@ impl<'a> TerminatingWrite for &'a mut Vec<u8> {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use std::io::Write;
use super::CountingWriter; use super::CountingWriter;
use std::io::Write;
#[test] #[test]
fn test_counting_writer() { fn test_counting_writer() {

View File

@@ -38,7 +38,7 @@ Note: Tantivy 0.16 does not do this optimization yet.
In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?) In principle there are many algorithms possible that exploit the monotonically increasing nature. (aggregations maybe?)
## Usage ## Usage
The index sorting can be configured by setting [`sort_by_field`](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of tantivy 0.16 only fast fields are allowed to be used. The index sorting can be configured by setting [`sort_by_field`](https://github.com/quickwit-inc/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/core/index_meta.rs#L238) on `IndexSettings` and passing it to a `IndexBuilder`. As of tantivy 0.16 only fast fields are allowed to be used.
``` ```
let settings = IndexSettings { let settings = IndexSettings {
@@ -55,7 +55,7 @@ let index = index_builder.create_in_ram().unwrap();
## Implementation details ## Implementation details
Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-oss/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073). Sorting an index is applied in the serialization step. In general there are two serialization steps: [Finishing a single segment](https://github.com/quickwit-inc/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/segment_writer.rs#L338) and [merging multiple segments](https://github.com/quickwit-inc/tantivy/blob/000d76b11a139a84b16b9b95060a1c93e8b9851c/src/indexer/merger.rs#L1073).
In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets). In both cases we generate a docid mapping reflecting the sort. This mapping is used when serializing the different components (doc store, fastfields, posting list, normfield, facets).

View File

@@ -73,7 +73,7 @@ fn main() -> tantivy::Result<()> {
// multithreaded. // multithreaded.
// //
// Here we give tantivy a budget of `50MB`. // Here we give tantivy a budget of `50MB`.
// Using a bigger memory_arena for the indexer may increase // Using a bigger heap for the indexer may increase
// throughput, but 50 MB is already plenty. // throughput, but 50 MB is already plenty.
let mut index_writer = index.writer(50_000_000)?; let mut index_writer = index.writer(50_000_000)?;
@@ -91,8 +91,8 @@ fn main() -> tantivy::Result<()> {
old_man_doc.add_text(title, "The Old Man and the Sea"); old_man_doc.add_text(title, "The Old Man and the Sea");
old_man_doc.add_text( old_man_doc.add_text(
body, body,
"He was an old man who fished alone in a skiff in the Gulf Stream and he had gone \ "He was an old man who fished alone in a skiff in the Gulf Stream and \
eighty-four days now without taking a fish.", he had gone eighty-four days now without taking a fish.",
); );
// ... and add it to the `IndexWriter`. // ... and add it to the `IndexWriter`.

View File

@@ -12,7 +12,8 @@
use tantivy::collector::{Collector, SegmentCollector}; use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader}; use tantivy::fastfield::{DynamicFastFieldReader, FastFieldReader};
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, INDEXED, TEXT}; use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::{doc, Index, Score, SegmentReader}; use tantivy::{doc, Index, Score, SegmentReader};
#[derive(Default)] #[derive(Default)]

View File

@@ -62,7 +62,7 @@ fn main() -> tantivy::Result<()> {
// multithreaded. // multithreaded.
// //
// Here we use a buffer of 50MB per thread. Using a bigger // Here we use a buffer of 50MB per thread. Using a bigger
// memory arena for the indexer can increase its throughput. // heap for the indexer can increase its throughput.
let mut index_writer = index.writer(50_000_000)?; let mut index_writer = index.writer(50_000_000)?;
index_writer.add_document(doc!( index_writer.add_document(doc!(
title => "The Old Man and the Sea", title => "The Old Man and the Sea",

View File

@@ -56,9 +56,8 @@ fn main() -> tantivy::Result<()> {
// If it is `text`, let's make sure to keep it `raw` and let's avoid // If it is `text`, let's make sure to keep it `raw` and let's avoid
// running any text processing on it. // running any text processing on it.
// This is done by associating this field to the tokenizer named `raw`. // This is done by associating this field to the tokenizer named `raw`.
// Rather than building our // Rather than building our [`TextOptions`](//docs.rs/tantivy/~0/tantivy/schema/struct.TextOptions.html) manually,
// [`TextOptions`](//docs.rs/tantivy/~0/tantivy/schema/struct.TextOptions.html) manually, We // We use the `STRING` shortcut. `STRING` stands for indexed (without term frequency or positions)
// use the `STRING` shortcut. `STRING` stands for indexed (without term frequency or positions)
// and untokenized. // and untokenized.
// //
// Because we also want to be able to see this `id` in our returned documents, // Because we also want to be able to see this `id` in our returned documents,

View File

@@ -1,9 +1,9 @@
use std::collections::HashSet; use std::collections::HashSet;
use tantivy::collector::TopDocs; use tantivy::collector::TopDocs;
use tantivy::doc;
use tantivy::query::BooleanQuery; use tantivy::query::BooleanQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, DocId, Index, Score, SegmentReader}; use tantivy::{DocId, Index, Score, SegmentReader};
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -87,7 +87,7 @@ fn main() -> tantivy::Result<()> {
.unwrap() .unwrap()
.get_first(title) .get_first(title)
.unwrap() .unwrap()
.as_text() .text()
.unwrap() .unwrap()
.to_owned() .to_owned()
}) })

View File

@@ -52,11 +52,11 @@ fn main() -> tantivy::Result<()> {
let term_the = Term::from_field_text(title, "the"); let term_the = Term::from_field_text(title, "the");
// This segment posting object is like a cursor over the documents matching the term. // This segment posting object is like a cursor over the documents matching the term.
// The `IndexRecordOption` arguments tells tantivy we will be interested in both term // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
// frequencies and positions. // and positions.
// //
// If you don't need all this information, you may get better performance by decompressing // If you don't need all this information, you may get better performance by decompressing less
// less information. // information.
if let Some(mut segment_postings) = if let Some(mut segment_postings) =
inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)? inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)?
{ {
@@ -109,11 +109,11 @@ fn main() -> tantivy::Result<()> {
let inverted_index = segment_reader.inverted_index(title)?; let inverted_index = segment_reader.inverted_index(title)?;
// This segment posting object is like a cursor over the documents matching the term. // This segment posting object is like a cursor over the documents matching the term.
// The `IndexRecordOption` arguments tells tantivy we will be interested in both term // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
// frequencies and positions. // and positions.
// //
// If you don't need all this information, you may get better performance by decompressing // If you don't need all this information, you may get better performance by decompressing less
// less information. // information.
if let Some(mut block_segment_postings) = if let Some(mut block_segment_postings) =
inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)? inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)?
{ {

View File

@@ -28,7 +28,6 @@
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use tantivy::schema::{Schema, STORED, TEXT}; use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index, IndexWriter, Opstamp, TantivyError}; use tantivy::{doc, Index, IndexWriter, Opstamp, TantivyError};
@@ -91,8 +90,7 @@ fn main() -> tantivy::Result<()> {
// # In the main thread, we commit 10 times, once every 500ms. // # In the main thread, we commit 10 times, once every 500ms.
for _ in 0..10 { for _ in 0..10 {
let opstamp: Opstamp = { let opstamp: Opstamp = {
// Committing or rolling back on the other hand requires write lock. This will block // Committing or rolling back on the other hand requires write lock. This will block other threads.
// other threads.
let mut index_writer_wlock = index_writer.write().unwrap(); let mut index_writer_wlock = index_writer.write().unwrap();
index_writer_wlock.commit()? index_writer_wlock.commit()?
}; };

View File

@@ -57,10 +57,7 @@ fn main() -> tantivy::Result<()> {
let doc = searcher.doc(doc_address)?; let doc = searcher.doc(doc_address)?;
let snippet = snippet_generator.snippet_from_doc(&doc); let snippet = snippet_generator.snippet_from_doc(&doc);
println!("Document score {}:", score); println!("Document score {}:", score);
println!( println!("title: {}", doc.get_first(title).unwrap().text().unwrap());
"title: {}",
doc.get_first(title).unwrap().as_text().unwrap()
);
println!("snippet: {}", snippet.to_html()); println!("snippet: {}", snippet.to_html());
println!("custom highlighting: {}", highlight(snippet)); println!("custom highlighting: {}", highlight(snippet));
} }

View File

@@ -6,10 +6,8 @@ use tantivy::collector::TopDocs;
use tantivy::fastfield::FastFieldReader; use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::{Field, Schema, FAST, TEXT}; use tantivy::schema::{Field, Schema, FAST, TEXT};
use tantivy::{ use tantivy::{doc, DocAddress, DocId, Index, IndexReader, SegmentReader, TrackedObject};
doc, DocAddress, DocId, Index, IndexReader, Opstamp, Searcher, SearcherGeneration, SegmentId, use tantivy::{Opstamp, Searcher, SearcherGeneration, SegmentId, Warmer};
SegmentReader, Warmer,
};
// This example shows how warmers can be used to // This example shows how warmers can be used to
// load a values from an external sources using the Warmer API. // load a values from an external sources using the Warmer API.
@@ -71,7 +69,7 @@ impl Warmer for DynamicPriceColumn {
Ok(()) Ok(())
} }
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) { fn garbage_collect(&self, live_generations: &[TrackedObject<SearcherGeneration>]) {
let live_segment_id_and_delete_ops: HashSet<(SegmentId, Option<Opstamp>)> = let live_segment_id_and_delete_ops: HashSet<(SegmentId, Option<Opstamp>)> =
live_generations live_generations
.iter() .iter()
@@ -92,6 +90,7 @@ impl Warmer for DynamicPriceColumn {
/// This map represents a map (ProductId -> Price) /// This map represents a map (ProductId -> Price)
/// ///
/// In practise, it could be fetching things from an external service, like a SQL table. /// In practise, it could be fetching things from an external service, like a SQL table.
///
#[derive(Default, Clone)] #[derive(Default, Clone)]
pub struct ExternalPriceTable { pub struct ExternalPriceTable {
prices: Arc<RwLock<HashMap<ProductId, Price>>>, prices: Arc<RwLock<HashMap<ProductId, Price>>>,

View File

@@ -4,14 +4,14 @@ extern crate test;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use fastfield_codecs::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer}; use fastfield_codecs::{
use fastfield_codecs::linearinterpol::{ bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer},
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer, linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer},
multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
},
*,
}; };
use fastfield_codecs::multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
};
use fastfield_codecs::*;
fn get_data() -> Vec<u64> { fn get_data() -> Vec<u64> {
let mut data: Vec<_> = (100..55000_u64) let mut data: Vec<_> = (100..55000_u64)

View File

@@ -1,9 +1,13 @@
use std::io::{self, Write}; use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use common::BinarySerializable; use common::BinarySerializable;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker}; use std::io::{self, Write};
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats}; use tantivy_bitpacker::BitUnpacker;
/// Depending on the field type, a different /// Depending on the field type, a different
/// fast field is required. /// fast field is required.

View File

@@ -53,8 +53,7 @@ pub trait FastFieldCodecSerializer {
pub trait FastFieldDataAccess { pub trait FastFieldDataAccess {
/// Return the value associated to the given position. /// Return the value associated to the given position.
/// ///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance /// Whenever possible use the Iterator passed to the fastfield creation instead, for performance reasons.
/// reasons.
/// ///
/// # Panics /// # Panics
/// ///
@@ -83,10 +82,12 @@ impl FastFieldDataAccess for Vec<u64> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer}; use crate::{
use crate::linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer}; bitpacked::{BitpackedFastFieldReader, BitpackedFastFieldSerializer},
use crate::multilinearinterpol::{ linearinterpol::{LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer},
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer, multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
},
}; };
pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>( pub fn create_and_validate<S: FastFieldCodecSerializer, R: FastFieldCodecReader>(

View File

@@ -1,10 +1,15 @@
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::ops::Sub; use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::{BinarySerializable, FixedSize}; use common::BinarySerializable;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker}; use common::FixedSize;
use tantivy_bitpacker::BitUnpacker;
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
/// Depending on the field type, a different /// Depending on the field type, a different
/// fast field is required. /// fast field is required.
@@ -132,7 +137,7 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
// will be offset to 0 // will be offset to 0
offset = offset.max(calculated_value - actual_value); offset = offset.max(calculated_value - actual_value);
} else { } else {
// positive value no offset reuqired //positive value no offset reuqired
rel_positive_max = rel_positive_max.max(actual_value - calculated_value); rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
} }
} }
@@ -166,7 +171,7 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
stats: FastFieldStats, stats: FastFieldStats,
) -> bool { ) -> bool {
if stats.num_vals < 3 { if stats.num_vals < 3 {
return false; // disable compressor for this case return false; //disable compressor for this case
} }
// On serialisation the offset is added to the actual value. // On serialisation the offset is added to the actual value.
// We need to make sure this won't run into overflow calculation issues. // We need to make sure this won't run into overflow calculation issues.
@@ -206,8 +211,8 @@ impl FastFieldCodecSerializer for LinearInterpolFastFieldSerializer {
.max() .max()
.unwrap_or(0); .unwrap_or(0);
// the theory would be that we don't have the actual max_distance, but we are close within // the theory would be that we don't have the actual max_distance, but we are close within 50%
// 50% threshold. // threshold.
// It is multiplied by 2 because in a log case scenario the line would be as much above as // It is multiplied by 2 because in a log case scenario the line would be as much above as
// below. So the offset would = max_distance // below. So the offset would = max_distance
// //

View File

@@ -1,8 +1,10 @@
#[macro_use] #[macro_use]
extern crate prettytable; extern crate prettytable;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer; use fastfield_codecs::{
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer; linearinterpol::LinearInterpolFastFieldSerializer,
use fastfield_codecs::{FastFieldCodecSerializer, FastFieldStats}; multilinearinterpol::MultiLinearInterpolFastFieldSerializer, FastFieldCodecSerializer,
FastFieldStats,
};
use prettytable::{Cell, Row, Table}; use prettytable::{Cell, Row, Table};
fn main() { fn main() {
@@ -22,7 +24,7 @@ fn main() {
); );
results.push(res); results.push(res);
// let best_estimation_codec = results //let best_estimation_codec = results
//.iter() //.iter()
//.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap()) //.min_by(|res1, res2| res1.partial_cmp(&res2).unwrap())
//.unwrap(); //.unwrap();
@@ -39,6 +41,7 @@ fn main() {
} else { } else {
(est.to_string(), comp.to_string()) (est.to_string(), comp.to_string())
}; };
#[allow(clippy::all)]
let style = if comp == best_compression_ratio_codec.1 { let style = if comp == best_compression_ratio_codec.1 {
"Fb" "Fb"
} else { } else {
@@ -46,7 +49,7 @@ fn main() {
}; };
table.add_row(Row::new(vec![ table.add_row(Row::new(vec![
Cell::new(name).style_spec("bFg"), Cell::new(&name.to_string()).style_spec("bFg"),
Cell::new(&ratio_cell).style_spec(style), Cell::new(&ratio_cell).style_spec(style),
Cell::new(&est_cell).style_spec(""), Cell::new(&est_cell).style_spec(""),
])); ]));
@@ -70,7 +73,7 @@ pub fn get_codec_test_data_sets() -> Vec<(Vec<u64>, &'static str)> {
current_cumulative current_cumulative
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>(); //let data = (1..=200000_u64).map(|num| num + num).collect::<Vec<_>>();
data_and_names.push((data, "Monotonically increasing concave")); data_and_names.push((data, "Monotonically increasing concave"));
let mut current_cumulative = 0; let mut current_cumulative = 0;

View File

@@ -1,22 +1,30 @@
//! MultiLinearInterpol compressor uses linear interpolation to guess a values and stores the /*!
//! offset, but in blocks of 512.
//!
//! With a CHUNK_SIZE of 512 and 29 byte metadata per block, we get a overhead for metadata of 232 /
//! 512 = 0,45 bits per element. The additional space required per element in a block is the the
//! maximum deviation of the linear interpolation estimation function.
//!
//! E.g. if the maximum deviation of an element is 12, all elements cost 4bits.
//!
//! Size per block:
//! Num Elements * Maximum Deviation from Interpolation + 29 Byte Metadata
MultiLinearInterpol compressor uses linear interpolation to guess a values and stores the offset, but in blocks of 512.
With a CHUNK_SIZE of 512 and 29 byte metadata per block, we get a overhead for metadata of 232 / 512 = 0,45 bits per element.
The additional space required per element in a block is the the maximum deviation of the linear interpolation estimation function.
E.g. if the maximum deviation of an element is 12, all elements cost 4bits.
Size per block:
Num Elements * Maximum Deviation from Interpolation + 29 Byte Metadata
*/
use crate::FastFieldCodecReader;
use crate::FastFieldCodecSerializer;
use crate::FastFieldDataAccess;
use crate::FastFieldStats;
use common::CountingWriter;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::ops::Sub; use std::ops::Sub;
use tantivy_bitpacker::compute_num_bits;
use tantivy_bitpacker::BitPacker;
use common::{BinarySerializable, CountingWriter, DeserializeFrom}; use common::BinarySerializable;
use tantivy_bitpacker::{compute_num_bits, BitPacker, BitUnpacker}; use common::DeserializeFrom;
use tantivy_bitpacker::BitUnpacker;
use crate::{FastFieldCodecReader, FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats};
const CHUNK_SIZE: u64 = 512; const CHUNK_SIZE: u64 = 512;
@@ -244,11 +252,11 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
); );
if calculated_value > actual_value { if calculated_value > actual_value {
// negative value we need to apply an offset // negative value we need to apply an offset
// we ignore negative values in the max value calculation, because negative // we ignore negative values in the max value calculation, because negative values
// values will be offset to 0 // will be offset to 0
offset = offset.max(calculated_value - actual_value); offset = offset.max(calculated_value - actual_value);
} else { } else {
// positive value no offset reuqired //positive value no offset reuqired
rel_positive_max = rel_positive_max.max(actual_value - calculated_value); rel_positive_max = rel_positive_max.max(actual_value - calculated_value);
} }
} }
@@ -342,8 +350,8 @@ impl FastFieldCodecSerializer for MultiLinearInterpolFastFieldSerializer {
.unwrap(); .unwrap();
// Estimate one block and extrapolate the cost to all blocks. // Estimate one block and extrapolate the cost to all blocks.
// the theory would be that we don't have the actual max_distance, but we are close within // the theory would be that we don't have the actual max_distance, but we are close within 50%
// 50% threshold. // threshold.
// It is multiplied by 2 because in a log case scenario the line would be as much above as // It is multiplied by 2 because in a log case scenario the line would be as much above as
// below. So the offset would = max_distance // below. So the offset would = max_distance
// //

View File

@@ -1,9 +1,11 @@
use std::convert::TryInto; #![allow(clippy::return_self_not_must_use)]
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io, mem};
use stable_deref_trait::StableDeref; use stable_deref_trait::StableDeref;
use std::convert::TryInto;
use std::mem;
use std::ops::{Deref, Range};
use std::sync::Arc;
use std::{fmt, io};
/// An OwnedBytes simply wraps an object that owns a slice of data and exposes /// An OwnedBytes simply wraps an object that owns a slice of data and exposes
/// this data as a static slice. /// this data as a static slice.
@@ -100,6 +102,7 @@ impl OwnedBytes {
} }
/// Drops the left most `advance_len` bytes. /// Drops the left most `advance_len` bytes.
///
#[inline] #[inline]
pub fn advance(&mut self, advance_len: usize) { pub fn advance(&mut self, advance_len: usize) {
self.data = &self.data[advance_len..] self.data = &self.data[advance_len..]
@@ -160,7 +163,8 @@ impl PartialEq<str> for OwnedBytes {
} }
impl<'a, T: ?Sized> PartialEq<&'a T> for OwnedBytes impl<'a, T: ?Sized> PartialEq<&'a T> for OwnedBytes
where OwnedBytes: PartialEq<T> where
OwnedBytes: PartialEq<T>,
{ {
fn eq(&self, other: &&'a T) -> bool { fn eq(&self, other: &&'a T) -> bool {
*self == **other *self == **other

View File

@@ -5,8 +5,9 @@ authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
description = """Search engine library""" description = """Search engine library"""
homepage = "https://github.com/quickwit-oss/tantivy" documentation = "https://quickwit-inc.github.io/tantivy/tantivy/index.html"
repository = "https://github.com/quickwit-oss/tantivy" homepage = "https://github.com/quickwit-inc/tantivy"
repository = "https://github.com/quickwit-inc/tantivy"
readme = "README.md" readme = "README.md"
keywords = ["search", "information", "retrieval"] keywords = ["search", "information", "retrieval"]
edition = "2018" edition = "2018"

View File

@@ -1,20 +1,17 @@
use combine::error::StringStreamError; use super::user_input_ast::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLiteral};
use crate::Occur;
use combine::parser::char::{char, digit, space, spaces, string}; use combine::parser::char::{char, digit, space, spaces, string};
use combine::parser::combinator::recognize;
use combine::parser::range::{take_while, take_while1}; use combine::parser::range::{take_while, take_while1};
use combine::parser::repeat::escaped; use combine::parser::repeat::escaped;
use combine::parser::Parser; use combine::parser::Parser;
use combine::{ use combine::{
attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value, attempt, choice, eof, many, many1, one_of, optional, parser, satisfy, skip_many1, value,
}; };
use combine::{error::StringStreamError, parser::combinator::recognize};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use regex::Regex; use regex::Regex;
use super::user_input_ast::{UserInputAst, UserInputBound, UserInputLeaf, UserInputLiteral}; // Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to special characters.
use crate::Occur;
// Note: '-' char is only forbidden at the beginning of a field name, would be clearer to add it to
// special characters.
const SPECIAL_CHARS: &[char] = &[ const SPECIAL_CHARS: &[char] = &[
'+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ', '+', '^', '`', ':', '{', '}', '"', '[', ']', '(', ')', '~', '!', '\\', '*', ' ',
]; ];
@@ -366,9 +363,8 @@ mod test {
type TestParseResult = Result<(), StringStreamError>; type TestParseResult = Result<(), StringStreamError>;
use combine::parser::Parser;
use super::*; use super::*;
use combine::parser::Parser;
pub fn nearly_equals(a: f64, b: f64) -> bool { pub fn nearly_equals(a: f64, b: f64) -> bool {
(a - b).abs() < 0.0005 * (a + b).abs() (a - b).abs() < 0.0005 * (a + b).abs()

View File

@@ -1,7 +1 @@
comment_width = 120 use_try_shorthand = true
format_strings = true
group_imports = "StdExternalCrate"
imports_granularity = "Module"
normalize_comments = true
where_single_line = true
wrap_comments = true

View File

@@ -1,6 +1,9 @@
use super::Collector; use super::Collector;
use crate::collector::SegmentCollector; use crate::collector::SegmentCollector;
use crate::{DocId, Score, SegmentOrdinal, SegmentReader}; use crate::DocId;
use crate::Score;
use crate::SegmentOrdinal;
use crate::SegmentReader;
/// `CountCollector` collector only counts how many /// `CountCollector` collector only counts how many
/// documents match the query. /// documents match the query.
@@ -77,7 +80,8 @@ impl SegmentCollector for SegmentCountCollector {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{Count, SegmentCountCollector}; use super::{Count, SegmentCountCollector};
use crate::collector::{Collector, SegmentCollector}; use crate::collector::Collector;
use crate::collector::SegmentCollector;
#[test] #[test]
fn test_count_collect_does_not_requires_scoring() { fn test_count_collect_does_not_requires_scoring() {

View File

@@ -8,7 +8,8 @@ pub(crate) struct CustomScoreTopCollector<TCustomScorer, TScore = Score> {
} }
impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore> impl<TCustomScorer, TScore> CustomScoreTopCollector<TCustomScorer, TScore>
where TScore: Clone + PartialOrd where
TScore: Clone + PartialOrd,
{ {
pub(crate) fn new( pub(crate) fn new(
custom_scorer: TCustomScorer, custom_scorer: TCustomScorer,
@@ -113,7 +114,8 @@ where
} }
impl<F, TScore> CustomSegmentScorer<TScore> for F impl<F, TScore> CustomSegmentScorer<TScore> for F
where F: 'static + FnMut(DocId) -> TScore where
F: 'static + FnMut(DocId) -> TScore,
{ {
fn score(&mut self, doc: DocId) -> TScore { fn score(&mut self, doc: DocId) -> TScore {
(self)(doc) (self)(doc)

View File

@@ -1,8 +1,9 @@
use std::collections::HashSet; use std::collections::HashSet;
use super::{Collector, SegmentCollector};
use crate::{DocAddress, DocId, Score}; use crate::{DocAddress, DocId, Score};
use super::{Collector, SegmentCollector};
/// Collectors that returns the set of DocAddress that matches the query. /// Collectors that returns the set of DocAddress that matches the query.
/// ///
/// This collector is mostly useful for tests. /// This collector is mostly useful for tests.

View File

@@ -1,14 +1,21 @@
use crate::collector::Collector;
use crate::collector::SegmentCollector;
use crate::fastfield::FacetReader;
use crate::schema::Facet;
use crate::schema::Field;
use crate::DocId;
use crate::Score;
use crate::SegmentOrdinal;
use crate::SegmentReader;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::{btree_map, BTreeMap, BTreeSet, BinaryHeap}; use std::collections::btree_map;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::BinaryHeap;
use std::iter::Peekable; use std::iter::Peekable;
use std::ops::Bound; use std::ops::Bound;
use std::{u64, usize}; use std::{u64, usize};
use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::FacetReader;
use crate::schema::{Facet, Field};
use crate::{DocId, Score, SegmentOrdinal, SegmentReader};
struct Hit<'a> { struct Hit<'a> {
count: u64, count: u64,
facet: &'a Facet, facet: &'a Facet,
@@ -233,7 +240,9 @@ impl FacetCollector {
/// If you need the correct number of unique documents for two such facets, /// If you need the correct number of unique documents for two such facets,
/// just add them in separate `FacetCollector`. /// just add them in separate `FacetCollector`.
pub fn add_facet<T>(&mut self, facet_from: T) pub fn add_facet<T>(&mut self, facet_from: T)
where Facet: From<T> { where
Facet: From<T>,
{
let facet = Facet::from(facet_from); let facet = Facet::from(facet_from);
for old_facet in &self.facets { for old_facet in &self.facets {
assert!( assert!(
@@ -393,7 +402,9 @@ impl FacetCounts {
/// Returns an iterator over all of the facet count pairs inside this result. /// Returns an iterator over all of the facet count pairs inside this result.
/// See the documentation for [FacetCollector] for a usage example. /// See the documentation for [FacetCollector] for a usage example.
pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_> pub fn get<T>(&self, facet_from: T) -> FacetChildIterator<'_>
where Facet: From<T> { where
Facet: From<T>,
{
let facet = Facet::from(facet_from); let facet = Facet::from(facet_from);
let left_bound = Bound::Excluded(facet.clone()); let left_bound = Bound::Excluded(facet.clone());
let right_bound = if facet.is_root() { let right_bound = if facet.is_root() {
@@ -412,7 +423,9 @@ impl FacetCounts {
/// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts. /// Returns a vector of top `k` facets with their counts, sorted highest-to-lowest by counts.
/// See the documentation for [FacetCollector] for a usage example. /// See the documentation for [FacetCollector] for a usage example.
pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)> pub fn top_k<T>(&self, facet: T, k: usize) -> Vec<(&Facet, u64)>
where Facet: From<T> { where
Facet: From<T>,
{
let mut heap = BinaryHeap::with_capacity(k); let mut heap = BinaryHeap::with_capacity(k);
let mut it = self.get(facet); let mut it = self.get(facet);
@@ -445,18 +458,16 @@ impl FacetCounts {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::iter;
use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use super::{FacetCollector, FacetCounts}; use super::{FacetCollector, FacetCounts};
use crate::collector::Count; use crate::collector::Count;
use crate::core::Index; use crate::core::Index;
use crate::query::{AllQuery, QueryParser, TermQuery}; use crate::query::{AllQuery, QueryParser, TermQuery};
use crate::schema::{Document, Facet, FacetOptions, Field, IndexRecordOption, Schema}; use crate::schema::{Document, Facet, FacetOptions, Field, IndexRecordOption, Schema};
use crate::Term; use crate::Term;
use rand::distributions::Uniform;
use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng};
use std::iter;
#[test] #[test]
fn test_facet_collector_drilldown() -> crate::Result<()> { fn test_facet_collector_drilldown() -> crate::Result<()> {
@@ -511,9 +522,8 @@ mod tests {
} }
#[test] #[test]
#[should_panic( #[should_panic(expected = "Tried to add a facet which is a descendant of \
expected = "Tried to add a facet which is a descendant of an already added facet." an already added facet.")]
)]
fn test_misused_facet_collector() { fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
@@ -690,14 +700,13 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use rand::seq::SliceRandom;
use rand::thread_rng;
use test::Bencher;
use crate::collector::FacetCollector; use crate::collector::FacetCollector;
use crate::query::AllQuery; use crate::query::AllQuery;
use crate::schema::{Facet, Schema, INDEXED}; use crate::schema::{Facet, Schema, INDEXED};
use crate::Index; use crate::Index;
use rand::seq::SliceRandom;
use rand::thread_rng;
use test::Bencher;
#[bench] #[bench]
fn bench_facet_collector(b: &mut Bencher) { fn bench_facet_collector(b: &mut Bencher) {

View File

@@ -17,8 +17,7 @@ use crate::schema::Field;
use crate::{Score, SegmentReader, TantivyError}; use crate::{Score, SegmentReader, TantivyError};
/// The `FilterCollector` filters docs using a fast field value and a predicate. /// The `FilterCollector` filters docs using a fast field value and a predicate.
/// Only the documents for which the predicate returned "true" will be passed on to the next /// Only the documents for which the predicate returned "true" will be passed on to the next collector.
/// collector.
/// ///
/// ```rust /// ```rust
/// use tantivy::collector::{TopDocs, FilterCollector}; /// use tantivy::collector::{TopDocs, FilterCollector};
@@ -59,7 +58,8 @@ use crate::{Score, SegmentReader, TantivyError};
/// # } /// # }
/// ``` /// ```
pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue> pub struct FilterCollector<TCollector, TPredicate, TPredicateValue: FastValue>
where TPredicate: 'static + Clone where
TPredicate: 'static + Clone,
{ {
field: Field, field: Field,
collector: TCollector, collector: TCollector,

View File

@@ -1,9 +1,8 @@
use fastdivide::DividerU64;
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue}; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue};
use crate::schema::{Field, Type}; use crate::schema::{Field, Type};
use crate::{DocId, Score}; use crate::{DocId, Score};
use fastdivide::DividerU64;
/// Histogram builds an histogram of the values of a fastfield for the /// Histogram builds an histogram of the values of a fastfield for the
/// collected DocSet. /// collected DocSet.
@@ -37,8 +36,8 @@ impl HistogramCollector {
/// - `bucket_width`: the length of the interval that is associated to each buckets. /// - `bucket_width`: the length of the interval that is associated to each buckets.
/// - `num_buckets`: The overall number of buckets. /// - `num_buckets`: The overall number of buckets.
/// ///
/// Together, this parameters define a partition of `[min_value, min_value + num_buckets * /// Together, this parameters define a partition of `[min_value, min_value + num_buckets * bucket_width)`
/// bucket_width)` into `num_buckets` intervals of width bucket that we call `bucket`. /// into `num_buckets` intervals of width bucket that we call `bucket`.
/// ///
/// # Disclaimer /// # Disclaimer
/// This function panics if the field given is of type f64. /// This function panics if the field given is of type f64.
@@ -148,13 +147,12 @@ fn add_vecs(mut vals_list: Vec<Vec<u64>>, len: usize) -> Vec<u64> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use fastdivide::DividerU64;
use query::AllQuery;
use super::{add_vecs, HistogramCollector, HistogramComputer}; use super::{add_vecs, HistogramCollector, HistogramComputer};
use crate::chrono::{TimeZone, Utc}; use crate::chrono::{TimeZone, Utc};
use crate::schema::{Schema, FAST}; use crate::schema::{Schema, FAST};
use crate::{doc, query, Index}; use crate::{doc, query, Index};
use fastdivide::DividerU64;
use query::AllQuery;
#[test] #[test]
fn test_add_histograms_simple() { fn test_add_histograms_simple() {

View File

@@ -1,90 +1,95 @@
//! # Collectors /*!
//!
//! Collectors define the information you want to extract from the documents matching the queries.
//! In tantivy jargon, we call this information your search "fruit".
//!
//! Your fruit could for instance be :
//! - [the count of matching documents](./struct.Count.html)
//! - [the top 10 documents, by relevancy or by a fast field](./struct.TopDocs.html)
//! - [facet counts](./struct.FacetCollector.html)
//!
//! At one point in your code, you will trigger the actual search operation by calling
//! [the `search(...)` method of your `Searcher` object](../struct.Searcher.html#method.search).
//! This call will look like this.
//!
//! ```verbatim
//! let fruit = searcher.search(&query, &collector)?;
//! ```
//!
//! Here the type of fruit is actually determined as an associated type of the collector
//! (`Collector::Fruit`).
//!
//!
//! # Combining several collectors
//!
//! A rich search experience often requires to run several collectors on your search query.
//! For instance,
//! - selecting the top-K products matching your query
//! - counting the matching documents
//! - computing several facets
//! - computing statistics about the matching product prices
//!
//! A simple and efficient way to do that is to pass your collectors as one tuple.
//! The resulting `Fruit` will then be a typed tuple with each collector's original fruits
//! in their respective position.
//!
//! ```rust
//! # use tantivy::schema::*;
//! # use tantivy::*;
//! # use tantivy::query::*;
//! use tantivy::collector::{Count, TopDocs};
//! #
//! # fn main() -> tantivy::Result<()> {
//! # let mut schema_builder = Schema::builder();
//! # let title = schema_builder.add_text_field("title", TEXT);
//! # let schema = schema_builder.build();
//! # let index = Index::create_in_ram(schema);
//! # let mut index_writer = index.writer(3_000_000)?;
//! # index_writer.add_document(doc!(
//! # title => "The Name of the Wind",
//! # ))?;
//! # index_writer.add_document(doc!(
//! # title => "The Diary of Muadib",
//! # ))?;
//! # index_writer.commit()?;
//! # let reader = index.reader()?;
//! # let searcher = reader.searcher();
//! # let query_parser = QueryParser::for_index(&index, vec![title]);
//! # let query = query_parser.parse_query("diary")?;
//! let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
//! searcher.search(&query, &(Count, TopDocs::with_limit(2)))?;
//! # Ok(())
//! # }
//! ```
//!
//! The `Collector` trait is implemented for up to 4 collectors.
//! If you have more than 4 collectors, you can either group them into
//! tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html).
//!
//! # Combining several collectors dynamically
//!
//! Combining collectors into a tuple is a zero-cost abstraction: everything
//! happens as if you had manually implemented a single collector
//! combining all of our features.
//!
//! Unfortunately it requires you to know at compile time your collector types.
//! If on the other hand, the collectors depend on some query parameter,
//! you can rely on `MultiCollector`'s.
//!
//!
//! # Implementing your own collectors.
//!
//! See the `custom_collector` example.
# Collectors
Collectors define the information you want to extract from the documents matching the queries.
In tantivy jargon, we call this information your search "fruit".
Your fruit could for instance be :
- [the count of matching documents](./struct.Count.html)
- [the top 10 documents, by relevancy or by a fast field](./struct.TopDocs.html)
- [facet counts](./struct.FacetCollector.html)
At one point in your code, you will trigger the actual search operation by calling
[the `search(...)` method of your `Searcher` object](../struct.Searcher.html#method.search).
This call will look like this.
```verbatim
let fruit = searcher.search(&query, &collector)?;
```
Here the type of fruit is actually determined as an associated type of the collector (`Collector::Fruit`).
# Combining several collectors
A rich search experience often requires to run several collectors on your search query.
For instance,
- selecting the top-K products matching your query
- counting the matching documents
- computing several facets
- computing statistics about the matching product prices
A simple and efficient way to do that is to pass your collectors as one tuple.
The resulting `Fruit` will then be a typed tuple with each collector's original fruits
in their respective position.
```rust
# use tantivy::schema::*;
# use tantivy::*;
# use tantivy::query::*;
use tantivy::collector::{Count, TopDocs};
#
# fn main() -> tantivy::Result<()> {
# let mut schema_builder = Schema::builder();
# let title = schema_builder.add_text_field("title", TEXT);
# let schema = schema_builder.build();
# let index = Index::create_in_ram(schema);
# let mut index_writer = index.writer(3_000_000)?;
# index_writer.add_document(doc!(
# title => "The Name of the Wind",
# ))?;
# index_writer.add_document(doc!(
# title => "The Diary of Muadib",
# ))?;
# index_writer.commit()?;
# let reader = index.reader()?;
# let searcher = reader.searcher();
# let query_parser = QueryParser::for_index(&index, vec![title]);
# let query = query_parser.parse_query("diary")?;
let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
searcher.search(&query, &(Count, TopDocs::with_limit(2)))?;
# Ok(())
# }
```
The `Collector` trait is implemented for up to 4 collectors.
If you have more than 4 collectors, you can either group them into
tuples of tuples `(a,(b,(c,d)))`, or rely on [`MultiCollector`](./struct.MultiCollector.html).
# Combining several collectors dynamically
Combining collectors into a tuple is a zero-cost abstraction: everything
happens as if you had manually implemented a single collector
combining all of our features.
Unfortunately it requires you to know at compile time your collector types.
If on the other hand, the collectors depend on some query parameter,
you can rely on `MultiCollector`'s.
# Implementing your own collectors.
See the `custom_collector` example.
*/
use crate::DocId;
use crate::Score;
use crate::SegmentOrdinal;
use crate::SegmentReader;
use downcast_rs::impl_downcast; use downcast_rs::impl_downcast;
use crate::{DocId, Score, SegmentOrdinal, SegmentReader};
mod count_collector; mod count_collector;
pub use self::count_collector::Count; pub use self::count_collector::Count;
@@ -106,7 +111,8 @@ mod tweak_score_top_collector;
pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker}; pub use self::tweak_score_top_collector::{ScoreSegmentTweaker, ScoreTweaker};
mod facet_collector; mod facet_collector;
pub use self::facet_collector::{FacetCollector, FacetCounts}; pub use self::facet_collector::FacetCollector;
pub use self::facet_collector::FacetCounts;
use crate::query::Weight; use crate::query::Weight;
mod docset_collector; mod docset_collector;

View File

@@ -1,10 +1,14 @@
use super::Collector;
use super::SegmentCollector;
use crate::collector::Fruit;
use crate::DocId;
use crate::Score;
use crate::SegmentOrdinal;
use crate::SegmentReader;
use crate::TantivyError;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::ops::Deref; use std::ops::Deref;
use super::{Collector, SegmentCollector};
use crate::collector::Fruit;
use crate::{DocId, Score, SegmentOrdinal, SegmentReader, TantivyError};
pub struct MultiFruit { pub struct MultiFruit {
sub_fruits: Vec<Option<Box<dyn Fruit>>>, sub_fruits: Vec<Option<Box<dyn Fruit>>>,
} }
@@ -100,8 +104,7 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// ///
/// If the type of the collectors is known, you can just group yours collectors /// If the type of the collectors is known, you can just group yours collectors
/// in a tuple. See the /// in a tuple. See the
/// [Combining several collectors section of the collector /// [Combining several collectors section of the collector documentation](./index.html#combining-several-collectors).
/// documentation](./index.html#combining-several-collectors).
/// ///
/// ```rust /// ```rust
/// use tantivy::collector::{Count, TopDocs, MultiCollector}; /// use tantivy::collector::{Count, TopDocs, MultiCollector};
@@ -245,8 +248,10 @@ mod tests {
use super::*; use super::*;
use crate::collector::{Count, TopDocs}; use crate::collector::{Count, TopDocs};
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{IndexRecordOption, Schema, TEXT}; use crate::schema::IndexRecordOption;
use crate::{Index, Term}; use crate::schema::{Schema, TEXT};
use crate::Index;
use crate::Term;
#[test] #[test]
fn test_multi_collector() -> crate::Result<()> { fn test_multi_collector() -> crate::Result<()> {

View File

@@ -1,12 +1,20 @@
use std::str::FromStr;
use super::*; use super::*;
use crate::collector::{Count, FilterCollector, TopDocs};
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::fastfield::{BytesFastFieldReader, DynamicFastFieldReader, FastFieldReader}; use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::DynamicFastFieldReader;
use crate::fastfield::FastFieldReader;
use crate::schema::Field;
use crate::DocId;
use crate::Score;
use crate::SegmentOrdinal;
use crate::{DocAddress, Document, Searcher};
use crate::collector::{Count, FilterCollector, TopDocs};
use crate::query::{AllQuery, QueryParser}; use crate::query::{AllQuery, QueryParser};
use crate::schema::{Field, Schema, FAST, TEXT}; use crate::schema::{Schema, FAST, TEXT};
use crate::{doc, DateTime, DocAddress, DocId, Document, Index, Score, Searcher, SegmentOrdinal}; use crate::DateTime;
use crate::{doc, Index};
use std::str::FromStr;
pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector { pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
compute_score: true, compute_score: true,

View File

@@ -1,9 +1,11 @@
use crate::DocAddress;
use crate::DocId;
use crate::SegmentOrdinal;
use crate::SegmentReader;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::BinaryHeap; use std::collections::BinaryHeap;
use std::marker::PhantomData; use std::marker::PhantomData;
use crate::{DocAddress, DocId, SegmentOrdinal, SegmentReader};
/// Contains a feature (field, score, etc.) of a document along with the document address. /// Contains a feature (field, score, etc.) of a document along with the document address.
/// ///
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
@@ -60,7 +62,8 @@ pub(crate) struct TopCollector<T> {
} }
impl<T> TopCollector<T> impl<T> TopCollector<T>
where T: PartialOrd + Clone where
T: PartialOrd + Clone,
{ {
/// Creates a top collector, with a number of documents equal to "limit". /// Creates a top collector, with a number of documents equal to "limit".
/// ///
@@ -250,7 +253,7 @@ mod tests {
// when harvesting we have to guarantee stable sorting in case of a tie // when harvesting we have to guarantee stable sorting in case of a tie
// on the score // on the score
let doc_ids_collection = [4, 5, 6]; let doc_ids_collection = [4, 5, 6];
let score = 3.3f32; let score = 3.14;
let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2); let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
for id in &doc_ids_collection { for id in &doc_ids_collection {
@@ -319,9 +322,8 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use test::Bencher;
use super::TopSegmentCollector; use super::TopSegmentCollector;
use test::Bencher;
#[bench] #[bench]
fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) { fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {

View File

@@ -1,18 +1,21 @@
use std::collections::BinaryHeap;
use std::fmt;
use std::marker::PhantomData;
use super::Collector; use super::Collector;
use crate::collector::custom_score_top_collector::CustomScoreTopCollector; use crate::collector::top_collector::{ComparableDoc, TopCollector};
use crate::collector::top_collector::{ComparableDoc, TopCollector, TopSegmentCollector};
use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector; use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
use crate::collector::{ use crate::collector::{
CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector, CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
}; };
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, FastValue}; use crate::fastfield::{DynamicFastFieldReader, FastFieldReader};
use crate::query::Weight; use crate::query::Weight;
use crate::schema::Field; use crate::schema::Field;
use crate::{DocAddress, DocId, Score, SegmentOrdinal, SegmentReader, TantivyError}; use crate::DocAddress;
use crate::DocId;
use crate::Score;
use crate::SegmentOrdinal;
use crate::SegmentReader;
use crate::{collector::custom_score_top_collector::CustomScoreTopCollector, fastfield::FastValue};
use crate::{collector::top_collector::TopSegmentCollector, TantivyError};
use std::fmt;
use std::{collections::BinaryHeap, marker::PhantomData};
struct FastFieldConvertCollector< struct FastFieldConvertCollector<
TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>, TCollector: Collector<Fruit = Vec<(u64, DocAddress)>>,
@@ -214,12 +217,11 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field. /// Set top-K to rank documents by a given fast field.
/// ///
/// If the field is not a fast or does not exist, this method returns successfully (it is not /// If the field is not a fast or does not exist, this method returns successfully (it is not aware of any schema).
/// aware of any schema). An error will be returned at the moment of search. /// An error will be returned at the moment of search.
/// ///
/// If the field is a FAST field but not a u64 field, search will return successfully but it /// If the field is a FAST field but not a u64 field, search will return successfully but it will return
/// will return returns a monotonic u64-representation (ie. the order is still correct) of /// returns a monotonic u64-representation (ie. the order is still correct) of the requested field type.
/// the requested field type.
/// ///
/// # Example /// # Example
/// ///
@@ -294,15 +296,14 @@ impl TopDocs {
/// Set top-K to rank documents by a given fast field. /// Set top-K to rank documents by a given fast field.
/// ///
/// If the field is not a fast field, or its field type does not match the generic type, this /// If the field is not a fast field, or its field type does not match the generic type, this method does not panic,
/// method does not panic, but an explicit error will be returned at the moment of /// but an explicit error will be returned at the moment of collection.
/// collection.
/// ///
/// Note that this method is a generic. The requested fast field type will be often /// Note that this method is a generic. The requested fast field type will be often
/// inferred in your code by the rust compiler. /// inferred in your code by the rust compiler.
/// ///
/// Implementation-wise, for performance reason, tantivy will manipulate the u64 representation /// Implementation-wise, for performance reason, tantivy will manipulate the u64 representation of your fast
/// of your fast field until the last moment. /// field until the last moment.
/// ///
/// # Example /// # Example
/// ///
@@ -714,7 +715,10 @@ mod tests {
use crate::collector::Collector; use crate::collector::Collector;
use crate::query::{AllQuery, Query, QueryParser}; use crate::query::{AllQuery, Query, QueryParser};
use crate::schema::{Field, Schema, FAST, STORED, TEXT}; use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::{DocAddress, DocId, Index, IndexWriter, Score, SegmentReader}; use crate::Index;
use crate::IndexWriter;
use crate::Score;
use crate::{DocAddress, DocId, SegmentReader};
fn make_index() -> crate::Result<Index> { fn make_index() -> crate::Result<Index> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();

View File

@@ -1,6 +1,7 @@
use crate::collector::top_collector::{TopCollector, TopSegmentCollector}; use crate::collector::top_collector::{TopCollector, TopSegmentCollector};
use crate::collector::{Collector, SegmentCollector}; use crate::collector::{Collector, SegmentCollector};
use crate::{DocAddress, DocId, Result, Score, SegmentReader}; use crate::DocAddress;
use crate::{DocId, Result, Score, SegmentReader};
pub(crate) struct TweakedScoreTopCollector<TScoreTweaker, TScore = Score> { pub(crate) struct TweakedScoreTopCollector<TScoreTweaker, TScore = Score> {
score_tweaker: TScoreTweaker, score_tweaker: TScoreTweaker,
@@ -8,7 +9,8 @@ pub(crate) struct TweakedScoreTopCollector<TScoreTweaker, TScore = Score> {
} }
impl<TScoreTweaker, TScore> TweakedScoreTopCollector<TScoreTweaker, TScore> impl<TScoreTweaker, TScore> TweakedScoreTopCollector<TScoreTweaker, TScore>
where TScore: Clone + PartialOrd where
TScore: Clone + PartialOrd,
{ {
pub fn new( pub fn new(
score_tweaker: TScoreTweaker, score_tweaker: TScoreTweaker,
@@ -116,7 +118,8 @@ where
} }
impl<F, TScore> ScoreSegmentTweaker<TScore> for F impl<F, TScore> ScoreSegmentTweaker<TScore> for F
where F: 'static + FnMut(DocId, Score) -> TScore where
F: 'static + FnMut(DocId, Score) -> TScore,
{ {
fn score(&mut self, doc: DocId, score: Score) -> TScore { fn score(&mut self, doc: DocId, score: Score) -> TScore {
(self)(doc, score) (self)(doc, score)

View File

@@ -57,11 +57,7 @@ impl Executor {
let (idx, arg) = arg_with_idx; let (idx, arg) = arg_with_idx;
let fruit = f(arg); let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) { if let Err(err) = fruit_sender.send((idx, fruit)) {
error!( error!("Failed to send search task. It probably means all search threads have panicked. {:?}", err);
"Failed to send search task. It probably means all search \
threads have panicked. {:?}",
err
);
} }
}); });
} }

View File

@@ -1,27 +1,35 @@
use super::{segment::Segment, IndexSettings};
use crate::core::Executor;
use crate::core::IndexMeta;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentMetaInventory;
use crate::core::META_FILEPATH;
use crate::directory::error::OpenReadError;
use crate::directory::ManagedDirectory;
#[cfg(feature = "mmap")]
use crate::directory::MmapDirectory;
use crate::directory::INDEX_WRITER_LOCK;
use crate::directory::{Directory, RamDirectory};
use crate::error::DataCorruption;
use crate::error::TantivyError;
use crate::indexer::index_writer::{HEAP_SIZE_MIN, MAX_NUM_THREAD};
use crate::indexer::segment_updater::save_new_metas;
use crate::reader::IndexReader;
use crate::reader::IndexReaderBuilder;
use crate::schema::Field;
use crate::schema::FieldType;
use crate::schema::Schema;
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt; use std::fmt;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use std::path::Path; use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use super::segment::Segment;
use super::IndexSettings;
use crate::core::{
Executor, IndexMeta, SegmentId, SegmentMeta, SegmentMetaInventory, META_FILEPATH,
};
use crate::directory::error::OpenReadError;
#[cfg(feature = "mmap")]
use crate::directory::MmapDirectory;
use crate::directory::{Directory, ManagedDirectory, RamDirectory, INDEX_WRITER_LOCK};
use crate::error::{DataCorruption, TantivyError};
use crate::indexer::index_writer::{MAX_NUM_THREAD, MEMORY_ARENA_NUM_BYTES_MIN};
use crate::indexer::segment_updater::save_new_metas;
use crate::reader::{IndexReader, IndexReaderBuilder};
use crate::schema::{Field, FieldType, Schema};
use crate::tokenizer::{TextAnalyzer, TokenizerManager};
use crate::IndexWriter;
fn load_metas( fn load_metas(
directory: &dyn Directory, directory: &dyn Directory,
inventory: &SegmentMetaInventory, inventory: &SegmentMetaInventory,
@@ -70,6 +78,7 @@ fn load_metas(
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let settings = IndexSettings{sort_by_field: Some(IndexSortByField{field:"number".to_string(), order:Order::Asc}), ..Default::default()}; /// let settings = IndexSettings{sort_by_field: Some(IndexSortByField{field:"number".to_string(), order:Order::Asc}), ..Default::default()};
/// let index = Index::builder().schema(schema).settings(settings).create_in_ram(); /// let index = Index::builder().schema(schema).settings(settings).create_in_ram();
///
/// ``` /// ```
pub struct IndexBuilder { pub struct IndexBuilder {
schema: Option<Schema>, schema: Option<Schema>,
@@ -88,21 +97,16 @@ impl IndexBuilder {
index_settings: IndexSettings::default(), index_settings: IndexSettings::default(),
} }
} }
/// Set the settings /// Set the settings
#[must_use]
pub fn settings(mut self, settings: IndexSettings) -> Self { pub fn settings(mut self, settings: IndexSettings) -> Self {
self.index_settings = settings; self.index_settings = settings;
self self
} }
/// Set the schema /// Set the schema
#[must_use]
pub fn schema(mut self, schema: Schema) -> Self { pub fn schema(mut self, schema: Schema) -> Self {
self.schema = Some(schema); self.schema = Some(schema);
self self
} }
/// Creates a new index using the `RAMDirectory`. /// Creates a new index using the `RAMDirectory`.
/// ///
/// The index will be allocated in anonymous memory. /// The index will be allocated in anonymous memory.
@@ -113,7 +117,6 @@ impl IndexBuilder {
.create(ram_directory) .create(ram_directory)
.expect("Creating a RAMDirectory should never fail")) .expect("Creating a RAMDirectory should never fail"))
} }
/// Creates a new index in a given filepath. /// Creates a new index in a given filepath.
/// The index will use the `MMapDirectory`. /// The index will use the `MMapDirectory`.
/// ///
@@ -126,7 +129,6 @@ impl IndexBuilder {
} }
self.create(mmap_directory) self.create(mmap_directory)
} }
/// Creates a new index in a temp directory. /// Creates a new index in a temp directory.
/// ///
/// The index will use the `MMapDirectory` in a newly created directory. /// The index will use the `MMapDirectory` in a newly created directory.
@@ -140,14 +142,12 @@ impl IndexBuilder {
let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?); let mmap_directory: Box<dyn Directory> = Box::new(MmapDirectory::create_from_tempdir()?);
self.create(mmap_directory) self.create(mmap_directory)
} }
fn get_expect_schema(&self) -> crate::Result<Schema> { fn get_expect_schema(&self) -> crate::Result<Schema> {
self.schema self.schema
.as_ref() .as_ref()
.cloned() .cloned()
.ok_or(TantivyError::IndexBuilderMissingArgument("schema")) .ok_or(TantivyError::IndexBuilderMissingArgument("schema"))
} }
/// Opens or creates a new index in the provided directory /// Opens or creates a new index in the provided directory
pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> { pub fn open_or_create<T: Into<Box<dyn Directory>>>(self, dir: T) -> crate::Result<Index> {
let dir = dir.into(); let dir = dir.into();
@@ -397,18 +397,17 @@ impl Index {
/// - `num_threads` defines the number of indexing workers that /// - `num_threads` defines the number of indexing workers that
/// should work at the same time. /// should work at the same time.
/// ///
/// - `overall_memory_arena_in_bytes` sets the amount of memory /// - `overall_heap_size_in_bytes` sets the amount of memory
/// allocated for all indexing thread. /// allocated for all indexing thread.
/// Each thread will receive a budget of `overall_memory_arena_in_bytes / num_threads`. /// Each thread will receive a budget of `overall_heap_size_in_bytes / num_threads`.
/// ///
/// # Errors /// # Errors
/// If the lockfile already exists, returns `Error::DirectoryLockBusy` or an `Error::IoError`. /// If the lockfile already exists, returns `Error::DirectoryLockBusy` or an `Error::IoError`.
/// If the memory arena per thread is too small or too big, returns /// If the heap size per thread is too small or too big, returns `TantivyError::InvalidArgument`
/// `TantivyError::InvalidArgument`
pub fn writer_with_num_threads( pub fn writer_with_num_threads(
&self, &self,
num_threads: usize, num_threads: usize,
overall_memory_arena_in_bytes: usize, overall_heap_size_in_bytes: usize,
) -> crate::Result<IndexWriter> { ) -> crate::Result<IndexWriter> {
let directory_lock = self let directory_lock = self
.directory .directory
@@ -417,25 +416,26 @@ impl Index {
TantivyError::LockFailure( TantivyError::LockFailure(
err, err,
Some( Some(
"Failed to acquire index lock. If you are using a regular directory, this \ "Failed to acquire index lock. If you are using \
means there is already an `IndexWriter` working on this `Directory`, in \ a regular directory, this means there is already an \
this process or in a different process." `IndexWriter` working on this `Directory`, in this process \
or in a different process."
.to_string(), .to_string(),
), ),
) )
})?; })?;
let memory_arena_in_bytes_per_thread = overall_memory_arena_in_bytes / num_threads; let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
IndexWriter::new( IndexWriter::new(
self, self,
num_threads, num_threads,
memory_arena_in_bytes_per_thread, heap_size_in_bytes_per_thread,
directory_lock, directory_lock,
) )
} }
/// Helper to create an index writer for tests. /// Helper to create an index writer for tests.
/// ///
/// That index writer only simply has a single thread and a memory arena of 10 MB. /// That index writer only simply has a single thread and a heap of 10 MB.
/// Using a single thread gives us a deterministic allocation of DocId. /// Using a single thread gives us a deterministic allocation of DocId.
#[cfg(test)] #[cfg(test)]
pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> { pub fn writer_for_tests(&self) -> crate::Result<IndexWriter> {
@@ -446,28 +446,29 @@ impl Index {
/// ///
/// Tantivy will automatically define the number of threads to use, but /// Tantivy will automatically define the number of threads to use, but
/// no more than 8 threads. /// no more than 8 threads.
/// `overall_memory_arena_in_bytes` is the total target memory usage that will be split /// `overall_heap_size_in_bytes` is the total target memory usage that will be split
/// between a given number of threads. /// between a given number of threads.
/// ///
/// # Errors /// # Errors
/// If the lockfile already exists, returns `Error::FileAlreadyExists`. /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
/// If the memory arena per thread is too small or too big, returns /// If the heap size per thread is too small or too big, returns `TantivyError::InvalidArgument`
/// `TantivyError::InvalidArgument` pub fn writer(&self, overall_heap_size_in_bytes: usize) -> crate::Result<IndexWriter> {
pub fn writer(&self, memory_arena_num_bytes: usize) -> crate::Result<IndexWriter> {
let mut num_threads = std::cmp::min(num_cpus::get(), MAX_NUM_THREAD); let mut num_threads = std::cmp::min(num_cpus::get(), MAX_NUM_THREAD);
let memory_arena_num_bytes_per_thread = memory_arena_num_bytes / num_threads; let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
if memory_arena_num_bytes_per_thread < MEMORY_ARENA_NUM_BYTES_MIN { if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
num_threads = (memory_arena_num_bytes / MEMORY_ARENA_NUM_BYTES_MIN).max(1); num_threads = (overall_heap_size_in_bytes / HEAP_SIZE_MIN).max(1);
} }
self.writer_with_num_threads(num_threads, memory_arena_num_bytes) self.writer_with_num_threads(num_threads, overall_heap_size_in_bytes)
} }
/// Accessor to the index settings /// Accessor to the index settings
///
pub fn settings(&self) -> &IndexSettings { pub fn settings(&self) -> &IndexSettings {
&self.settings &self.settings
} }
/// Accessor to the index settings /// Accessor to the index settings
///
pub fn settings_mut(&mut self) -> &mut IndexSettings { pub fn settings_mut(&mut self) -> &mut IndexSettings {
&mut self.settings &mut self.settings
} }
@@ -555,9 +556,15 @@ impl fmt::Debug for Index {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::{RamDirectory, WatchCallback}; use crate::schema::Field;
use crate::schema::{Field, Schema, INDEXED, TEXT}; use crate::schema::{Schema, INDEXED, TEXT};
use crate::{Directory, Index, IndexReader, IndexSettings, ReloadPolicy}; use crate::IndexReader;
use crate::ReloadPolicy;
use crate::{
directory::{RamDirectory, WatchCallback},
IndexSettings,
};
use crate::{Directory, Index};
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -666,12 +673,10 @@ mod tests {
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_specific { mod mmap_specific {
use std::path::PathBuf;
use tempfile::TempDir;
use super::*; use super::*;
use crate::Directory; use crate::Directory;
use std::path::PathBuf;
use tempfile::TempDir;
#[test] #[test]
fn test_index_on_commit_reload_policy_mmap() -> crate::Result<()> { fn test_index_on_commit_reload_policy_mmap() -> crate::Result<()> {

View File

@@ -1,16 +1,12 @@
use std::collections::HashSet;
use std::fmt;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use super::SegmentComponent; use super::SegmentComponent;
use crate::core::SegmentId;
use crate::schema::Schema; use crate::schema::Schema;
use crate::store::Compressor; use crate::Opstamp;
use crate::{Inventory, Opstamp, TrackedObject}; use crate::{core::SegmentId, store::Compressor};
use crate::{Inventory, TrackedObject};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::{collections::HashSet, sync::atomic::AtomicBool};
use std::{fmt, sync::Arc};
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
struct DeleteMeta { struct DeleteMeta {
@@ -192,7 +188,6 @@ impl SegmentMeta {
} }
#[doc(hidden)] #[doc(hidden)]
#[must_use]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
assert!( assert!(
num_deleted_docs <= self.max_doc(), num_deleted_docs <= self.max_doc(),
@@ -287,6 +282,7 @@ impl Order {
/// * the searchable segments, /// * the searchable segments,
/// * the index `docstamp` /// * the index `docstamp`
/// * the schema /// * the schema
///
#[derive(Clone, Serialize)] #[derive(Clone, Serialize)]
pub struct IndexMeta { pub struct IndexMeta {
/// `IndexSettings` to configure index options. /// `IndexSettings` to configure index options.
@@ -374,8 +370,10 @@ impl fmt::Debug for IndexMeta {
mod tests { mod tests {
use super::IndexMeta; use super::IndexMeta;
use crate::schema::{Schema, TEXT}; use crate::{
use crate::{IndexSettings, IndexSortByField, Order}; schema::{Schema, TEXT},
IndexSettings, IndexSortByField, Order,
};
#[test] #[test]
fn test_serialize_metas() { fn test_serialize_metas() {

View File

@@ -1,12 +1,13 @@
use std::io; use std::io;
use common::BinarySerializable;
use crate::directory::FileSlice; use crate::directory::FileSlice;
use crate::positions::PositionReader; use crate::positions::PositionReader;
use crate::postings::{BlockSegmentPostings, SegmentPostings, TermInfo}; use crate::postings::TermInfo;
use crate::schema::{IndexRecordOption, Term}; use crate::postings::{BlockSegmentPostings, SegmentPostings};
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use common::BinarySerializable;
/// The inverted index reader is in charge of accessing /// The inverted index reader is in charge of accessing
/// the inverted index associated to a specific field. /// the inverted index associated to a specific field.

View File

@@ -8,10 +8,6 @@ mod segment_component;
mod segment_id; mod segment_id;
mod segment_reader; mod segment_reader;
use std::path::Path;
use once_cell::sync::Lazy;
pub use self::executor::Executor; pub use self::executor::Executor;
pub use self::index::{Index, IndexBuilder}; pub use self::index::{Index, IndexBuilder};
pub use self::index_meta::{ pub use self::index_meta::{
@@ -24,6 +20,9 @@ pub use self::segment_component::SegmentComponent;
pub use self::segment_id::SegmentId; pub use self::segment_id::SegmentId;
pub use self::segment_reader::SegmentReader; pub use self::segment_reader::SegmentReader;
use once_cell::sync::Lazy;
use std::path::Path;
/// The meta file contains all the information about the list of segments and the schema /// The meta file contains all the information about the list of segments and the schema
/// of the index. /// of the index.
pub static META_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new("meta.json")); pub static META_FILEPATH: Lazy<&'static Path> = Lazy::new(|| Path::new("meta.json"));

View File

@@ -1,13 +1,20 @@
use std::collections::BTreeMap;
use std::{fmt, io};
use crate::collector::Collector; use crate::collector::Collector;
use crate::core::{Executor, SegmentReader}; use crate::core::Executor;
use crate::core::SegmentReader;
use crate::query::Query; use crate::query::Query;
use crate::schema::{Document, Schema, Term}; use crate::schema::Document;
use crate::schema::Schema;
use crate::schema::Term;
use crate::space_usage::SearcherSpaceUsage; use crate::space_usage::SearcherSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::{DocAddress, Index, Opstamp, SegmentId, TrackedObject}; use crate::DocAddress;
use crate::Index;
use crate::Opstamp;
use crate::SegmentId;
use crate::TrackedObject;
use std::collections::BTreeMap;
use std::{fmt, io};
/// Identifies the searcher generation accessed by a [Searcher]. /// Identifies the searcher generation accessed by a [Searcher].
/// ///
@@ -62,6 +69,7 @@ impl SearcherGeneration {
/// ///
/// It guarantees that the `Segment` will not be removed before /// It guarantees that the `Segment` will not be removed before
/// the destruction of the `Searcher`. /// the destruction of the `Searcher`.
///
pub struct Searcher { pub struct Searcher {
schema: Schema, schema: Schema,
index: Index, index: Index,

View File

@@ -1,12 +1,14 @@
use std::fmt;
use std::path::PathBuf;
use super::SegmentComponent; use super::SegmentComponent;
use crate::core::{Index, SegmentId, SegmentMeta}; use crate::core::Index;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::{Directory, FileSlice, WritePtr}; use crate::directory::Directory;
use crate::directory::{FileSlice, WritePtr};
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use std::fmt;
use std::path::PathBuf;
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
@@ -54,7 +56,6 @@ impl Segment {
} }
#[doc(hidden)] #[doc(hidden)]
#[must_use]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment { Segment {
index: self.index, index: self.index,

View File

@@ -1,14 +1,14 @@
use std::cmp::{Ord, Ordering}; use std::cmp::{Ord, Ordering};
use std::error::Error;
use std::fmt; use std::fmt;
use std::str::FromStr; use uuid::Uuid;
#[cfg(test)]
use std::sync::atomic;
#[cfg(test)] #[cfg(test)]
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use uuid::Uuid; use std::error::Error;
use std::str::FromStr;
#[cfg(test)]
use std::sync::atomic;
/// Uuid identifying a segment. /// Uuid identifying a segment.
/// ///

View File

@@ -1,19 +1,28 @@
use std::collections::HashMap; use crate::core::InvertedIndexReader;
use std::sync::{Arc, RwLock}; use crate::core::Segment;
use std::{fmt, io}; use crate::core::SegmentComponent;
use crate::core::SegmentId;
use fail::fail_point; use crate::directory::CompositeFile;
use crate::directory::FileSlice;
use crate::core::{InvertedIndexReader, Segment, SegmentComponent, SegmentId};
use crate::directory::{CompositeFile, FileSlice};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::fastfield::{intersect_alive_bitsets, AliveBitSet, FacetReader, FastFieldReaders}; use crate::fastfield::intersect_alive_bitsets;
use crate::fastfield::AliveBitSet;
use crate::fastfield::FacetReader;
use crate::fastfield::FastFieldReaders;
use crate::fieldnorm::{FieldNormReader, FieldNormReaders}; use crate::fieldnorm::{FieldNormReader, FieldNormReaders};
use crate::schema::{Field, FieldType, IndexRecordOption, Schema}; use crate::schema::FieldType;
use crate::schema::Schema;
use crate::schema::{Field, IndexRecordOption};
use crate::space_usage::SegmentSpaceUsage; use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use crate::{DocId, Opstamp}; use crate::DocId;
use crate::Opstamp;
use fail::fail_point;
use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
use std::{collections::HashMap, io};
/// Entry point to access all of the datastructures of the `Segment` /// Entry point to access all of the datastructures of the `Segment`
/// ///
@@ -121,8 +130,7 @@ impl SegmentReader {
self.fieldnorm_readers.get_field(field)?.ok_or_else(|| { self.fieldnorm_readers.get_field(field)?.ok_or_else(|| {
let field_name = self.schema.get_field_name(field); let field_name = self.schema.get_field_name(field);
let err_msg = format!( let err_msg = format!(
"Field norm not found for field {:?}. Was the field set to record norm during \ "Field norm not found for field {:?}. Was the field set to record norm during indexing?",
indexing?",
field_name field_name
); );
crate::TantivyError::SchemaError(err_msg) crate::TantivyError::SchemaError(err_msg)
@@ -251,23 +259,18 @@ impl SegmentReader {
let record_option = record_option_opt.unwrap(); let record_option = record_option_opt.unwrap();
let postings_file = postings_file_opt.unwrap(); let postings_file = postings_file_opt.unwrap();
let termdict_file: FileSlice = let termdict_file: FileSlice = self.termdict_composite.open_read(field)
self.termdict_composite.open_read(field).ok_or_else(|| { .ok_or_else(||
DataCorruption::comment_only(format!( DataCorruption::comment_only(format!("Failed to open field {:?}'s term dictionary in the composite file. Has the schema been modified?", field_entry.name()))
"Failed to open field {:?}'s term dictionary in the composite file. Has the \ )?;
schema been modified?",
field_entry.name()
))
})?;
let positions_file = self.positions_composite.open_read(field).ok_or_else(|| { let positions_file = self
let error_msg = format!( .positions_composite
"Failed to open field {:?}'s positions in the composite file. Has the schema been \ .open_read(field)
modified?", .ok_or_else(|| {
field_entry.name() let error_msg = format!("Failed to open field {:?}'s positions in the composite file. Has the schema been modified?", field_entry.name());
); DataCorruption::comment_only(error_msg)
DataCorruption::comment_only(error_msg) })?;
})?;
let inv_idx_reader = Arc::new(InvertedIndexReader::new( let inv_idx_reader = Arc::new(InvertedIndexReader::new(
TermDictionary::open(termdict_file)?, TermDictionary::open(termdict_file)?,

View File

@@ -1,14 +1,17 @@
use crate::directory::FileSlice;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
use common::BinarySerializable;
use common::CountingWriter;
use common::HasLen;
use common::VInt;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::iter::ExactSizeIterator; use std::iter::ExactSizeIterator;
use std::ops::Range; use std::ops::Range;
use common::{BinarySerializable, CountingWriter, HasLen, VInt};
use crate::directory::{FileSlice, TerminatingWrite, WritePtr};
use crate::schema::Field;
use crate::space_usage::{FieldUsage, PerFieldSpaceUsage};
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)] #[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
pub struct FileAddr { pub struct FileAddr {
field: Field, field: Field,
@@ -183,14 +186,13 @@ impl CompositeFile {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use std::io::Write;
use std::path::Path;
use common::{BinarySerializable, VInt};
use super::{CompositeFile, CompositeWrite}; use super::{CompositeFile, CompositeWrite};
use crate::directory::{Directory, RamDirectory}; use crate::directory::{Directory, RamDirectory};
use crate::schema::Field; use crate::schema::Field;
use common::BinarySerializable;
use common::VInt;
use std::io::Write;
use std::path::Path;
#[test] #[test]
fn test_composite_file() -> crate::Result<()> { fn test_composite_file() -> crate::Result<()> {

View File

@@ -1,12 +1,18 @@
use std::io::Write;
use std::marker::{Send, Sync};
use std::path::{Path, PathBuf};
use std::time::Duration;
use std::{fmt, io, thread};
use crate::directory::directory_lock::Lock; use crate::directory::directory_lock::Lock;
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::LockError;
use crate::directory::{FileHandle, FileSlice, WatchCallback, WatchHandle, WritePtr}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::WatchHandle;
use crate::directory::{FileHandle, WatchCallback};
use crate::directory::{FileSlice, WritePtr};
use std::fmt;
use std::io;
use std::io::Write;
use std::marker::Send;
use std::marker::Sync;
use std::path::Path;
use std::path::PathBuf;
use std::thread;
use std::time::Duration;
/// Retry the logic of acquiring locks is pretty simple. /// Retry the logic of acquiring locks is pretty simple.
/// We just retry `n` times after a given `duratio`, both /// We just retry `n` times after a given `duratio`, both
@@ -227,7 +233,8 @@ pub trait DirectoryClone {
} }
impl<T> DirectoryClone for T impl<T> DirectoryClone for T
where T: 'static + Directory + Clone where
T: 'static + Directory + Clone,
{ {
fn box_clone(&self) -> Box<dyn Directory> { fn box_clone(&self) -> Box<dyn Directory> {
Box::new(self.clone()) Box::new(self.clone())

View File

@@ -1,6 +1,5 @@
use std::path::PathBuf;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::path::PathBuf;
/// A directory lock. /// A directory lock.
/// ///
@@ -12,6 +11,7 @@ use once_cell::sync::Lazy;
/// - [META_LOCK] /// - [META_LOCK]
/// ///
/// Check out these locks documentation for more information. /// Check out these locks documentation for more information.
///
#[derive(Debug)] #[derive(Debug)]
pub struct Lock { pub struct Lock {
/// The lock needs to be associated with its own file `path`. /// The lock needs to be associated with its own file `path`.

View File

@@ -1,17 +1,15 @@
use std::path::PathBuf;
use std::{fmt, io};
use crate::Version; use crate::Version;
use std::fmt;
use std::io;
use std::path::PathBuf;
/// Error while trying to acquire a directory lock. /// Error while trying to acquire a directory lock.
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum LockError { pub enum LockError {
/// Failed to acquired a lock as it is already held by another /// Failed to acquired a lock as it is already held by another
/// client. /// client.
/// - In the context of a blocking lock, this means the lock was not released within some /// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// `timeout` period. /// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the
/// call.
#[error("Could not acquire lock as it is already held, possibly by a different process.")] #[error("Could not acquire lock as it is already held, possibly by a different process.")]
LockBusy, LockBusy,
/// Trying to acquire a lock failed with an `IoError` /// Trying to acquire a lock failed with an `IoError`

View File

@@ -1,11 +1,11 @@
use std::ops::{Deref, Range};
use std::sync::{Arc, Weak};
use std::{fmt, io};
use common::HasLen;
use stable_deref_trait::StableDeref; use stable_deref_trait::StableDeref;
use crate::directory::OwnedBytes; use crate::directory::OwnedBytes;
use common::HasLen;
use std::fmt;
use std::ops::Range;
use std::sync::{Arc, Weak};
use std::{io, ops::Deref};
pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>; pub type ArcBytes = Arc<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>; pub type WeakArcBytes = Weak<dyn Deref<Target = [u8]> + Send + Sync + 'static>;
@@ -33,7 +33,8 @@ impl FileHandle for &'static [u8] {
} }
impl<B> From<B> for FileSlice impl<B> From<B> for FileSlice
where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync where
B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync,
{ {
fn from(bytes: B) -> FileSlice { fn from(bytes: B) -> FileSlice {
FileSlice::new(Box::new(OwnedBytes::new(bytes))) FileSlice::new(Box::new(OwnedBytes::new(bytes)))
@@ -43,6 +44,7 @@ where B: StableDeref + Deref<Target = [u8]> + 'static + Send + Sync
/// Logical slice of read only file in tantivy. /// Logical slice of read only file in tantivy.
/// ///
/// It can be cloned and sliced cheaply. /// It can be cloned and sliced cheaply.
///
#[derive(Clone)] #[derive(Clone)]
pub struct FileSlice { pub struct FileSlice {
data: Arc<dyn FileHandle>, data: Arc<dyn FileHandle>,
@@ -77,7 +79,6 @@ impl FileSlice {
/// # Panics /// # Panics
/// ///
/// Panics if `byte_range.end` exceeds the filesize. /// Panics if `byte_range.end` exceeds the filesize.
#[must_use]
pub fn slice(&self, byte_range: Range<usize>) -> FileSlice { pub fn slice(&self, byte_range: Range<usize>) -> FileSlice {
assert!(byte_range.end <= self.len()); assert!(byte_range.end <= self.len());
FileSlice { FileSlice {
@@ -137,7 +138,6 @@ impl FileSlice {
/// boundary. /// boundary.
/// ///
/// Equivalent to `.slice(from_offset, self.len())` /// Equivalent to `.slice(from_offset, self.len())`
#[must_use]
pub fn slice_from(&self, from_offset: usize) -> FileSlice { pub fn slice_from(&self, from_offset: usize) -> FileSlice {
self.slice(from_offset..self.len()) self.slice(from_offset..self.len())
} }
@@ -145,7 +145,6 @@ impl FileSlice {
/// Returns a slice from the end. /// Returns a slice from the end.
/// ///
/// Equivalent to `.slice(self.len() - from_offset, self.len())` /// Equivalent to `.slice(self.len() - from_offset, self.len())`
#[must_use]
pub fn slice_from_end(&self, from_offset: usize) -> FileSlice { pub fn slice_from_end(&self, from_offset: usize) -> FileSlice {
self.slice(self.len() - from_offset..self.len()) self.slice(self.len() - from_offset..self.len())
} }
@@ -154,7 +153,6 @@ impl FileSlice {
/// boundary. /// boundary.
/// ///
/// Equivalent to `.slice(0, to_offset)` /// Equivalent to `.slice(0, to_offset)`
#[must_use]
pub fn slice_to(&self, to_offset: usize) -> FileSlice { pub fn slice_to(&self, to_offset: usize) -> FileSlice {
self.slice(0..to_offset) self.slice(0..to_offset)
} }
@@ -174,11 +172,9 @@ impl HasLen for FileSlice {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::io;
use common::HasLen;
use super::{FileHandle, FileSlice}; use super::{FileHandle, FileSlice};
use common::HasLen;
use std::io;
#[test] #[test]
fn test_file_slice() -> io::Result<()> { fn test_file_slice() -> io::Result<()> {

View File

@@ -1,13 +1,13 @@
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
use crc32fast::Hasher;
use std::fs;
use std::io;
use std::io::BufRead; use std::io::BufRead;
use std::path::Path; use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time::Duration; use std::time::Duration;
use std::{fs, io, thread};
use crc32fast::Hasher;
use crate::directory::{WatchCallback, WatchCallbackList, WatchHandle};
pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 }); pub const POLLING_INTERVAL: Duration = Duration::from_millis(if cfg!(test) { 1 } else { 500 });
@@ -99,9 +99,10 @@ mod tests {
use std::mem; use std::mem;
use super::*;
use crate::directory::mmap_directory::atomic_write; use crate::directory::mmap_directory::atomic_write;
use super::*;
#[test] #[test]
fn test_file_watcher_drop_watcher() -> crate::Result<()> { fn test_file_watcher_drop_watcher() -> crate::Result<()> {
let tmp_dir = tempfile::TempDir::new()?; let tmp_dir = tempfile::TempDir::new()?;

View File

@@ -1,13 +1,14 @@
use std::io; use crate::directory::error::Incompatibility;
use std::io::Write; use crate::directory::FileSlice;
use crate::{
directory::{AntiCallToken, TerminatingWrite},
Version, INDEX_FORMAT_VERSION,
};
use common::{BinarySerializable, CountingWriter, DeserializeFrom, FixedSize, HasLen}; use common::{BinarySerializable, CountingWriter, DeserializeFrom, FixedSize, HasLen};
use crc32fast::Hasher; use crc32fast::Hasher;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::io;
use crate::directory::error::Incompatibility; use std::io::Write;
use crate::directory::{AntiCallToken, FileSlice, TerminatingWrite};
use crate::{Version, INDEX_FORMAT_VERSION};
const FOOTER_MAX_LEN: u32 = 50_000; const FOOTER_MAX_LEN: u32 = 50_000;
@@ -63,9 +64,7 @@ impl Footer {
if footer_magic_byte != FOOTER_MAGIC_NUMBER { if footer_magic_byte != FOOTER_MAGIC_NUMBER {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::InvalidData, io::ErrorKind::InvalidData,
"Footer magic byte mismatch. File corrupted or index was created using old an \ "Footer magic byte mismatch. File corrupted or index was created using old an tantivy version which is not supported anymore. Please use tantivy 0.15 or above to recreate the index.",
tantivy version which is not supported anymore. Please use tantivy 0.15 or above \
to recreate the index.",
)); ));
} }
@@ -74,7 +73,7 @@ impl Footer {
io::ErrorKind::InvalidData, io::ErrorKind::InvalidData,
format!( format!(
"Footer seems invalid as it suggests a footer len of {}. File is corrupted, \ "Footer seems invalid as it suggests a footer len of {}. File is corrupted, \
or the index was created with a different & old version of tantivy.", or the index was created with a different & old version of tantivy.",
footer_len footer_len
), ),
)); ));
@@ -155,12 +154,11 @@ impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::io; use crate::directory::footer::Footer;
use crate::directory::OwnedBytes;
use crate::directory::{footer::FOOTER_MAGIC_NUMBER, FileSlice};
use common::BinarySerializable; use common::BinarySerializable;
use std::io;
use crate::directory::footer::{Footer, FOOTER_MAGIC_NUMBER};
use crate::directory::{FileSlice, OwnedBytes};
#[test] #[test]
fn test_deserialize_footer() { fn test_deserialize_footer() {
@@ -185,9 +183,8 @@ mod tests {
let err = Footer::extract_footer(fileslice).unwrap_err(); let err = Footer::extract_footer(fileslice).unwrap_err();
assert_eq!( assert_eq!(
err.to_string(), err.to_string(),
"Footer magic byte mismatch. File corrupted or index was created using old an tantivy \ "Footer magic byte mismatch. File corrupted or index was created using old an tantivy version which \
version which is not supported anymore. Please use tantivy 0.15 or above to recreate \ is not supported anymore. Please use tantivy 0.15 or above to recreate the index."
the index."
); );
} }
#[test] #[test]
@@ -222,8 +219,8 @@ mod tests {
assert_eq!(err.kind(), io::ErrorKind::InvalidData); assert_eq!(err.kind(), io::ErrorKind::InvalidData);
assert_eq!( assert_eq!(
err.to_string(), err.to_string(),
"Footer seems invalid as it suggests a footer len of 50001. File is corrupted, or the \ "Footer seems invalid as it suggests a footer len of 50001. File is corrupted, \
index was created with a different & old version of tantivy." or the index was created with a different & old version of tantivy."
); );
} }
} }

View File

@@ -1,21 +1,24 @@
use std::collections::HashSet;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock, RwLockWriteGuard};
use std::{io, result};
use crc32fast::Hasher;
use crate::core::MANAGED_FILEPATH; use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::{ use crate::directory::GarbageCollectionResult;
DirectoryLock, FileHandle, FileSlice, GarbageCollectionResult, Lock, WatchCallback, use crate::directory::Lock;
WatchHandle, WritePtr, META_LOCK, use crate::directory::META_LOCK;
}; use crate::directory::{DirectoryLock, FileHandle};
use crate::directory::{FileSlice, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
use crc32fast::Hasher;
use std::collections::HashSet;
use std::io;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::result;
use std::sync::RwLockWriteGuard;
use std::sync::{Arc, RwLock};
/// Returns true iff the file is "managed". /// Returns true iff the file is "managed".
/// Non-managed file are not subject to garbage collection. /// Non-managed file are not subject to garbage collection.
/// ///
@@ -341,14 +344,12 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
use std::collections::HashSet; use std::collections::HashSet;
use std::io::Write; use std::io::Write;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use tempfile::TempDir; use tempfile::TempDir;
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
#[test] #[test]
fn test_managed_directory() { fn test_managed_directory() {
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new().unwrap();

View File

@@ -1,28 +1,32 @@
use std::collections::HashMap; use crate::core::META_FILEPATH;
use std::convert::From; use crate::directory::error::LockError;
use std::fs::{self, File, OpenOptions}; use crate::directory::error::{DeleteError, OpenDirectoryError, OpenReadError, OpenWriteError};
use std::io::{self, BufWriter, Read, Seek, SeekFrom, Write}; use crate::directory::file_watcher::FileWatcher;
use std::ops::Deref; use crate::directory::Directory;
use std::path::{Path, PathBuf}; use crate::directory::DirectoryLock;
use std::sync::{Arc, RwLock}; use crate::directory::Lock;
use std::{fmt, result}; use crate::directory::WatchCallback;
use crate::directory::WatchHandle;
use crate::directory::{AntiCallToken, FileHandle, OwnedBytes};
use crate::directory::{ArcBytes, WeakArcBytes};
use crate::directory::{TerminatingWrite, WritePtr};
use fs2::FileExt; use fs2::FileExt;
use memmap2::Mmap; use memmap2::Mmap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use stable_deref_trait::StableDeref; use stable_deref_trait::StableDeref;
use std::convert::From;
use std::fmt;
use std::fs::OpenOptions;
use std::fs::{self, File};
use std::io::{self, Seek, SeekFrom};
use std::io::{BufWriter, Read, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::sync::Arc;
use std::sync::RwLock;
use std::{collections::HashMap, ops::Deref};
use tempfile::TempDir; use tempfile::TempDir;
use crate::core::META_FILEPATH;
use crate::directory::error::{
DeleteError, LockError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::file_watcher::FileWatcher;
use crate::directory::{
AntiCallToken, ArcBytes, Directory, DirectoryLock, FileHandle, Lock, OwnedBytes,
TerminatingWrite, WatchCallback, WatchHandle, WeakArcBytes, WritePtr,
};
/// Create a default io error given a string. /// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error { pub(crate) fn make_io_err(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg) io::Error::new(io::ErrorKind::Other, msg)
@@ -316,7 +320,8 @@ impl Directory for MmapDirectory {
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| { let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!( let msg = format!(
"Failed to acquired write lock on mmap cache while reading {:?}", "Failed to acquired write lock \
on mmap cache while reading {:?}",
path path
); );
let io_err = make_io_err(msg); let io_err = make_io_err(msg);
@@ -452,7 +457,6 @@ impl Directory for MmapDirectory {
#[cfg(windows)] #[cfg(windows)]
{ {
use std::os::windows::fs::OpenOptionsExt; use std::os::windows::fs::OpenOptionsExt;
use winapi::um::winbase; use winapi::um::winbase;
open_opts open_opts
@@ -472,12 +476,15 @@ mod tests {
// There are more tests in directory/mod.rs // There are more tests in directory/mod.rs
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use common::HasLen;
use super::*; use super::*;
use crate::indexer::LogMergePolicy; use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use crate::Index;
use crate::{Index, IndexSettings, ReloadPolicy}; use crate::ReloadPolicy;
use crate::{
schema::{Schema, SchemaBuilder, TEXT},
IndexSettings,
};
use common::HasLen;
#[test] #[test]
fn test_open_non_existent_path() { fn test_open_non_existent_path() {
@@ -514,7 +521,7 @@ mod tests {
{ {
for path in &paths { for path in &paths {
let mut w = mmap_directory.open_write(path).unwrap(); let mut w = mmap_directory.open_write(path).unwrap();
w.write_all(content).unwrap(); w.write(content).unwrap();
w.flush().unwrap(); w.flush().unwrap();
} }
} }

View File

@@ -1,4 +1,8 @@
//! WORM (Write Once Read Many) directory abstraction. /*!
WORM (Write Once Read Many) directory abstraction.
*/
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_directory; mod mmap_directory;
@@ -18,19 +22,19 @@ pub mod error;
mod composite_file; mod composite_file;
use std::io::BufWriter;
use std::path::PathBuf;
pub use common::{AntiCallToken, TerminatingWrite};
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite}; pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::directory::{Directory, DirectoryClone, DirectoryLock}; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes}; pub(crate) use self::file_slice::{ArcBytes, WeakArcBytes};
pub use self::file_slice::{FileHandle, FileSlice}; pub use self::file_slice::{FileHandle, FileSlice};
pub use self::owned_bytes::OwnedBytes; pub use self::owned_bytes::OwnedBytes;
pub use self::ram_directory::RamDirectory; pub use self::ram_directory::RamDirectory;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle}; pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
pub use common::AntiCallToken;
pub use common::TerminatingWrite;
use std::io::BufWriter;
use std::path::PathBuf;
/// Outcome of the Garbage collection /// Outcome of the Garbage collection
pub struct GarbageCollectionResult { pub struct GarbageCollectionResult {
@@ -46,10 +50,11 @@ pub struct GarbageCollectionResult {
pub failed_to_delete_files: Vec<PathBuf>, pub failed_to_delete_files: Vec<PathBuf>,
} }
pub use self::managed_directory::ManagedDirectory;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write

View File

@@ -1,10 +1,9 @@
use crate::directory::FileHandle;
use std::io; use std::io;
use std::ops::Range; use std::ops::Range;
pub use ownedbytes::OwnedBytes; pub use ownedbytes::OwnedBytes;
use crate::directory::FileHandle;
impl FileHandle for OwnedBytes { impl FileHandle for OwnedBytes {
fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> { fn read_bytes(&self, range: Range<usize>) -> io::Result<OwnedBytes> {
Ok(self.slice(range)) Ok(self.slice(range))

View File

@@ -1,19 +1,19 @@
use std::collections::HashMap;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use std::{fmt, result};
use common::HasLen;
use fail::fail_point;
use super::FileHandle;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::{ use crate::directory::AntiCallToken;
AntiCallToken, Directory, FileSlice, TerminatingWrite, WatchCallback, WatchCallbackList, use crate::directory::WatchCallbackList;
WatchHandle, WritePtr, use crate::directory::{Directory, FileSlice, WatchCallback, WatchHandle};
}; use crate::directory::{TerminatingWrite, WritePtr};
use common::HasLen;
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::sync::{Arc, RwLock};
use super::FileHandle;
/// Writer associated with the `RamDirectory` /// Writer associated with the `RamDirectory`
/// ///
@@ -40,9 +40,7 @@ impl Drop for VecWriter {
fn drop(&mut self) { fn drop(&mut self) {
if !self.is_flushed { if !self.is_flushed {
warn!( warn!(
"You forgot to flush {:?} before its writter got Drop. Do not rely on drop. This \ "You forgot to flush {:?} before its writter got Drop. Do not rely on drop. This also occurs when the indexer crashed, so you may want to check the logs for the root cause.",
also occurs when the indexer crashed, so you may want to check the logs for the \
root cause.",
self.path self.path
) )
} }
@@ -125,6 +123,7 @@ impl fmt::Debug for RamDirectory {
/// ///
/// It is mainly meant for unit testing. /// It is mainly meant for unit testing.
/// Writes are only made visible upon flushing. /// Writes are only made visible upon flushing.
///
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub struct RamDirectory { pub struct RamDirectory {
fs: Arc<RwLock<InnerDirectory>>, fs: Arc<RwLock<InnerDirectory>>,
@@ -234,11 +233,10 @@ impl Directory for RamDirectory {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::io::Write;
use std::path::Path;
use super::RamDirectory; use super::RamDirectory;
use crate::Directory; use crate::Directory;
use std::io::Write;
use std::path::Path;
#[test] #[test]
fn test_persist() { fn test_persist() {

View File

@@ -1,3 +1,6 @@
use super::*;
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write; use std::io::Write;
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@@ -6,11 +9,6 @@ use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use futures::channel::oneshot;
use futures::executor::block_on;
use super::*;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_directory_tests { mod mmap_directory_tests {
use crate::directory::MmapDirectory; use crate::directory::MmapDirectory;

View File

@@ -1,7 +1,8 @@
use std::sync::{Arc, RwLock, Weak};
use futures::channel::oneshot; use futures::channel::oneshot;
use futures::{Future, TryFutureExt}; use futures::{Future, TryFutureExt};
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;
/// Cloneable wrapper for callbacks registered when watching files of a `Directory`. /// Cloneable wrapper for callbacks registered when watching files of a `Directory`.
#[derive(Clone)] #[derive(Clone)]
@@ -102,14 +103,12 @@ impl WatchCallbackList {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::{WatchCallback, WatchCallbackList};
use futures::executor::block_on;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use futures::executor::block_on;
use crate::directory::{WatchCallback, WatchCallbackList};
#[test] #[test]
fn test_watch_event_router_simple() { fn test_watch_event_router_simple() {
let watch_event_router = WatchCallbackList::default(); let watch_event_router = WatchCallbackList::default();

View File

@@ -1,7 +1,7 @@
use std::borrow::{Borrow, BorrowMut};
use crate::fastfield::AliveBitSet; use crate::fastfield::AliveBitSet;
use crate::DocId; use crate::DocId;
use std::borrow::Borrow;
use std::borrow::BorrowMut;
/// Sentinel value returned when a DocSet has been entirely consumed. /// Sentinel value returned when a DocSet has been entirely consumed.
/// ///

View File

@@ -1,14 +1,17 @@
//! Definition of Tantivy's error and result. //! Definition of Tantivy's error and result.
use std::io;
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
use crate::{
directory::error::{OpenDirectoryError, OpenReadError, OpenWriteError},
schema,
};
use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::PoisonError; use std::sync::PoisonError;
use std::{fmt, io};
use crate::directory::error::{
Incompatibility, LockError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::fastfield::FastFieldNotAvailableError;
use crate::{query, schema};
/// Represents a `DataCorruption` error. /// Represents a `DataCorruption` error.
/// ///

View File

@@ -1,11 +1,11 @@
use std::io;
use std::io::Write;
use common::{intersect_bitsets, BitSet, ReadOnlyBitSet};
use ownedbytes::OwnedBytes;
use crate::space_usage::ByteCount; use crate::space_usage::ByteCount;
use crate::DocId; use crate::DocId;
use common::intersect_bitsets;
use common::BitSet;
use common::ReadOnlyBitSet;
use ownedbytes::OwnedBytes;
use std::io;
use std::io::Write;
/// Write a alive `BitSet` /// Write a alive `BitSet`
/// ///
@@ -168,12 +168,11 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use super::AliveBitSet;
use rand::prelude::IteratorRandom; use rand::prelude::IteratorRandom;
use rand::thread_rng; use rand::thread_rng;
use test::Bencher; use test::Bencher;
use super::AliveBitSet;
fn get_alive() -> Vec<u32> { fn get_alive() -> Vec<u32> {
let mut data = (0..1_000_000_u32).collect::<Vec<u32>>(); let mut data = (0..1_000_000_u32).collect::<Vec<u32>>();
for _ in 0..(1_000_000) * 1 / 8 { for _ in 0..(1_000_000) * 1 / 8 {

View File

@@ -6,11 +6,10 @@ pub use self::writer::BytesFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::ops::Deref; use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value};
use crate::{query::TermQuery, schema::FAST, schema::INDEXED, schema::STORED};
use crate::query::TermQuery;
use crate::schema::{BytesOptions, IndexRecordOption, Schema, Value, FAST, INDEXED, STORED};
use crate::{DocAddress, DocSet, Index, Searcher, Term}; use crate::{DocAddress, DocSet, Index, Searcher, Term};
use std::ops::Deref;
#[test] #[test]
fn test_bytes() -> crate::Result<()> { fn test_bytes() -> crate::Result<()> {
@@ -63,7 +62,7 @@ mod tests {
assert_eq!(values.len(), 2); assert_eq!(values.len(), 2);
let values_bytes: Vec<&[u8]> = values let values_bytes: Vec<&[u8]> = values
.into_iter() .into_iter()
.flat_map(|value| value.as_bytes()) .flat_map(|value| value.bytes_value())
.collect(); .collect();
assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]); assert_eq!(values_bytes, &[&b"tantivy"[..], &b"lucene"[..]]);
Ok(()) Ok(())

View File

@@ -1,5 +1,6 @@
use crate::directory::{FileSlice, OwnedBytes}; use crate::directory::FileSlice;
use crate::fastfield::{DynamicFastFieldReader, FastFieldReader, MultiValueLength}; use crate::directory::OwnedBytes;
use crate::fastfield::{BitpackedFastFieldReader, FastFieldReader, MultiValueLength};
use crate::DocId; use crate::DocId;
/// Reader for byte array fast fields /// Reader for byte array fast fields
@@ -14,13 +15,13 @@ use crate::DocId;
/// and the start index for the next document, and keeping the bytes in between. /// and the start index for the next document, and keeping the bytes in between.
#[derive(Clone)] #[derive(Clone)]
pub struct BytesFastFieldReader { pub struct BytesFastFieldReader {
idx_reader: DynamicFastFieldReader<u64>, idx_reader: BitpackedFastFieldReader<u64>,
values: OwnedBytes, values: OwnedBytes,
} }
impl BytesFastFieldReader { impl BytesFastFieldReader {
pub(crate) fn open( pub(crate) fn open(
idx_reader: DynamicFastFieldReader<u64>, idx_reader: BitpackedFastFieldReader<u64>,
values_file: FileSlice, values_file: FileSlice,
) -> crate::Result<BytesFastFieldReader> { ) -> crate::Result<BytesFastFieldReader> {
let values = values_file.read_bytes()?; let values = values_file.read_bytes()?;

View File

@@ -1,9 +1,10 @@
use std::io; use std::io;
use crate::fastfield::serializer::CompositeFastFieldSerializer;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::{Document, Field, Value}; use crate::schema::{Document, Field, Value};
use crate::DocId; use crate::DocId;
use crate::{
fastfield::serializer::CompositeFastFieldSerializer, indexer::doc_id_mapping::DocIdMapping,
};
/// Writer for byte array (as in, any number of bytes per document) fast fields /// Writer for byte array (as in, any number of bytes per document) fast fields
/// ///

View File

@@ -1,6 +1,5 @@
use std::result;
use crate::schema::FieldEntry; use crate::schema::FieldEntry;
use std::result;
/// `FastFieldNotAvailableError` is returned when the /// `FastFieldNotAvailableError` is returned when the
/// user requested for a fast field reader, and the field was not /// user requested for a fast field reader, and the field was not

View File

@@ -1,10 +1,10 @@
use std::str;
use super::MultiValuedFastFieldReader; use super::MultiValuedFastFieldReader;
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::schema::Facet; use crate::schema::Facet;
use crate::termdict::{TermDictionary, TermOrdinal}; use crate::termdict::TermDictionary;
use crate::termdict::TermOrdinal;
use crate::DocId; use crate::DocId;
use std::str;
/// The facet reader makes it possible to access the list of /// The facet reader makes it possible to access the list of
/// facets associated to a given document in a specific /// facets associated to a given document in a specific
@@ -82,8 +82,11 @@ impl FacetReader {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::schema::{Facet, FacetOptions, SchemaBuilder, Value, STORED}; use crate::Index;
use crate::{DocAddress, Document, Index}; use crate::{
schema::{Facet, FacetOptions, SchemaBuilder, Value, STORED},
DocAddress, Document,
};
#[test] #[test]
fn test_facet_only_indexed() -> crate::Result<()> { fn test_facet_only_indexed() -> crate::Result<()> {
@@ -103,7 +106,7 @@ mod tests {
facet_reader.facet_ords(0u32, &mut facet_ords); facet_reader.facet_ords(0u32, &mut facet_ords);
assert_eq!(&facet_ords, &[2u64]); assert_eq!(&facet_ords, &[2u64]);
let doc = searcher.doc(DocAddress::new(0u32, 0u32))?; let doc = searcher.doc(DocAddress::new(0u32, 0u32))?;
let value = doc.get_first(facet_field).and_then(Value::as_facet); let value = doc.get_first(facet_field).and_then(Value::facet);
assert_eq!(value, None); assert_eq!(value, None);
Ok(()) Ok(())
} }
@@ -126,7 +129,7 @@ mod tests {
facet_reader.facet_ords(0u32, &mut facet_ords); facet_reader.facet_ords(0u32, &mut facet_ords);
assert_eq!(&facet_ords, &[2u64]); assert_eq!(&facet_ords, &[2u64]);
let doc = searcher.doc(DocAddress::new(0u32, 0u32))?; let doc = searcher.doc(DocAddress::new(0u32, 0u32))?;
let value: Option<&Facet> = doc.get_first(facet_field).and_then(Value::as_facet); let value: Option<&Facet> = doc.get_first(facet_field).and_then(Value::facet);
assert_eq!(value, Facet::from_text("/a/b").ok().as_ref()); assert_eq!(value, Facet::from_text("/a/b").ok().as_ref());
Ok(()) Ok(())
} }

View File

@@ -1,38 +1,51 @@
//! Column oriented field storage for tantivy. /*!
//! Column oriented field storage for tantivy.
//! It is the equivalent of `Lucene`'s `DocValues`.
//!
//! Fast fields is a column-oriented fashion storage of `tantivy`.
//!
//! It is designed for the fast random access of some document
//! fields given a document id.
//!
//! `FastField` are useful when a field is required for all or most of
//! the `DocSet` : for instance for scoring, grouping, filtering, or faceting.
//!
//!
//! Fields have to be declared as `FAST` in the schema.
//! Currently only 64-bits integers (signed or unsigned) are
//! supported.
//!
//! They are stored in a bit-packed fashion so that their
//! memory usage is directly linear with the amplitude of the
//! values stored.
//!
//! Read access performance is comparable to that of an array lookup.
pub use self::alive_bitset::{intersect_alive_bitsets, write_alive_bitset, AliveBitSet}; It is the equivalent of `Lucene`'s `DocValues`.
Fast fields is a column-oriented fashion storage of `tantivy`.
It is designed for the fast random access of some document
fields given a document id.
`FastField` are useful when a field is required for all or most of
the `DocSet` : for instance for scoring, grouping, filtering, or faceting.
Fields have to be declared as `FAST` in the schema.
Currently only 64-bits integers (signed or unsigned) are
supported.
They are stored in a bit-packed fashion so that their
memory usage is directly linear with the amplitude of the
values stored.
Read access performance is comparable to that of an array lookup.
*/
pub use self::alive_bitset::intersect_alive_bitsets;
pub use self::alive_bitset::write_alive_bitset;
pub use self::alive_bitset::AliveBitSet;
pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter}; pub use self::bytes::{BytesFastFieldReader, BytesFastFieldWriter};
pub use self::error::{FastFieldNotAvailableError, Result}; pub use self::error::{FastFieldNotAvailableError, Result};
pub use self::facet_reader::FacetReader; pub use self::facet_reader::FacetReader;
pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter}; pub use self::multivalued::{MultiValuedFastFieldReader, MultiValuedFastFieldWriter};
pub use self::reader::{DynamicFastFieldReader, FastFieldReader}; pub(crate) use self::reader::BitpackedFastFieldReader;
pub use self::reader::DynamicFastFieldReader;
pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::{CompositeFastFieldSerializer, FastFieldDataAccess, FastFieldStats}; pub use self::serializer::CompositeFastFieldSerializer;
pub use self::serializer::FastFieldDataAccess;
pub use self::serializer::FastFieldStats;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc}; use crate::schema::Cardinality;
use crate::schema::{Cardinality, FieldType, Type, Value}; use crate::schema::FieldType;
use crate::schema::Value;
use crate::DocId; use crate::DocId;
use crate::{
chrono::{NaiveDateTime, Utc},
schema::Type,
};
mod alive_bitset; mod alive_bitset;
mod bytes; mod bytes;
@@ -200,20 +213,22 @@ fn value_to_u64(value: &Value) -> u64 {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashMap; use super::*;
use std::path::Path; use crate::directory::CompositeFile;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::merge_policy::NoMergePolicy;
use crate::schema::Field;
use crate::schema::Schema;
use crate::schema::FAST;
use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader};
use common::HasLen; use common::HasLen;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::SeedableRng; use rand::SeedableRng;
use std::collections::HashMap;
use super::*; use std::path::Path;
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::merge_policy::NoMergePolicy;
use crate::schema::{Document, Field, IntOptions, Schema, FAST};
use crate::{Index, SegmentId, SegmentReader};
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| { pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -392,7 +407,7 @@ mod tests {
serializer.close().unwrap(); serializer.close().unwrap();
} }
let file = directory.open_read(path).unwrap(); let file = directory.open_read(path).unwrap();
// assert_eq!(file.len(), 17710 as usize); //bitpacked size //assert_eq!(file.len(), 17710 as usize); //bitpacked size
assert_eq!(file.len(), 10175_usize); // linear interpol size assert_eq!(file.len(), 10175_usize); // linear interpol size
{ {
let fast_fields_composite = CompositeFile::open(&file)?; let fast_fields_composite = CompositeFile::open(&file)?;
@@ -572,16 +587,16 @@ mod tests {
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA};
use super::*;
use crate::directory::CompositeFile;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
use test::{self, Bencher}; use test::{self, Bencher};
use super::tests::{generate_permutation, FIELD, SCHEMA};
use super::*;
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
#[bench] #[bench]
fn bench_intfastfield_linear_veclookup(b: &mut Bencher) { fn bench_intfastfield_linear_veclookup(b: &mut Bencher) {
let permutation = generate_permutation(); let permutation = generate_permutation();

View File

@@ -7,17 +7,23 @@ pub use self::writer::MultiValuedFastFieldWriter;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use chrono::Duration;
use futures::executor::block_on;
use proptest::strategy::Strategy;
use proptest::{prop_oneof, proptest};
use test_log::test;
use crate::collector::TopDocs; use crate::collector::TopDocs;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::QueryParser; use crate::query::QueryParser;
use crate::schema::{Cardinality, Facet, FacetOptions, IntOptions, Schema}; use crate::schema::Cardinality;
use crate::{Document, Index, Term}; use crate::schema::Facet;
use crate::schema::FacetOptions;
use crate::schema::IntOptions;
use crate::schema::Schema;
use crate::Document;
use crate::Index;
use crate::Term;
use chrono::Duration;
use futures::executor::block_on;
use proptest::prop_oneof;
use proptest::proptest;
use proptest::strategy::Strategy;
use test_log::test;
#[test] #[test]
fn test_multivalued_u64() -> crate::Result<()> { fn test_multivalued_u64() -> crate::Result<()> {
@@ -104,7 +110,7 @@ mod tests {
retrieved_doc retrieved_doc
.get_first(date_field) .get_first(date_field)
.expect("cannot find value") .expect("cannot find value")
.as_date() .date_value()
.unwrap() .unwrap()
.timestamp(), .timestamp(),
first_time_stamp.timestamp() first_time_stamp.timestamp()
@@ -113,7 +119,7 @@ mod tests {
retrieved_doc retrieved_doc
.get_first(time_i) .get_first(time_i)
.expect("cannot find value") .expect("cannot find value")
.as_i64(), .i64_value(),
Some(1i64) Some(1i64)
); );
} }
@@ -132,7 +138,7 @@ mod tests {
retrieved_doc retrieved_doc
.get_first(date_field) .get_first(date_field)
.expect("cannot find value") .expect("cannot find value")
.as_date() .date_value()
.unwrap() .unwrap()
.timestamp(), .timestamp(),
two_secs_ahead.timestamp() two_secs_ahead.timestamp()
@@ -141,7 +147,7 @@ mod tests {
retrieved_doc retrieved_doc
.get_first(time_i) .get_first(time_i)
.expect("cannot find value") .expect("cannot find value")
.as_i64(), .i64_value(),
Some(3i64) Some(3i64)
); );
} }
@@ -174,7 +180,7 @@ mod tests {
retrieved_doc retrieved_doc
.get_first(date_field) .get_first(date_field)
.expect("cannot find value") .expect("cannot find value")
.as_date() .date_value()
.expect("value not of Date type") .expect("value not of Date type")
.timestamp(), .timestamp(),
(first_time_stamp + Duration::seconds(offset_sec)).timestamp() (first_time_stamp + Duration::seconds(offset_sec)).timestamp()
@@ -183,7 +189,7 @@ mod tests {
retrieved_doc retrieved_doc
.get_first(time_i) .get_first(time_i)
.expect("cannot find value") .expect("cannot find value")
.as_i64(), .i64_value(),
Some(time_i_val) Some(time_i_val)
); );
} }

View File

@@ -10,6 +10,7 @@ use crate::DocId;
/// The `vals_reader` will access the concatenated list of all /// The `vals_reader` will access the concatenated list of all
/// values for all reader. /// values for all reader.
/// The `idx_reader` associated, for each document, the index of its first value. /// The `idx_reader` associated, for each document, the index of its first value.
///
#[derive(Clone)] #[derive(Clone)]
pub struct MultiValuedFastFieldReader<Item: FastValue> { pub struct MultiValuedFastFieldReader<Item: FastValue> {
idx_reader: DynamicFastFieldReader<u64>, idx_reader: DynamicFastFieldReader<u64>,

View File

@@ -1,15 +1,13 @@
use std::io;
use fnv::FnvHashMap;
use tantivy_bitpacker::minmax;
use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy; use crate::fastfield::serializer::BitpackedFastFieldSerializerLegacy;
use crate::fastfield::{value_to_u64, CompositeFastFieldSerializer}; use crate::fastfield::CompositeFastFieldSerializer;
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field}; use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use crate::DocId; use crate::DocId;
use crate::{fastfield::value_to_u64, indexer::doc_id_mapping::DocIdMapping};
use fnv::FnvHashMap;
use std::io;
use tantivy_bitpacker::minmax;
/// Writer for multi-valued (as in, more than one value per document) /// Writer for multi-valued (as in, more than one value per document)
/// int fast field. /// int fast field.
@@ -22,8 +20,7 @@ use crate::DocId;
/// - add your document simply by calling `.add_document(...)`. /// - add your document simply by calling `.add_document(...)`.
/// ///
/// The `MultiValuedFastFieldWriter` can be acquired from the /// The `MultiValuedFastFieldWriter` can be acquired from the
/// fastfield writer, by calling /// fastfield writer, by calling [`.get_multivalue_writer(...)`](./struct.FastFieldsWriter.html#method.get_multivalue_writer).
/// [`.get_multivalue_writer(...)`](./struct.FastFieldsWriter.html#method.get_multivalue_writer).
/// ///
/// Once acquired, writing is done by calling calls to /// Once acquired, writing is done by calling calls to
/// `.add_document_vals(&[u64])` once per document. /// `.add_document_vals(&[u64])` once per document.
@@ -79,7 +76,7 @@ impl MultiValuedFastFieldWriter {
// facets are indexed in the `SegmentWriter` as we encode their unordered id. // facets are indexed in the `SegmentWriter` as we encode their unordered id.
if !self.is_facet { if !self.is_facet {
for field_value in doc.field_values() { for field_value in doc.field_values() {
if field_value.field == self.field { if field_value.field() == self.field {
self.add_val(value_to_u64(field_value.value())); self.add_val(value_to_u64(field_value.value()));
} }
} }
@@ -134,6 +131,7 @@ impl MultiValuedFastFieldWriter {
/// During the serialization of the segment, terms gets sorted and /// During the serialization of the segment, terms gets sorted and
/// `tantivy` builds a mapping to convert this `UnorderedTermId` into /// `tantivy` builds a mapping to convert this `UnorderedTermId` into
/// term ordinals. /// term ordinals.
///
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut CompositeFastFieldSerializer, serializer: &mut CompositeFastFieldSerializer,

View File

@@ -1,25 +1,25 @@
use super::FastValue;
use crate::directory::CompositeFile;
use crate::directory::FileSlice;
use crate::directory::OwnedBytes;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::DocId;
use common::BinarySerializable;
use fastfield_codecs::bitpacked::BitpackedFastFieldReader as BitpackedReader;
use fastfield_codecs::bitpacked::BitpackedFastFieldSerializer;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldReader;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldReader;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
use fastfield_codecs::FastFieldCodecReader;
use fastfield_codecs::FastFieldCodecSerializer;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::path::Path; use std::path::Path;
use common::BinarySerializable;
use fastfield_codecs::bitpacked::{
BitpackedFastFieldReader as BitpackedReader, BitpackedFastFieldSerializer,
};
use fastfield_codecs::linearinterpol::{
LinearInterpolFastFieldReader, LinearInterpolFastFieldSerializer,
};
use fastfield_codecs::multilinearinterpol::{
MultiLinearInterpolFastFieldReader, MultiLinearInterpolFastFieldSerializer,
};
use fastfield_codecs::{FastFieldCodecReader, FastFieldCodecSerializer};
use super::FastValue;
use crate::directory::{CompositeFile, Directory, FileSlice, OwnedBytes, RamDirectory, WritePtr};
use crate::fastfield::{CompositeFastFieldSerializer, FastFieldsWriter};
use crate::schema::{Schema, FAST};
use crate::DocId;
/// FastFieldReader is the trait to access fast field data. /// FastFieldReader is the trait to access fast field data.
pub trait FastFieldReader<Item: FastValue>: Clone { pub trait FastFieldReader<Item: FastValue>: Clone {
/// Return the value associated to the given document. /// Return the value associated to the given document.
@@ -64,6 +64,7 @@ pub trait FastFieldReader<Item: FastValue>: Clone {
#[derive(Clone)] #[derive(Clone)]
/// DynamicFastFieldReader wraps different readers to access /// DynamicFastFieldReader wraps different readers to access
/// the various encoded fastfield data /// the various encoded fastfield data
///
pub enum DynamicFastFieldReader<Item: FastValue> { pub enum DynamicFastFieldReader<Item: FastValue> {
/// Bitpacked compressed fastfield data. /// Bitpacked compressed fastfield data.
Bitpacked(FastFieldReaderCodecWrapper<Item, BitpackedReader>), Bitpacked(FastFieldReaderCodecWrapper<Item, BitpackedReader>),
@@ -145,6 +146,7 @@ impl<Item: FastValue> FastFieldReader<Item> for DynamicFastFieldReader<Item> {
/// Wrapper for accessing a fastfield. /// Wrapper for accessing a fastfield.
/// ///
/// Holds the data and the codec to the read the data. /// Holds the data and the codec to the read the data.
///
#[derive(Clone)] #[derive(Clone)]
pub struct FastFieldReaderCodecWrapper<Item: FastValue, CodecReader> { pub struct FastFieldReaderCodecWrapper<Item: FastValue, CodecReader> {
reader: CodecReader, reader: CodecReader,
@@ -160,8 +162,7 @@ impl<Item: FastValue, C: FastFieldCodecReader> FastFieldReaderCodecWrapper<Item,
assert_eq!( assert_eq!(
BitpackedFastFieldSerializer::ID, BitpackedFastFieldSerializer::ID,
id, id,
"Tried to open fast field as bitpacked encoded (id=1), but got serializer with \ "Tried to open fast field as bitpacked encoded (id=1), but got serializer with different id"
different id"
); );
Self::open_from_bytes(bytes) Self::open_from_bytes(bytes)
} }
@@ -248,6 +249,8 @@ impl<Item: FastValue, C: FastFieldCodecReader + Clone> FastFieldReader<Item>
} }
} }
pub(crate) type BitpackedFastFieldReader<Item> = FastFieldReaderCodecWrapper<Item, BitpackedReader>;
impl<Item: FastValue> From<Vec<Item>> for DynamicFastFieldReader<Item> { impl<Item: FastValue> From<Vec<Item>> for DynamicFastFieldReader<Item> {
fn from(vals: Vec<Item>) -> DynamicFastFieldReader<Item> { fn from(vals: Vec<Item>) -> DynamicFastFieldReader<Item> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();

View File

@@ -1,12 +1,14 @@
use super::reader::DynamicFastFieldReader; use crate::directory::CompositeFile;
use crate::directory::{CompositeFile, FileSlice}; use crate::directory::FileSlice;
use crate::fastfield::{ use crate::fastfield::MultiValuedFastFieldReader;
BytesFastFieldReader, FastFieldNotAvailableError, FastValue, MultiValuedFastFieldReader, use crate::fastfield::{BitpackedFastFieldReader, FastFieldNotAvailableError};
}; use crate::fastfield::{BytesFastFieldReader, FastValue};
use crate::schema::{Cardinality, Field, FieldType, Schema}; use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use crate::TantivyError; use crate::TantivyError;
use super::reader::DynamicFastFieldReader;
/// Provides access to all of the BitpackedFastFieldReader. /// Provides access to all of the BitpackedFastFieldReader.
/// ///
/// Internally, `FastFieldReaders` have preloaded fast field readers, /// Internally, `FastFieldReaders` have preloaded fast field readers,
@@ -129,11 +131,10 @@ impl FastFieldReaders {
self.typed_fast_field_reader(field) self.typed_fast_field_reader(field)
} }
/// Returns the `u64` fast field reader reader associated to `field`, regardless of whether the /// Returns the `u64` fast field reader reader associated to `field`, regardless of whether the given
/// given field is effectively of type `u64` or not. /// field is effectively of type `u64` or not.
/// ///
/// If not, the fastfield reader will returns the u64-value associated to the original /// If not, the fastfield reader will returns the u64-value associated to the original FastValue.
/// FastValue.
pub fn u64_lenient(&self, field: Field) -> crate::Result<DynamicFastFieldReader<u64>> { pub fn u64_lenient(&self, field: Field) -> crate::Result<DynamicFastFieldReader<u64>> {
self.typed_fast_field_reader(field) self.typed_fast_field_reader(field)
} }
@@ -170,8 +171,8 @@ impl FastFieldReaders {
self.typed_fast_field_multi_reader(field) self.typed_fast_field_multi_reader(field)
} }
/// Returns a `u64s` multi-valued fast field reader reader associated to `field`, regardless of /// Returns a `u64s` multi-valued fast field reader reader associated to `field`, regardless of whether the given
/// whether the given field is effectively of type `u64` or not. /// field is effectively of type `u64` or not.
/// ///
/// If `field` is not a u64 multi-valued fast field, this method returns an Error. /// If `field` is not a u64 multi-valued fast field, this method returns an Error.
pub fn u64s_lenient(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> { pub fn u64s_lenient(&self, field: Field) -> crate::Result<MultiValuedFastFieldReader<u64>> {
@@ -218,7 +219,7 @@ impl FastFieldReaders {
))); )));
} }
let fast_field_idx_file = self.fast_field_data(field, 0)?; let fast_field_idx_file = self.fast_field_data(field, 0)?;
let idx_reader = DynamicFastFieldReader::open(fast_field_idx_file)?; let idx_reader = BitpackedFastFieldReader::open(fast_field_idx_file)?;
let data = self.fast_field_data(field, 1)?; let data = self.fast_field_data(field, 1)?;
BytesFastFieldReader::open(idx_reader, data) BytesFastFieldReader::open(idx_reader, data)
} else { } else {

View File

@@ -1,15 +1,16 @@
use std::io::{self, Write}; use crate::directory::CompositeWrite;
use crate::directory::WritePtr;
use common::{BinarySerializable, CountingWriter}; use crate::schema::Field;
pub use fastfield_codecs::bitpacked::{ use common::BinarySerializable;
BitpackedFastFieldSerializer, BitpackedFastFieldSerializerLegacy, use common::CountingWriter;
}; pub use fastfield_codecs::bitpacked::BitpackedFastFieldSerializer;
pub use fastfield_codecs::bitpacked::BitpackedFastFieldSerializerLegacy;
use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer; use fastfield_codecs::linearinterpol::LinearInterpolFastFieldSerializer;
use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer; use fastfield_codecs::multilinearinterpol::MultiLinearInterpolFastFieldSerializer;
pub use fastfield_codecs::{FastFieldCodecSerializer, FastFieldDataAccess, FastFieldStats}; pub use fastfield_codecs::FastFieldCodecSerializer;
pub use fastfield_codecs::FastFieldDataAccess;
use crate::directory::{CompositeWrite, WritePtr}; pub use fastfield_codecs::FastFieldStats;
use crate::schema::Field; use std::io::{self, Write};
/// `CompositeFastFieldSerializer` is in charge of serializing /// `CompositeFastFieldSerializer` is in charge of serializing
/// fastfields on disk. /// fastfields on disk.
@@ -57,8 +58,7 @@ impl CompositeFastFieldSerializer {
Ok(CompositeFastFieldSerializer { composite_write }) Ok(CompositeFastFieldSerializer { composite_write })
} }
/// Serialize data into a new u64 fast field. The best compression codec will be chosen /// Serialize data into a new u64 fast field. The best compression codec will be chosen automatically.
/// automatically.
pub fn create_auto_detect_u64_fast_field( pub fn create_auto_detect_u64_fast_field(
&mut self, &mut self,
field: Field, field: Field,
@@ -76,8 +76,7 @@ impl CompositeFastFieldSerializer {
0, 0,
) )
} }
/// Serialize data into a new u64 fast field. The best compression codec will be chosen /// Serialize data into a new u64 fast field. The best compression codec will be chosen automatically.
/// automatically.
pub fn create_auto_detect_u64_fast_field_with_idx( pub fn create_auto_detect_u64_fast_field_with_idx(
&mut self, &mut self,
field: Field, field: Field,
@@ -113,8 +112,7 @@ impl CompositeFastFieldSerializer {
broken_estimation.1 broken_estimation.1
); );
} }
// removing nan values for codecs with broken calculations, and max values which disables // removing nan values for codecs with broken calculations, and max values which disables codecs
// codecs
estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX); estimations.retain(|estimation| !estimation.0.is_nan() && estimation.0 != f32::MAX);
estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); estimations.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
let (_ratio, name, id) = estimations[0]; let (_ratio, name, id) = estimations[0];

View File

@@ -1,10 +1,3 @@
use std::collections::HashMap;
use std::io;
use common;
use fnv::FnvHashMap;
use tantivy_bitpacker::BlockedBitpacker;
use super::multivalued::MultiValuedFastFieldWriter; use super::multivalued::MultiValuedFastFieldWriter;
use super::serializer::FastFieldStats; use super::serializer::FastFieldStats;
use super::FastFieldDataAccess; use super::FastFieldDataAccess;
@@ -13,6 +6,11 @@ use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema}; use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use common;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use tantivy_bitpacker::BlockedBitpacker;
/// The fastfieldswriter regroup all of the fast field writers. /// The fastfieldswriter regroup all of the fast field writers.
pub struct FastFieldsWriter { pub struct FastFieldsWriter {
@@ -326,8 +324,7 @@ struct WriterFastFieldAccessProvider<'map, 'bitp> {
impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'bitp> { impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'bitp> {
/// Return the value associated to the given doc. /// Return the value associated to the given doc.
/// ///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance /// Whenever possible use the Iterator passed to the fastfield creation instead, for performance reasons.
/// reasons.
/// ///
/// # Panics /// # Panics
/// ///
@@ -335,9 +332,7 @@ impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'b
fn get_val(&self, doc: u64) -> u64 { fn get_val(&self, doc: u64) -> u64 {
if let Some(doc_id_map) = self.doc_id_map { if let Some(doc_id_map) = self.doc_id_map {
self.vals self.vals
.get(doc_id_map.get_old_doc_id(doc as u32) as usize) // consider extra .get(doc_id_map.get_old_doc_id(doc as u32) as usize) // consider extra FastFieldReader wrapper for non doc_id_map
// FastFieldReader wrapper for
// non doc_id_map
} else { } else {
self.vals.get(doc as usize) self.vals.get(doc as usize)
} }

View File

@@ -21,24 +21,32 @@ mod reader;
mod serializer; mod serializer;
mod writer; mod writer;
use self::code::{fieldnorm_to_id, id_to_fieldnorm};
pub use self::reader::{FieldNormReader, FieldNormReaders}; pub use self::reader::{FieldNormReader, FieldNormReaders};
pub use self::serializer::FieldNormsSerializer; pub use self::serializer::FieldNormsSerializer;
pub use self::writer::FieldNormsWriter; pub use self::writer::FieldNormsWriter;
use self::code::{fieldnorm_to_id, id_to_fieldnorm};
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::CompositeFile;
use crate::directory::{Directory, RamDirectory, WritePtr};
use crate::fieldnorm::FieldNormReader;
use crate::fieldnorm::FieldNormsSerializer;
use crate::fieldnorm::FieldNormsWriter;
use crate::query::Query;
use crate::query::TermQuery;
use crate::schema::IndexRecordOption;
use crate::schema::TextFieldIndexing;
use crate::schema::TextOptions;
use crate::schema::TEXT;
use crate::Index;
use crate::Term;
use crate::TERMINATED;
use once_cell::sync::Lazy;
use std::path::Path; use std::path::Path;
use once_cell::sync::Lazy; use crate::schema::{Field, Schema, STORED};
use crate::directory::{CompositeFile, Directory, RamDirectory, WritePtr};
use crate::fieldnorm::{FieldNormReader, FieldNormsSerializer, FieldNormsWriter};
use crate::query::{Query, TermQuery};
use crate::schema::{
Field, IndexRecordOption, Schema, TextFieldIndexing, TextOptions, STORED, TEXT,
};
use crate::{Index, Term, TERMINATED};
pub static SCHEMA: Lazy<Schema> = Lazy::new(|| { pub static SCHEMA: Lazy<Schema> = Lazy::new(|| {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -79,7 +87,7 @@ mod tests {
fieldnorm_writers.record(3u32, *TXT_FIELD, 3); fieldnorm_writers.record(3u32, *TXT_FIELD, 3);
fieldnorm_writers.serialize(serializer, None)?; fieldnorm_writers.serialize(serializer, None)?;
} }
let file = directory.open_read(path)?; let file = directory.open_read(&path)?;
{ {
let fields_composite = CompositeFile::open(&file)?; let fields_composite = CompositeFile::open(&file)?;
assert!(fields_composite.open_read(*FIELD).is_none()); assert!(fields_composite.open_read(*FIELD).is_none());

View File

@@ -1,10 +1,11 @@
use std::sync::Arc;
use super::{fieldnorm_to_id, id_to_fieldnorm}; use super::{fieldnorm_to_id, id_to_fieldnorm};
use crate::directory::{CompositeFile, FileSlice, OwnedBytes}; use crate::directory::CompositeFile;
use crate::directory::FileSlice;
use crate::directory::OwnedBytes;
use crate::schema::Field; use crate::schema::Field;
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use crate::DocId; use crate::DocId;
use std::sync::Arc;
/// Reader for the fieldnorm (for each document, the number of tokens indexed in the /// Reader for the fieldnorm (for each document, the number of tokens indexed in the
/// field) of all indexed fields in the index. /// field) of all indexed fields in the index.

View File

@@ -1,9 +1,9 @@
use crate::directory::CompositeWrite;
use crate::directory::WritePtr;
use crate::schema::Field;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
use crate::directory::{CompositeWrite, WritePtr};
use crate::schema::Field;
/// The fieldnorms serializer is in charge of /// The fieldnorms serializer is in charge of
/// the serialization of field norms for all fields. /// the serialization of field norms for all fields.
pub struct FieldNormsSerializer { pub struct FieldNormsSerializer {

View File

@@ -1,11 +1,12 @@
use crate::{indexer::doc_id_mapping::DocIdMapping, DocId};
use super::fieldnorm_to_id;
use super::FieldNormsSerializer;
use crate::schema::Field;
use crate::schema::Schema;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::{io, iter}; use std::{io, iter};
use super::{fieldnorm_to_id, FieldNormsSerializer};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::schema::{Field, Schema};
use crate::DocId;
/// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte /// The `FieldNormsWriter` is in charge of tracking the fieldnorm byte
/// of each document for each field with field norms. /// of each document for each field with field norms.
/// ///

View File

@@ -1,10 +1,14 @@
use crate::schema;
use crate::Index;
use crate::IndexSettings;
use crate::IndexSortByField;
use crate::Order;
use crate::Searcher;
use crate::{doc, schema::*};
use rand::thread_rng;
use rand::Rng;
use std::collections::HashSet; use std::collections::HashSet;
use rand::{thread_rng, Rng};
use crate::schema::*;
use crate::{doc, schema, Index, IndexSettings, IndexSortByField, Order, Searcher};
fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> { fn check_index_content(searcher: &Searcher, vals: &[u64]) -> crate::Result<()> {
assert!(searcher.segment_readers().len() < 20); assert!(searcher.segment_readers().len() < 20);
assert_eq!(searcher.num_docs() as usize, vals.len()); assert_eq!(searcher.num_docs() as usize, vals.len());
@@ -126,12 +130,14 @@ fn test_functional_indexing_sorted() -> crate::Result<()> {
Ok(()) Ok(())
} }
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod \ const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed \
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, \ do eiusmod tempor incididunt ut labore et dolore magna aliqua. \
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo \ Ut enim ad minim veniam, quis nostrud exercitation ullamco \
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse \ laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat \ dolor in reprehenderit in voluptate velit esse cillum dolore eu \
non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
proident, sunt in culpa qui officia deserunt mollit anim id est \
laborum.";
fn get_text() -> String { fn get_text() -> String {
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
let mut rng = thread_rng(); let mut rng = thread_rng();

View File

@@ -1,9 +1,9 @@
use std::ops::DerefMut;
use std::sync::{Arc, RwLock, Weak};
use super::operation::DeleteOperation; use super::operation::DeleteOperation;
use crate::Opstamp; use crate::Opstamp;
use std::ops::DerefMut;
use std::sync::{Arc, RwLock, Weak};
// The DeleteQueue is similar in conceptually to a multiple // The DeleteQueue is similar in conceptually to a multiple
// consumer single producer broadcast channel. // consumer single producer broadcast channel.
// //
@@ -13,10 +13,12 @@ use crate::Opstamp;
// which points to a specific place of the `DeleteQueue`. // which points to a specific place of the `DeleteQueue`.
// //
// New consumer can be created in two ways // New consumer can be created in two ways
// - calling `delete_queue.cursor()` returns a cursor, that will include all future delete operation // - calling `delete_queue.cursor()` returns a cursor, that
// (and some or none of the past operations... The client is in charge of checking the opstamps.). // will include all future delete operation (and some or none
// - cloning an existing cursor returns a new cursor, that is at the exact same position, and can // of the past operations... The client is in charge of checking the opstamps.).
// now advance independently from the original cursor. // - cloning an existing cursor returns a new cursor, that
// is at the exact same position, and can now advance independently
// from the original cursor.
#[derive(Default)] #[derive(Default)]
struct InnerDeleteQueue { struct InnerDeleteQueue {
writer: Vec<DeleteOperation>, writer: Vec<DeleteOperation>,
@@ -177,8 +179,8 @@ pub struct DeleteCursor {
impl DeleteCursor { impl DeleteCursor {
/// Skips operations and position it so that /// Skips operations and position it so that
/// - either all of the delete operation currently in the queue are consume and the next get /// - either all of the delete operation currently in the
/// will return None. /// queue are consume and the next get will return None.
/// - the next get will return the first operation with an /// - the next get will return the first operation with an
/// `opstamp >= target_opstamp`. /// `opstamp >= target_opstamp`.
pub fn skip_to(&mut self, target_opstamp: Opstamp) { pub fn skip_to(&mut self, target_opstamp: Opstamp) {

View File

@@ -5,8 +5,8 @@ use crate::fastfield::AliveBitSet;
use crate::{merge_filtered_segments, Directory, Index, IndexSettings, Segment, SegmentOrdinal}; use crate::{merge_filtered_segments, Directory, Index, IndexSettings, Segment, SegmentOrdinal};
/// DemuxMapping can be used to reorganize data from multiple segments. /// DemuxMapping can be used to reorganize data from multiple segments.
/// ///
/// DemuxMapping is useful in a multitenant settings, in which each document might actually belong /// DemuxMapping is useful in a multitenant settings, in which each document might actually belong to a different tenant.
/// to a different tenant. It allows to reorganize documents as follows: /// It allows to reorganize documents as follows:
/// ///
/// e.g. if you have two tenant ids TENANT_A and TENANT_B and two segments with /// e.g. if you have two tenant ids TENANT_A and TENANT_B and two segments with
/// the documents (simplified) /// the documents (simplified)
@@ -18,8 +18,7 @@ use crate::{merge_filtered_segments, Directory, Index, IndexSettings, Segment, S
/// Seg 2 [TENANT_B, TENANT_B] /// Seg 2 [TENANT_B, TENANT_B]
/// ///
/// Demuxing is the tool for that. /// Demuxing is the tool for that.
/// Semantically you can define a mapping from [old segment ordinal, old doc_id] -> [new segment /// Semantically you can define a mapping from [old segment ordinal, old doc_id] -> [new segment ordinal].
/// ordinal].
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct DemuxMapping { pub struct DemuxMapping {
/// [index old segment ordinal] -> [index doc_id] = new segment ordinal /// [index old segment ordinal] -> [index doc_id] = new segment ordinal
@@ -133,24 +132,27 @@ pub fn demux(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::{
collector::TopDocs,
directory::RamDirectory,
query::QueryParser,
schema::{Schema, TEXT},
DocAddress, Term,
};
use super::*; use super::*;
use crate::collector::TopDocs;
use crate::directory::RamDirectory;
use crate::query::QueryParser;
use crate::schema::{Schema, TEXT};
use crate::{DocAddress, Term};
#[test] #[test]
fn test_demux_map_to_deletebitset() { fn test_demux_map_to_deletebitset() {
let max_value = 2; let max_value = 2;
let mut demux_mapping = DemuxMapping::default(); let mut demux_mapping = DemuxMapping::default();
// segment ordinal 0 mapping //segment ordinal 0 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value); let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1); doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 0); doc_id_to_segment.set(1, 0);
demux_mapping.add(doc_id_to_segment); demux_mapping.add(doc_id_to_segment);
// segment ordinal 1 mapping //segment ordinal 1 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value); let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1); doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 1); doc_id_to_segment.set(1, 1);
@@ -233,13 +235,13 @@ mod tests {
let mut demux_mapping = DemuxMapping::default(); let mut demux_mapping = DemuxMapping::default();
{ {
let max_value = 2; let max_value = 2;
// segment ordinal 0 mapping //segment ordinal 0 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value); let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1); doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 0); doc_id_to_segment.set(1, 0);
demux_mapping.add(doc_id_to_segment); demux_mapping.add(doc_id_to_segment);
// segment ordinal 1 mapping //segment ordinal 1 mapping
let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value); let mut doc_id_to_segment = DocIdToSegmentOrdinal::with_max_doc(max_value);
doc_id_to_segment.set(0, 1); doc_id_to_segment.set(0, 1);
doc_id_to_segment.set(1, 1); doc_id_to_segment.set(1, 1);
@@ -272,7 +274,7 @@ mod tests {
let text_field = index.schema().get_field("text").unwrap(); let text_field = index.schema().get_field("text").unwrap();
let do_search = |term: &str| { let do_search = |term: &str| {
let query = QueryParser::for_index(index, vec![text_field]) let query = QueryParser::for_index(&index, vec![text_field])
.parse_query(term) .parse_query(term)
.unwrap(); .unwrap();
let top_docs: Vec<(f32, DocAddress)> = let top_docs: Vec<(f32, DocAddress)> =
@@ -301,7 +303,7 @@ mod tests {
let text_field = index.schema().get_field("text").unwrap(); let text_field = index.schema().get_field("text").unwrap();
let do_search = |term: &str| { let do_search = |term: &str| {
let query = QueryParser::for_index(index, vec![text_field]) let query = QueryParser::for_index(&index, vec![text_field])
.parse_query(term) .parse_query(term)
.unwrap(); .unwrap();
let top_docs: Vec<(f32, DocAddress)> = let top_docs: Vec<(f32, DocAddress)> =

View File

@@ -1,12 +1,13 @@
//! This module is used when sorting the index by a property, e.g. //! This module is used when sorting the index by a property, e.g.
//! to get mappings from old doc_id to new doc_id and vice versa, after sorting //! to get mappings from old doc_id to new doc_id and vice versa, after sorting
//!
use std::cmp::Reverse;
use std::ops::Index;
use super::SegmentWriter; use super::SegmentWriter;
use crate::schema::{Field, Schema}; use crate::{
use crate::{DocId, IndexSortByField, Order, SegmentOrdinal, TantivyError}; schema::{Field, Schema},
DocId, IndexSortByField, Order, SegmentOrdinal, TantivyError,
};
use std::{cmp::Reverse, ops::Index};
/// Struct to provide mapping from new doc_id to old doc_id and segment. /// Struct to provide mapping from new doc_id to old doc_id and segment.
#[derive(Clone)] #[derive(Clone)]
@@ -151,12 +152,11 @@ pub(crate) fn get_doc_id_mapping_from_field(
#[cfg(test)] #[cfg(test)]
mod tests_indexsorting { mod tests_indexsorting {
use crate::collector::TopDocs;
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::indexer::doc_id_mapping::DocIdMapping; use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::query::QueryParser; use crate::{collector::TopDocs, query::QueryParser, schema::*};
use crate::schema::{Schema, *}; use crate::{schema::Schema, DocAddress};
use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order}; use crate::{Index, IndexSettings, IndexSortByField, Order};
fn create_test_index( fn create_test_index(
index_settings: Option<IndexSettings>, index_settings: Option<IndexSettings>,
@@ -217,7 +217,7 @@ mod tests_indexsorting {
]; ];
for option in options { for option in options {
// let options = get_text_options(); //let options = get_text_options();
// no index_sort // no index_sort
let index = create_test_index(None, option.clone())?; let index = create_test_index(None, option.clone())?;
let my_text_field = index.schema().get_field("text_field").unwrap(); let my_text_field = index.schema().get_field("text_field").unwrap();
@@ -318,7 +318,7 @@ mod tests_indexsorting {
.doc(DocAddress::new(0, 3))? .doc(DocAddress::new(0, 3))?
.get_first(my_string_field) .get_first(my_string_field)
.unwrap() .unwrap()
.as_text(), .text(),
Some("blublub") Some("blublub")
); );
} }
@@ -341,7 +341,7 @@ mod tests_indexsorting {
.doc(DocAddress::new(0, 0))? .doc(DocAddress::new(0, 0))?
.get_first(my_string_field) .get_first(my_string_field)
.unwrap() .unwrap()
.as_text(), .text(),
Some("blublub") Some("blublub")
); );
let doc = searcher.doc(DocAddress::new(0, 4))?; let doc = searcher.doc(DocAddress::new(0, 4))?;
@@ -363,7 +363,7 @@ mod tests_indexsorting {
{ {
let doc = searcher.doc(DocAddress::new(0, 4))?; let doc = searcher.doc(DocAddress::new(0, 4))?;
assert_eq!( assert_eq!(
doc.get_first(my_string_field).unwrap().as_text(), doc.get_first(my_string_field).unwrap().text(),
Some("blublub") Some("blublub")
); );
} }

View File

@@ -1,4 +1,5 @@
use crate::{DocId, Opstamp}; use crate::DocId;
use crate::Opstamp;
// Doc to opstamp is used to identify which // Doc to opstamp is used to identify which
// document should be deleted. // document should be deleted.

View File

@@ -1,19 +1,14 @@
use std::ops::Range;
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use common::BitSet;
use crossbeam::channel;
use futures::executor::block_on;
use futures::future::Future;
use smallvec::smallvec;
use super::operation::{AddOperation, UserOperation}; use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater; use super::segment_updater::SegmentUpdater;
use super::{AddBatch, AddBatchReceiver, AddBatchSender, PreparedCommit}; use super::PreparedCommit;
use crate::core::{Index, Segment, SegmentComponent, SegmentId, SegmentMeta, SegmentReader}; use crate::core::Index;
use crate::directory::{DirectoryLock, GarbageCollectionResult, TerminatingWrite}; use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::docset::{DocSet, TERMINATED}; use crate::docset::{DocSet, TERMINATED};
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::fastfield::write_alive_bitset; use crate::fastfield::write_alive_bitset;
@@ -22,17 +17,32 @@ use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
use crate::indexer::index_writer_status::IndexWriterStatus; use crate::indexer::index_writer_status::IndexWriterStatus;
use crate::indexer::operation::DeleteOperation; use crate::indexer::operation::DeleteOperation;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::{MergePolicy, SegmentEntry, SegmentWriter}; use crate::indexer::MergePolicy;
use crate::schema::{Document, IndexRecordOption, Term}; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentWriter;
use crate::schema::Document;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Opstamp; use crate::Opstamp;
use common::BitSet;
use crossbeam::channel;
use futures::executor::block_on;
use futures::future::Future;
use smallvec::smallvec;
use std::ops::Range;
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
// Size of the margin for the `memory_arena`. A segment is closed when the remaining memory use super::{AddBatch, AddBatchReceiver, AddBatchSender};
// in the `memory_arena` goes below MARGIN_IN_BYTES.
// Size of the margin for the heap. A segment is closed when the remaining memory
// in the heap goes below MARGIN_IN_BYTES.
pub const MARGIN_IN_BYTES: usize = 1_000_000; pub const MARGIN_IN_BYTES: usize = 1_000_000;
// We impose the memory per thread to be at least 3 MB. // We impose the memory per thread to be at least 3 MB.
pub const MEMORY_ARENA_NUM_BYTES_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize; pub const HEAP_SIZE_MIN: usize = ((MARGIN_IN_BYTES as u32) * 3u32) as usize;
pub const MEMORY_ARENA_NUM_BYTES_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES; pub const HEAP_SIZE_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
// We impose the number of index writter thread to be at most this. // We impose the number of index writter thread to be at most this.
pub const MAX_NUM_THREAD: usize = 8; pub const MAX_NUM_THREAD: usize = 8;
@@ -61,7 +71,7 @@ pub struct IndexWriter {
index: Index, index: Index,
memory_arena_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>, workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
@@ -179,10 +189,10 @@ fn index_documents(
) -> crate::Result<()> { ) -> crate::Result<()> {
let schema = segment.schema(); let schema = segment.schema();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), schema)?; let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
for document_group in grouped_document_iterator { for document_group in grouped_document_iterator {
for doc in document_group { for doc in document_group {
segment_writer.add_document(doc)?; segment_writer.add_document(doc, &schema)?;
} }
let mem_usage = segment_writer.mem_usage(); let mem_usage = segment_writer.mem_usage();
if mem_usage >= memory_budget - MARGIN_IN_BYTES { if mem_usage >= memory_budget - MARGIN_IN_BYTES {
@@ -268,26 +278,22 @@ impl IndexWriter {
/// should work at the same time. /// should work at the same time.
/// # Errors /// # Errors
/// If the lockfile already exists, returns `Error::FileAlreadyExists`. /// If the lockfile already exists, returns `Error::FileAlreadyExists`.
/// If the memory arena per thread is too small or too big, returns /// If the heap size per thread is too small or too big, returns `TantivyError::InvalidArgument`
/// `TantivyError::InvalidArgument`
pub(crate) fn new( pub(crate) fn new(
index: &Index, index: &Index,
num_threads: usize, num_threads: usize,
memory_arena_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
directory_lock: DirectoryLock, directory_lock: DirectoryLock,
) -> crate::Result<IndexWriter> { ) -> crate::Result<IndexWriter> {
if memory_arena_in_bytes_per_thread < MEMORY_ARENA_NUM_BYTES_MIN { if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
let err_msg = format!( let err_msg = format!(
"The memory arena in bytes per thread needs to be at least {}.", "The heap size per thread needs to be at least {}.",
MEMORY_ARENA_NUM_BYTES_MIN HEAP_SIZE_MIN
); );
return Err(TantivyError::InvalidArgument(err_msg)); return Err(TantivyError::InvalidArgument(err_msg));
} }
if memory_arena_in_bytes_per_thread >= MEMORY_ARENA_NUM_BYTES_MAX { if heap_size_in_bytes_per_thread >= HEAP_SIZE_MAX {
let err_msg = format!( let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
"The memory arena in bytes per thread cannot exceed {}",
MEMORY_ARENA_NUM_BYTES_MAX
);
return Err(TantivyError::InvalidArgument(err_msg)); return Err(TantivyError::InvalidArgument(err_msg));
} }
let (document_sender, document_receiver): (AddBatchSender, AddBatchReceiver) = let (document_sender, document_receiver): (AddBatchSender, AddBatchReceiver) =
@@ -305,7 +311,7 @@ impl IndexWriter {
let mut index_writer = IndexWriter { let mut index_writer = IndexWriter {
_directory_lock: Some(directory_lock), _directory_lock: Some(directory_lock),
memory_arena_in_bytes_per_thread, heap_size_in_bytes_per_thread,
index: index.clone(), index: index.clone(),
index_writer_status: IndexWriterStatus::from(document_receiver), index_writer_status: IndexWriterStatus::from(document_receiver),
@@ -386,13 +392,7 @@ impl IndexWriter {
fn operation_receiver(&self) -> crate::Result<AddBatchReceiver> { fn operation_receiver(&self) -> crate::Result<AddBatchReceiver> {
self.index_writer_status self.index_writer_status
.operation_receiver() .operation_receiver()
.ok_or_else(|| { .ok_or_else(|| crate::TantivyError::ErrorInThread("The index writer was killed. It can happen if an indexing worker encounterred an Io error for instance.".to_string()))
crate::TantivyError::ErrorInThread(
"The index writer was killed. It can happen if an indexing worker \
encounterred an Io error for instance."
.to_string(),
)
})
} }
/// Spawns a new worker thread for indexing. /// Spawns a new worker thread for indexing.
@@ -405,7 +405,7 @@ impl IndexWriter {
let mut delete_cursor = self.delete_queue.cursor(); let mut delete_cursor = self.delete_queue.cursor();
let mem_budget = self.memory_arena_in_bytes_per_thread; let mem_budget = self.heap_size_in_bytes_per_thread;
let index = self.index.clone(); let index = self.index.clone();
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new() let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
.name(format!("thrd-tantivy-index{}", self.worker_id)) .name(format!("thrd-tantivy-index{}", self.worker_id))
@@ -564,7 +564,7 @@ impl IndexWriter {
let new_index_writer: IndexWriter = IndexWriter::new( let new_index_writer: IndexWriter = IndexWriter::new(
&self.index, &self.index,
self.num_threads, self.num_threads,
self.memory_arena_in_bytes_per_thread, self.heap_size_in_bytes_per_thread,
directory_lock, directory_lock,
)?; )?;
@@ -653,6 +653,7 @@ impl IndexWriter {
/// ///
/// Commit returns the `opstamp` of the last document /// Commit returns the `opstamp` of the last document
/// that made it in the commit. /// that made it in the commit.
///
pub fn commit(&mut self) -> crate::Result<Opstamp> { pub fn commit(&mut self) -> crate::Result<Opstamp> {
self.prepare_commit()?.commit() self.prepare_commit()?.commit()
} }
@@ -779,7 +780,8 @@ impl Drop for IndexWriter {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::{HashMap, HashSet}; use std::collections::HashMap;
use std::collections::HashSet;
use futures::executor::block_on; use futures::executor::block_on;
use proptest::prelude::*; use proptest::prelude::*;
@@ -792,20 +794,31 @@ mod tests {
use crate::error::*; use crate::error::*;
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::{QueryParser, TermQuery}; use crate::query::QueryParser;
use crate::schema::{ use crate::query::TermQuery;
self, Cardinality, Facet, FacetOptions, IndexRecordOption, IntOptions, TextFieldIndexing, use crate::schema::Cardinality;
TextOptions, FAST, INDEXED, STORED, STRING, TEXT, use crate::schema::Facet;
}; use crate::schema::FacetOptions;
use crate::{DocAddress, Index, IndexSettings, IndexSortByField, Order, ReloadPolicy, Term}; use crate::schema::IntOptions;
use crate::schema::TextFieldIndexing;
use crate::schema::TextOptions;
use crate::schema::STORED;
use crate::schema::TEXT;
use crate::schema::{self, IndexRecordOption, FAST, INDEXED, STRING};
use crate::DocAddress;
use crate::Index;
use crate::ReloadPolicy;
use crate::Term;
use crate::{IndexSettings, IndexSortByField, Order};
const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \ const LOREM: &str = "Doc Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed \
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad \ do eiusmod tempor incididunt ut labore et dolore magna aliqua. \
minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip \ Ut enim ad minim veniam, quis nostrud exercitation ullamco \
ex ea commodo consequat. Duis aute irure dolor in reprehenderit in \ laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur \ dolor in reprehenderit in voluptate velit esse cillum dolore eu \
sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \ fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
mollit anim id est laborum."; proident, sunt in culpa qui officia deserunt mollit anim id est \
laborum.";
#[test] #[test]
fn test_operations_group() { fn test_operations_group() {
@@ -960,8 +973,8 @@ mod tests {
let index_writer = index.writer(3_000_000).unwrap(); let index_writer = index.writer(3_000_000).unwrap();
assert_eq!( assert_eq!(
format!("{:?}", index_writer.get_merge_policy()), format!("{:?}", index_writer.get_merge_policy()),
"LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, \ "LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, min_layer_size: 10000, \
min_layer_size: 10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }" level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
); );
let merge_policy = Box::new(NoMergePolicy::default()); let merge_policy = Box::new(NoMergePolicy::default());
index_writer.set_merge_policy(merge_policy); index_writer.set_merge_policy(merge_policy);
@@ -1389,7 +1402,6 @@ mod tests {
) -> crate::Result<()> { ) -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder(); let mut schema_builder = schema::Schema::builder();
let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED); let id_field = schema_builder.add_u64_field("id", FAST | INDEXED | STORED);
let bytes_field = schema_builder.add_bytes_field("bytes", FAST | INDEXED | STORED);
let text_field = schema_builder.add_text_field( let text_field = schema_builder.add_text_field(
"text_field", "text_field",
TextOptions::default() TextOptions::default()
@@ -1436,14 +1448,8 @@ mod tests {
match op { match op {
IndexingOp::AddDoc { id } => { IndexingOp::AddDoc { id } => {
let facet = Facet::from(&("/cola/".to_string() + &id.to_string())); let facet = Facet::from(&("/cola/".to_string() + &id.to_string()));
index_writer.add_document(doc!(id_field=>id, index_writer
bytes_field => id.to_le_bytes().as_slice(), .add_document(doc!(id_field=>id, multi_numbers=> id, multi_numbers => id, text_field => id.to_string(), facet_field => facet, large_text_field=> LOREM))?;
multi_numbers=> id,
multi_numbers => id,
text_field => id.to_string(),
facet_field => facet,
large_text_field=> LOREM
))?;
} }
IndexingOp::DeleteDoc { id } => { IndexingOp::DeleteDoc { id } => {
index_writer.delete_term(Term::from_field_u64(id_field, id)); index_writer.delete_term(Term::from_field_u64(id_field, id));
@@ -1541,7 +1547,12 @@ mod tests {
let store_reader = segment_reader.get_store_reader().unwrap(); let store_reader = segment_reader.get_store_reader().unwrap();
// test store iterator // test store iterator
for doc in store_reader.iter(segment_reader.alive_bitset()) { for doc in store_reader.iter(segment_reader.alive_bitset()) {
let id = doc.unwrap().get_first(id_field).unwrap().as_u64().unwrap(); let id = doc
.unwrap()
.get_first(id_field)
.unwrap()
.u64_value()
.unwrap();
assert!(expected_ids_and_num_occurences.contains_key(&id)); assert!(expected_ids_and_num_occurences.contains_key(&id));
} }
// test store random access // test store random access
@@ -1551,7 +1562,7 @@ mod tests {
.unwrap() .unwrap()
.get_first(id_field) .get_first(id_field)
.unwrap() .unwrap()
.as_u64() .u64_value()
.unwrap(); .unwrap();
assert!(expected_ids_and_num_occurences.contains_key(&id)); assert!(expected_ids_and_num_occurences.contains_key(&id));
let id2 = store_reader let id2 = store_reader
@@ -1559,7 +1570,7 @@ mod tests {
.unwrap() .unwrap()
.get_first(multi_numbers) .get_first(multi_numbers)
.unwrap() .unwrap()
.as_u64() .u64_value()
.unwrap(); .unwrap();
assert_eq!(id, id2); assert_eq!(id, id2);
} }

View File

@@ -90,11 +90,9 @@ impl Drop for IndexWriterBomb {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::mem;
use crossbeam::channel;
use super::IndexWriterStatus; use super::IndexWriterStatus;
use crossbeam::channel;
use std::mem;
#[test] #[test]
fn test_bomb_goes_boom() { fn test_bomb_goes_boom() {

View File

@@ -1,9 +1,7 @@
use std::cmp;
use itertools::Itertools;
use super::merge_policy::{MergeCandidate, MergePolicy}; use super::merge_policy::{MergeCandidate, MergePolicy};
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use itertools::Itertools;
use std::cmp;
const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75; const DEFAULT_LEVEL_LOG_SIZE: f64 = 0.75;
const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000; const DEFAULT_MIN_LAYER_SIZE: u32 = 10_000;
@@ -141,13 +139,13 @@ impl Default for LogMergePolicy {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use once_cell::sync::Lazy;
use super::*; use super::*;
use crate::core::{SegmentId, SegmentMeta, SegmentMetaInventory}; use crate::{
use crate::indexer::merge_policy::MergePolicy; core::{SegmentId, SegmentMeta, SegmentMetaInventory},
use crate::schema; schema,
use crate::schema::INDEXED; };
use crate::{indexer::merge_policy::MergePolicy, schema::INDEXED};
use once_cell::sync::Lazy;
static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default); static INVENTORY: Lazy<SegmentMetaInventory> = Lazy::new(SegmentMetaInventory::default);

View File

@@ -1,8 +1,9 @@
use crate::Opstamp;
use crate::SegmentId;
use crate::{Inventory, TrackedObject};
use std::collections::HashSet; use std::collections::HashSet;
use std::ops::Deref; use std::ops::Deref;
use crate::{Inventory, Opstamp, SegmentId, TrackedObject};
#[derive(Default)] #[derive(Default)]
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>); pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);

View File

@@ -1,8 +1,8 @@
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use std::fmt::Debug; use std::fmt::Debug;
use std::marker; use std::marker;
use crate::core::{SegmentId, SegmentMeta};
/// Set of segment suggested for a merge. /// Set of segment suggested for a merge.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct MergeCandidate(pub Vec<SegmentId>); pub struct MergeCandidate(pub Vec<SegmentId>);
@@ -39,7 +39,8 @@ impl MergePolicy for NoMergePolicy {
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::core::{SegmentId, SegmentMeta}; use crate::core::SegmentId;
use crate::core::SegmentMeta;
/// `MergePolicy` useful for test purposes. /// `MergePolicy` useful for test purposes.
/// ///

Some files were not shown because too many files have changed in this diff Show More