Compare commits

..

16 Commits

Author SHA1 Message Date
Paul Masurel
9fd23f3abf Fixing bench compilation 2019-10-04 16:36:17 +09:00
Paul Masurel
c030990d00 fmt 2019-10-02 09:50:20 +09:00
Paul Masurel
4c3941750b Waiting potentially longer on watch 2019-10-01 09:50:46 +09:00
Paul Masurel
2ea8e618f2 Merge branch 'hotfix-656' 2019-10-01 09:44:56 +09:00
Paul Masurel
94f27f990b Address #656
Broke the reference loop to make sure that the watch_router can
be dropped, and the thread exits.
2019-10-01 09:34:22 +09:00
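
The fix follows the pattern visible in the mmap_directory diff further down: the watcher thread captures only the callback router, not the whole watcher wrapper, so dropping the wrapper closes the channel and lets the thread exit. A minimal, self-contained sketch of that pattern (type names here are illustrative, not tantivy's):

use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread;

// Hypothetical stand-in for the real WatchCallbackList.
#[derive(Default)]
struct CallbackList;

struct Watcher {
    _tx: Sender<()>,            // dropped together with the Watcher...
    _router: Arc<CallbackList>,
}

impl Watcher {
    fn new() -> Watcher {
        let (tx, rx): (Sender<()>, Receiver<()>) = channel();
        let router: Arc<CallbackList> = Arc::default();
        // The thread only captures the router, never the Watcher itself.
        let router_for_thread = Arc::clone(&router);
        thread::spawn(move || {
            // ...so recv() fails once the sender is gone and the thread exits.
            while rx.recv().is_ok() {
                let _ = &router_for_thread; // broadcast() in the real code
            }
        });
        Watcher { _tx: tx, _router: router }
    }
}

fn main() {
    drop(Watcher::new()); // no leaked thread: dropping the sender ends the loop
}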
Paul Masurel
349e8aa348 Removed enum variants on type alias 2019-09-26 18:43:29 +09:00
Paul Masurel
cde9b78b8d Fixing the issue associated with the Regex performance change 2019-09-18 18:29:27 +09:00
fdb-hiroshima
d8894f0bd2 add checksum check in ManagedDirectory (#605)
* add checksum check in ManagedDirectory

fix #400

* flush after writing checksum

* don't checksum atomic file access and clone managed_paths

* implement a footer storing metadata about a file

this is more of a PoC; it requires some refactoring into multiple files
`terminate(self)` is implemented, but not used anywhere yet

* address comments and simplify things with new contract

use ByteOrder for integer to raw byte conversion
consider that atomic write implies atomic read, which might not actually be true
use some indirection to have a boxable terminating writer

* implement TerminatingWrite and make terminate() be called where it should

add dependency on drop_bomb to help find where terminate() should be called
implement TerminatingWrite for wrapper writers
make tests pass
/!\ some tests seem to pass where they shouldn't

* remove usage of drop_bomb

* fmt

* add test for checksum

* address some review comments

* update changelog

* fmt
2019-09-18 18:26:25 +09:00
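
The user-facing entry point of this commit is the `Index::validate_checksum()` method added in the src/core/index.rs hunk below. A minimal sketch of how it might be called, assuming an already-opened index:

use std::collections::HashSet;
use std::path::PathBuf;
use tantivy::Index;

fn report_damaged(index: &Index) -> tantivy::Result<()> {
    // Re-hashes every managed file and compares it against the CRC32 stored
    // in the new footer; returns the set of files whose checksum mismatches.
    let damaged: HashSet<PathBuf> = index.validate_checksum()?;
    for path in &damaged {
        eprintln!("corrupted file: {:?}", path);
    }
    Ok(())
}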
fdb-hiroshima
7e08e0047b fix Term documentation (#655)
u64-based fields are actually 4+8=12 bytes long
2019-09-11 18:49:35 +09:00
fdb-hiroshima
1a817f117f fix documentation error (#654)
Union was misdocumented as doing an intersection
Union and Intersection can hold more than 2 DocSets
2019-09-11 17:12:08 +09:00
petr-tik
2ec19b21ae Remove unnecessary duplicate methods (#650)
Closes #649

Spotted by @imor
2019-09-09 06:36:04 +09:00
Raminder Singh
141f5a93f7 Using FnvHashMap for mapping UnorderedTermId to TermOrdinal. Fixes #507 (#647)
* Using FnvHashMap for mapping UnorderedTermId to TermOrdinal. Fixes #507

* Fixed cargo fmt errors
2019-09-07 19:40:21 +09:00
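
The motivation behind #507 is that these maps are keyed by small integer ids, for which the `fnv` hasher is cheaper than the std `HashMap` default (SipHash). A sketch of the mapping construction, mirroring the multifield_postings hunk further down (the `u64` aliases are assumptions):

use fnv::FnvHashMap;

// Assumed aliases; in tantivy these are opaque term identifiers.
type UnorderedTermId = u64;
type TermOrdinal = u64;

fn ordinal_mapping(unordered_ids: &[UnorderedTermId]) -> FnvHashMap<UnorderedTermId, TermOrdinal> {
    unordered_ids
        .iter()
        .enumerate()
        .map(|(ord, &id)| (id, ord as TermOrdinal))
        .collect()
}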
Paul Masurel
df47d55cd2 Occur debug interface (#648) 2019-09-07 15:08:45 +09:00
Raminder Singh
5e579fd6b7 Fixed clippy warning: unneeded return statement (#646) 2019-09-07 10:14:37 +09:00
Paul Masurel
4b9c1dce69 Moving query grammar to a different crate. (#645) 2019-09-05 09:37:28 +09:00
Paul Masurel
d74f71bbef Lighter regex dependency. (#644)
Detail on https://github.com/rust-lang/regex/pull/613
2019-09-04 13:10:12 +09:00
99 changed files with 761 additions and 696 deletions

View File

@@ -7,7 +7,8 @@ Tantivy 0.11.0
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #630 (@brainlock)
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
## How to update?
@@ -15,6 +16,12 @@ Tantivy 0.11.0
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required.
Tantivy 0.10.2
=====================
- Closes #656. Solving memory leak.
Tantivy 0.10.1
=====================
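
Since the changelog notes that the regex is now compiled when the `RegexQuery` instance is built, constructing one becomes fallible. A hedged sketch of the resulting call site (assuming a constructor along the lines of `RegexQuery::from_pattern`; the exact name is not shown in this diff):

use tantivy::query::RegexQuery;
use tantivy::schema::Field;

fn build_title_query(title: Field) -> tantivy::Result<RegexQuery> {
    // The pattern is compiled here, so an invalid regex surfaces as an error
    // at construction time rather than at search time.
    RegexQuery::from_pattern("tan.*vy", title)
}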

View File

@@ -15,8 +15,9 @@ edition = "2018"
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true}
@@ -24,7 +25,6 @@ snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0"
log = "0.4"
combine = ">=3.6.0,<4.0.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
@@ -42,6 +42,7 @@ owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1.1"
downcast-rs = { version="1.0" }
tantivy-query-grammar = { path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2"
fnv = "1.0.6"
@@ -54,10 +55,6 @@ murmurhash32 = "0.2"
chrono = "0.4"
smallvec = "0.6"
tantivy-schema = {path= "./tantivy-schema"}
tantivy-tokenizer = {path= "./tantivy-tokenizer"}
tantivy-common = {path="./tantivy-common"}
[target.'cfg(windows)'.dependencies]
winapi = "0.3"
@@ -84,10 +81,14 @@ failpoints = ["fail/failpoints"]
unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
[workspace]
members = ["query-grammar"]
[badges]
travis-ci = { repository = "tantivy-search/tantivy" }
[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]
# Following the "fail" crate best practises, we isolate
@@ -101,7 +102,3 @@ features = ["failpoints"]
name = "failpoints"
path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"]
[workspace]
members = ["tantivy-schema", "tantivy-common", "tantivy-tokenizer"]

3
Makefile Normal file
View File

@@ -0,0 +1,3 @@
test:
echo "Run test only... No examples."
cargo test --tests --lib

View File

@@ -7,7 +7,7 @@ set -ex
main() {
if [ ! -z $CODECOV ]; then
echo "Codecov"
cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov
else
echo "Build"
cross build --target $TARGET
@@ -15,7 +15,8 @@ main() {
return
fi
echo "Test"
cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
cross test --target $TARGET --no-default-features --features mmap
cross test --target $TARGET --no-default-features --features mmap query-grammar
fi
for example in $(ls examples/*.rs)
do

View File

@@ -46,10 +46,9 @@ fn main() -> tantivy::Result<()> {
thread::spawn(move || {
// we index 100 times the document... for the sake of the example.
for i in 0..100 {
let opstamp = {
// A read lock is sufficient here.
let index_writer_rlock = index_writer_clone_1.read().unwrap();
index_writer_rlock.add_document(
let opstamp = index_writer_clone_1
.read().unwrap() //< A read lock is sufficient here.
.add_document(
doc!(
title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
@@ -60,8 +59,7 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool"
))
};
));
println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(20));
}
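
The rewritten example relies on `IndexWriter::add_document` taking only `&self`, so indexing threads can share the writer behind an `RwLock` read guard, while an exclusive write lock is reserved for `&mut self` operations such as `commit`. A minimal sketch of the committing side, under that assumption:

use std::sync::{Arc, RwLock};
use tantivy::IndexWriter;

fn commit_shared(writer: &Arc<RwLock<IndexWriter>>) -> tantivy::Result<()> {
    // commit() needs &mut IndexWriter, hence the exclusive write lock.
    let mut index_writer = writer.write().unwrap();
    index_writer.commit()?;
    Ok(())
}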

16
query-grammar/Cargo.toml Normal file
View File

@@ -0,0 +1,16 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
combine = ">=3.6.0,<4.0.0"

17
query-grammar/src/lib.rs Normal file
View File

@@ -0,0 +1,17 @@
#![recursion_limit = "100"]
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;
pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
pub struct Error;
pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
Ok(user_input_ast)
}
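
Given the public surface of the new crate shown above (`parse_query`, the re-exported AST types, and the opaque `Error`), usage might look like this sketch:

use tantivy_query_grammar::parse_query;

fn main() {
    // parse_query returns the grammar's AST, or an opaque Error on bad syntax.
    match parse_query("+title:hello -body:world") {
        Ok(ast) => println!("{:?}", ast),
        Err(_) => eprintln!("syntax error"),
    }
}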

View File

@@ -1,3 +1,6 @@
use std::fmt;
use std::fmt::Write;
/// Defines whether a term in a query must be present,
/// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
@@ -18,32 +21,38 @@ impl Occur {
/// - `Should` => '?',
/// - `Must` => '+'
/// - `Not` => '-'
pub fn to_char(self) -> char {
fn to_char(self) -> char {
match self {
Occur::Should => '?',
Occur::Must => '+',
Occur::MustNot => '-',
}
}
}
/// Compose two occur values.
pub fn compose_occur(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
/// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur {
match left {
Occur::Should => right,
Occur::Must => {
if right == Occur::MustNot {
Occur::MustNot
} else {
Occur::Must
}
}
}
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
Occur::MustNot => {
if right == Occur::MustNot {
Occur::Must
} else {
Occur::MustNot
}
}
}
}
}
impl fmt::Display for Occur {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char(self.to_char())
}
}
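
The composition rules above are easier to read as concrete cases: `Should` acts as the identity, `Must` wins over `Should`, and two negations cancel out. A small sketch using the now-method-form `Occur::compose` (assuming it is reachable through the crate-root re-export shown in lib.rs):

use tantivy_query_grammar::Occur;

fn main() {
    assert_eq!(Occur::compose(Occur::Should, Occur::Must), Occur::Must);
    assert_eq!(Occur::compose(Occur::Must, Occur::Should), Occur::Must);
    assert_eq!(Occur::compose(Occur::Must, Occur::MustNot), Occur::MustNot);
    assert_eq!(Occur::compose(Occur::MustNot, Occur::MustNot), Occur::Must);
}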

View File

@@ -1,6 +1,5 @@
use super::user_input_ast::*;
use crate::query::occur::Occur;
use crate::query::query_parser::user_input_ast::UserInputBound;
use crate::Occur;
use combine::char::*;
use combine::error::StreamError;
use combine::stream::StreamErrorFor;

View File

@@ -1,7 +1,7 @@
use std::fmt;
use std::fmt::{Debug, Formatter};
use crate::query::Occur;
use crate::Occur;
#[derive(PartialEq)]
pub enum UserInputLeaf {
@@ -151,7 +151,7 @@ impl fmt::Debug for UserInputAST {
Ok(())
}
UserInputAST::Unary(ref occur, ref subquery) => {
write!(formatter, "{}({:?})", occur.to_char(), subquery)
write!(formatter, "{}({:?})", occur, subquery)
}
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
}

View File

@@ -123,5 +123,4 @@ mod tests {
assert_eq!(count_collector.harvest(), 2);
}
}
}

View File

@@ -599,19 +599,18 @@ mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use collector::FacetCollector;
use query::AllQuery;
use rand::{thread_rng, Rng};
use schema::Facet;
use schema::Schema;
use crate::collector::FacetCollector;
use crate::query::AllQuery;
use crate::schema::{Facet, Schema};
use crate::Index;
use rand::seq::SliceRandom;
use rand::thread_rng;
use test::Bencher;
use Index;
#[bench]
fn bench_facet_collector(b: &mut Bencher) {
@@ -628,7 +627,7 @@ mod bench {
}
}
// 40425 docs
thread_rng().shuffle(&mut docs[..]);
docs[..].shuffle(&mut thread_rng());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
@@ -637,7 +636,7 @@ mod bench {
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let searcher = index.searcher();
let searcher = reader.searcher();
let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap();
});

View File

@@ -592,5 +592,4 @@ mod tests {
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
}

View File

@@ -2,7 +2,7 @@ use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use std::io;
use std::ops::Deref;
pub struct BitPacker {
pub(crate) struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
}

View File

@@ -2,7 +2,7 @@ use std::fmt;
use std::u64;
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct TinySet(u64);
pub(crate) struct TinySet(u64);
impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -179,7 +179,7 @@ impl BitSet {
///
/// Reminder: the tiny set with the bucket `bucket`, represents the
/// elements from `bucket * 64` to `(bucket+1) * 64`.
pub fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
pub(crate) fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
self.tinysets[bucket as usize..]
.iter()
.cloned()
@@ -194,7 +194,7 @@ impl BitSet {
/// Returns the tiny bitset representing the
/// the set restricted to the number range from
/// `bucket * 64` to `(bucket + 1) * 64`.
pub fn tinyset(&self, bucket: u32) -> TinySet {
pub(crate) fn tinyset(&self, bucket: u32) -> TinySet {
self.tinysets[bucket as usize]
}
}
@@ -204,7 +204,12 @@ mod tests {
use super::BitSet;
use super::TinySet;
use std::collections::{BTreeSet, HashSet};
use crate::docset::DocSet;
use crate::query::BitSetDocSet;
use crate::tests;
use crate::tests::generate_nonunique_unsorted;
use std::collections::BTreeSet;
use std::collections::HashSet;
#[test]
fn test_tiny_set() {
@@ -259,19 +264,26 @@ mod tests {
test_against_hashset(&[62u32, 63u32], 64);
}
// #[test]
// fn test_bitset_clear() {
// let mut bitset = BitSet::with_max_value(1_000);
// let els = tests::sample(1_000, 0.01f64);
// for &el in &els {
// bitset.insert(el);
// }
// assert!(els.iter().all(|el| bitset.contains(*el)));
// bitset.clear();
// for el in 0u32..1000u32 {
// assert!(!bitset.contains(el));
// }
// }
#[test]
fn test_bitset_large() {
let arr = generate_nonunique_unsorted(100_000, 5_000);
let mut btreeset: BTreeSet<u32> = BTreeSet::new();
let mut bitset = BitSet::with_max_value(100_000);
for el in arr {
btreeset.insert(el);
bitset.insert(el);
}
for i in 0..100_000 {
assert_eq!(btreeset.contains(&i), bitset.contains(i));
}
assert_eq!(btreeset.len(), bitset.len());
let mut bitset_docset = BitSetDocSet::from(bitset);
for el in btreeset.into_iter() {
bitset_docset.advance();
assert_eq!(bitset_docset.doc(), el);
}
assert!(!bitset_docset.advance());
}
#[test]
fn test_bitset_num_buckets() {
@@ -327,6 +339,19 @@ mod tests {
assert_eq!(bitset.len(), 3);
}
#[test]
fn test_bitset_clear() {
let mut bitset = BitSet::with_max_value(1_000);
let els = tests::sample(1_000, 0.01f64);
for &el in &els {
bitset.insert(el);
}
assert!(els.iter().all(|el| bitset.contains(*el)));
bitset.clear();
for el in 0u32..1000u32 {
assert!(!bitset.contains(el));
}
}
}
#[cfg(all(test, feature = "unstable"))]

View File

@@ -2,7 +2,7 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
offsets: HashMap<FileAddr, u64>,
}
impl<W: Write> CompositeWrite<W> {
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
/// Crate a new API writer that writes a composite file
/// in a given write.
pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,8 +91,7 @@ impl<W: Write> CompositeWrite<W> {
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?;
self.write.flush()?;
Ok(())
self.write.terminate()
}
}
@@ -231,5 +230,4 @@ mod test {
}
}
}
}

View File

@@ -1,3 +1,5 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;
@@ -42,6 +44,13 @@ impl<W: Write> Write for CountingWriter<W> {
}
}
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token)
}
}
#[cfg(test)]
mod test {

View File

@@ -1,18 +1,18 @@
pub mod bitpacker;
mod bitset;
mod composite_file;
mod counting_writer;
mod serialize;
mod vint;
pub use self::bitset::BitSet;
pub use self::bitset::TinySet;
pub(crate) use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use byteorder::LittleEndian as Endianness;
pub type DateTime = chrono::DateTime<chrono::Utc>;
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
/// We do not allow segments with more than
@@ -42,7 +42,7 @@ pub const MAX_DOC_LIMIT: u32 = 1 << 31;
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(n: u64) -> u8 {
pub(crate) fn compute_num_bits(n: u64) -> u8 {
let amplitude = (64u32 - n.leading_zeros()) as u8;
if amplitude <= 64 - 8 {
amplitude
@@ -51,7 +51,7 @@ pub fn compute_num_bits(n: u64) -> u8 {
}
}
pub fn is_power_of_2(n: usize) -> bool {
pub(crate) fn is_power_of_2(n: usize) -> bool {
(n > 0) && (n & (n - 1) == 0)
}
@@ -131,12 +131,10 @@ pub fn u64_to_f64(val: u64) -> f64 {
})
}
pub use self::serialize::fixed_size_test;
#[cfg(test)]
pub(crate) mod test {
use super::fixed_size_test;
pub use super::serialize::test::fixed_size_test;
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
use std::f64;

View File

@@ -1,5 +1,5 @@
use crate::Endianness;
use crate::VInt;
use crate::common::Endianness;
use crate::common::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io;
@@ -145,17 +145,17 @@ impl BinarySerializable for String {
}
}
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
}
#[cfg(test)]
mod test {
pub mod test {
use super::*;
use crate::VInt;
use crate::common::VInt;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
}
fn serialize_test<T: BinarySerializable + Eq>(v: T) -> usize {
let mut buffer: Vec<u8> = Vec::new();
@@ -199,10 +199,7 @@ mod test {
fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
}
#[test]

View File

@@ -171,7 +171,7 @@ mod tests {
use super::serialize_vint_u32;
use super::VInt;
use crate::BinarySerializable;
use crate::common::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian};
fn aux_test_vint(val: u64) {

View File

@@ -26,9 +26,10 @@ use crate::IndexWriter;
use crate::Result;
use num_cpus;
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::Arc;
fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
@@ -368,6 +369,11 @@ impl Index {
.map(SegmentMeta::id)
.collect())
}
/// Returns the set of corrupted files
pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into)
}
}
impl fmt::Debug for Index {
@@ -595,5 +601,4 @@ mod tests {
assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit);
}
}

View File

@@ -1,3 +1,4 @@
use crate::common::CompositeFile;
use crate::common::HasLen;
use crate::core::InvertedIndexReader;
use crate::core::Segment;
@@ -14,7 +15,6 @@ use crate::schema::Schema;
use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader;
use crate::termdict::TermDictionary;
use crate::CompositeFile;
use crate::DocId;
use crate::Result;
use fail::fail_point;

View File

@@ -118,6 +118,8 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
You should only use this to read files created with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file
@@ -157,6 +159,8 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// atomic_write.
///
/// This should only be used for small files.
///
You should only use this to read files created with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data.

213
src/directory/footer.rs Normal file
View File

@@ -0,0 +1,213 @@
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
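// Layout of the common footer (five little-endian u32 values, 20 bytes):
// [meta_len][tantivy_major][tantivy_minor][tantivy_patch][total_footer_len]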
const COMMON_FOOTER_SIZE: usize = 4 * 5;
#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
pub tantivy_version: (u32, u32, u32),
pub meta: String,
pub versioned_footer: VersionedFooter,
}
impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self {
let tantivy_version = (
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer {
tantivy_version,
meta: format!(
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer,
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut res = self.versioned_footer.to_bytes();
res.extend_from_slice(self.meta.as_bytes());
let len = res.len();
res.resize(len + COMMON_FOOTER_SIZE, 0);
let mut common_footer = &mut res[len..];
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
}
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The footer len is {}, while the entire file len is {}",
size, len
),
));
}
let footer = &data[len - size as usize..];
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
Ok(Footer {
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
}
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
let footer = Footer::from_bytes(source.as_slice())?;
let reader = source.slice_to(source.as_slice().len() - footer.size());
Ok((footer, reader))
}
pub fn size(&self) -> usize {
self.versioned_footer.size() as usize + self.meta.len() + 20
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion { version: u32, size: u32 },
V0(u32), // crc
}
impl VersionedFooter {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
}
}
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
assert!(footer.len() >= 4);
let version = LittleEndian::read_u32(footer);
match version {
0 => {
if footer.len() == 8 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
} else {
Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The versioned footer len is {}, while it should be 8",
footer.len()
),
))
}
}
version => Ok(VersionedFooter::UnknownVersion {
version,
size: footer.len() as u32,
}),
}
}
pub fn size(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 8,
VersionedFooter::UnknownVersion { size, .. } => *size,
}
}
pub fn version(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 0,
VersionedFooter::UnknownVersion { version, .. } => *version,
}
}
pub fn crc(&self) -> Option<u32> {
match self {
VersionedFooter::V0(crc) => Some(*crc),
VersionedFooter::UnknownVersion { .. } => None,
}
}
}
pub(crate) struct FooterProxy<W: TerminatingWrite> {
/// always Some except after terminate call
hasher: Option<Hasher>,
/// always Some except after terminate call
writer: Option<W>,
}
impl<W: TerminatingWrite> FooterProxy<W> {
pub fn new(writer: W) -> Self {
FooterProxy {
hasher: Some(Hasher::new()),
writer: Some(writer),
}
}
}
impl<W: TerminatingWrite> Write for FooterProxy<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let count = self.writer.as_mut().unwrap().write(buf)?;
self.hasher.as_mut().unwrap().update(&buf[..count]);
Ok(count)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.as_mut().unwrap().flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
let mut writer = self.writer.take().unwrap();
writer.write_all(&footer)?;
writer.terminate()
}
}
#[cfg(test)]
mod tests {
use crate::directory::footer::{Footer, VersionedFooter};
#[test]
fn test_serialize_deserialize_footer() {
let crc = 123456;
let footer = Footer::new(VersionedFooter::V0(crc));
let footer_bytes = footer.to_bytes();
assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
}
}

View File

@@ -1,5 +1,6 @@
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::META_LOCK;
@@ -8,6 +9,7 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use crc32fast::Hasher;
use serde_json;
use std::collections::HashSet;
use std::io;
@@ -207,17 +209,59 @@ impl ManagedDirectory {
}
Ok(())
}
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
Ok(footer
.versioned_footer
.crc()
.map(|v| v == crc)
.unwrap_or(false))
}
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
hashset.insert(path);
}
}
Ok(hashset)
}
}
impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.directory.open_read(path)
let read_only_source = self.directory.open_read(path)?;
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
self.directory.open_write(path)
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory
.open_write(path)?
.into_inner()
.map_err(|_| ())
.expect("buffer should be empty"),
))))
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -259,8 +303,9 @@ impl Clone for ManagedDirectory {
#[cfg(test)]
mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory};
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
@@ -275,8 +320,8 @@ mod tests_mmap_specific {
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write_file = managed_directory.open_write(test_path1).unwrap();
write_file.flush().unwrap();
let write_file = managed_directory.open_write(test_path1).unwrap();
write_file.terminate().unwrap();
managed_directory
.atomic_write(test_path2, &[0u8, 1u8])
.unwrap();
@@ -310,9 +355,9 @@ mod tests_mmap_specific {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
managed_directory
.atomic_write(test_path1, &vec![0u8, 1u8])
.unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
@@ -331,4 +376,38 @@ mod tests_mmap_specific {
}
}
#[test]
fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2);
let mut file = OpenOptions::new()
.write(true)
.open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file);
let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2));
}
}

View File

@@ -11,6 +11,7 @@ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
@@ -18,7 +19,7 @@ use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::WritePtr;
use crate::directory::{TerminatingWrite, WritePtr};
use atomicwrites;
use memmap::Mmap;
use std::collections::HashMap;
@@ -141,42 +142,28 @@ impl MmapCache {
}
}
struct InnerWatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: WatchCallbackList,
}
impl InnerWatcherWrapper {
pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let mut watcher = notify::raw_watcher(tx)?;
watcher.watch(path, RecursiveMode::Recursive)?;
let inner = InnerWatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router: Default::default(),
};
Ok((inner, watcher_recv))
}
}
#[derive(Clone)]
struct WatcherWrapper {
inner: Arc<InnerWatcherWrapper>,
_watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>,
}
impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_wrapper = WatcherWrapper {
inner: Arc::new(inner),
};
let watcher_wrapper_clone = watcher_wrapper.clone();
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let watcher = notify::raw_watcher(tx)
.and_then(|mut watcher| {
watcher.watch(path, RecursiveMode::Recursive)?;
Ok(watcher)
})
.map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
thread::Builder::new()
.name("meta-file-watch-thread".to_string())
.spawn(move || {
@@ -187,7 +174,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH {
watcher_wrapper_clone.inner.watcher_router.broadcast();
watcher_router_clone.broadcast();
}
}
}
@@ -200,13 +187,15 @@ impl WatcherWrapper {
}
}
}
})
.expect("Failed to spawn thread to watch meta.json");
Ok(watcher_wrapper)
})?;
Ok(WatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router,
})
}
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.inner.watcher_router.subscribe(watch_callback)
self.watcher_router.subscribe(watch_callback)
}
}
@@ -265,7 +254,7 @@ impl MmapDirectoryInner {
}
}
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
return Ok(watch_wrapper.watch(watch_callback));
Ok(watch_wrapper.watch(watch_callback))
} else {
unreachable!("At this point, watch wrapper is supposed to be initialized");
}
@@ -412,6 +401,12 @@ impl Seek for SafeFileWriter {
}
}
impl TerminatingWrite for SafeFileWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
@@ -539,7 +534,7 @@ impl Directory for MmapDirectory {
#[cfg(test)]
mod tests {
// There are more tests in directory/lib.rs
// There are more tests in directory/mod.rs
// The following tests are specific to the MmapDirectory
use super::*;

View File

@@ -9,6 +9,7 @@ mod mmap_directory;
mod directory;
mod directory_lock;
mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;
@@ -24,18 +25,49 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{BufWriter, Write};
use std::io::{self, BufWriter, Write};
#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
pub struct AntiCallToken(());
/// Trait used to indicate that no more writes need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicates that the writer will no longer be used. Internally calls terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write
/// and Seek.
pub type WritePtr = BufWriter<Box<dyn Write>>;
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>;
#[cfg(test)]
mod tests;
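
The new trait above gives every writer an explicit end-of-life hook. A sketch of implementing it for a trivial custom writer, mirroring the VecWriter and SafeFileWriter impls elsewhere in this change set (the `CountingSink` type is invented for illustration, and the trait paths assume the re-exports from `tantivy::directory` shown above):

use std::io::{self, Write};
use tantivy::directory::{AntiCallToken, TerminatingWrite};

struct CountingSink {
    bytes: usize,
}

impl Write for CountingSink {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.bytes += buf.len();
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl TerminatingWrite for CountingSink {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        // Nothing is buffered here, so terminating reduces to a flush.
        self.flush()
    }
}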

View File

@@ -1,8 +1,9 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
@@ -71,6 +72,12 @@ impl Write for VecWriter {
}
}
impl TerminatingWrite for VecWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[derive(Default)]
struct InnerDirectory {
fs: HashMap<PathBuf, ReadOnlySource>,

View File

@@ -127,7 +127,7 @@ fn test_watch(directory: &mut dyn Directory) {
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
for _ in 0..100 {
for _ in 0..1_000 {
if counter.load(Ordering::SeqCst) > i {
break;
}

View File

@@ -152,5 +152,4 @@ mod tests {
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
}
}

View File

@@ -148,13 +148,13 @@ fn value_to_u64(value: &Value) -> u64 {
mod tests {
use super::*;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use crate::schema::Document;
use crate::schema::Field;
use crate::schema::Schema;
use crate::schema::FAST;
use crate::CompositeFile;
use once_cell::sync::Lazy;
use rand::prelude::SliceRandom;
use rand::rngs::StdRng;
@@ -429,7 +429,6 @@ mod tests {
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
@@ -437,9 +436,9 @@ mod bench {
use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA};
use super::*;
use common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use std::collections::HashMap;
use std::path::Path;
use test::{self, Bencher};
@@ -537,5 +536,4 @@ mod bench {
});
}
}
}

View File

@@ -5,8 +5,8 @@ use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal;
use crate::DocId;
use fnv::FnvHashMap;
use itertools::Itertools;
use std::collections::HashMap;
use std::io;
/// Writer for multi-valued (as in, more than one value per document)
@@ -102,7 +102,7 @@ impl MultiValueIntFastFieldWriter {
pub fn serialize(
&self,
serializer: &mut FastFieldSerializer,
mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> {
{
// writing the offset index

View File

@@ -2,12 +2,12 @@ use super::FastValue;
use crate::common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema;
use crate::schema::FAST;
use crate::CompositeFile;
use crate::DocId;
use owning_ref::OwningRef;
use std::collections::HashMap;

View File

@@ -1,9 +1,9 @@
use crate::common::CompositeFile;
use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage;
use crate::CompositeFile;
use crate::Result;
use std::collections::HashMap;

View File

@@ -1,10 +1,10 @@
use crate::common::bitpacker::BitPacker;
use crate::common::compute_num_bits;
use crate::common::BinarySerializable;
use crate::common::CompositeWrite;
use crate::common::CountingWriter;
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::CompositeWrite;
use std::io::{self, Write};
/// `FastFieldSerializer` is in charge of serializing

View File

@@ -6,6 +6,7 @@ use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
@@ -116,7 +117,7 @@ impl FastFieldsWriter {
pub fn serialize(
&self,
serializer: &mut FastFieldSerializer,
mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> {
for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?;

View File

@@ -1,6 +1,6 @@
use crate::common::CompositeWrite;
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::CompositeWrite;
use std::io;
use std::io::Write;

View File

@@ -8,6 +8,7 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
@@ -168,6 +169,7 @@ pub(crate) fn advance_deletes(
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, &mut delete_file)?;
delete_file.terminate()?;
}
}
segment_entry.set_meta(segment.meta().clone());
@@ -1177,5 +1179,4 @@ mod tests {
assert!(clear_again.is_ok());
assert!(commit_again.is_ok());
}
}

View File

@@ -134,5 +134,4 @@ mod tests {
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
}
}

View File

@@ -296,5 +296,4 @@ mod tests {
assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
}
}

View File

@@ -1,5 +1,4 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![recursion_limit = "100"]
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
@@ -102,6 +101,9 @@
#[macro_use]
extern crate serde_derive;
#[cfg_attr(test, macro_use)]
extern crate serde_json;
#[macro_use]
extern crate log;
@@ -118,9 +120,6 @@ mod functional_test;
#[macro_use]
mod macros;
mod composite_file;
pub(crate) use composite_file::{CompositeFile, CompositeWrite};
pub use crate::error::TantivyError;
#[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
@@ -133,22 +132,22 @@ pub type Result<T> = std::result::Result<T, error::TantivyError>;
/// Tantivy DateTime
pub type DateTime = chrono::DateTime<chrono::Utc>;
pub use tantivy_common as common;
pub use tantivy_schema as schema;
pub use tantivy_tokenizer as tokenizer;
mod common;
mod core;
mod indexer;
pub mod collector;
pub mod directory;
#[allow(unused_doc_comments)]
mod error;
pub mod tokenizer;
pub mod collector;
pub mod directory;
pub mod fastfield;
pub mod fieldnorm;
pub(crate) mod positions;
pub mod postings;
pub mod query;
pub mod schema;
pub mod space_usage;
pub mod store;
pub mod termdict;
@@ -252,6 +251,7 @@ mod tests {
use crate::Postings;
use crate::ReloadPolicy;
use rand::distributions::Bernoulli;
use rand::distributions::Uniform;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
@@ -268,6 +268,14 @@ mod tests {
(a - b).abs() < 0.0005 * (a + b).abs()
}
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 32] = [1; 32];
StdRng::from_seed(seed)
.sample_iter(&Uniform::new(0u32, max_value))
.take(n_elems)
.collect::<Vec<u32>>()
}
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
StdRng::from_seed([seed_val; 32])
.sample_iter(&Bernoulli::new(ratio).unwrap())
@@ -277,6 +285,10 @@ mod tests {
.collect()
}
pub fn sample(n: u32, ratio: f64) -> Vec<u32> {
sample_with_seed(n, ratio, 4)
}
#[test]
#[cfg(feature = "mmap")]
fn test_indexing() {

View File

@@ -274,13 +274,15 @@ pub mod tests {
mod bench {
use super::*;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use rand::{Rng, XorShiftRng};
use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
let mut seed: [u8; 32] = [0; 32];
seed[31] = seed_val;
let mut rng = StdRng::from_seed(seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}

View File

@@ -622,23 +622,23 @@ pub mod tests {
assert!(!postings_unopt.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::*;
use docset::SkipResult;
use query::Intersection;
use schema::IndexRecordOption;
use crate::docset::SkipResult;
use crate::query::Intersection;
use crate::schema::IndexRecordOption;
use crate::tests;
use crate::DocSet;
use test::{self, Bencher};
use tests;
use DocSet;
#[bench]
fn bench_segment_postings(b: &mut Bencher) {
let searcher = INDEX.searcher();
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
@@ -652,7 +652,8 @@ mod bench {
#[bench]
fn bench_segment_intersection(b: &mut Bencher) {
let searcher = INDEX.searcher();
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let segment_postings_a = segment_reader
@@ -682,7 +683,8 @@ mod bench {
}
fn bench_skip_next(p: f64, b: &mut Bencher) {
let searcher = INDEX.searcher();
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p);
@@ -737,7 +739,8 @@ mod bench {
#[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) {
let searcher = INDEX.searcher();
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let n: u32 = test::black_box(17);

View File

@@ -12,6 +12,7 @@ use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId;
use crate::Result;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use std::marker::PhantomData;
@@ -127,12 +128,12 @@ impl MultiFieldPostingsWriter {
pub fn serialize(
&self,
serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
) -> Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k);
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new();
let field_offsets = make_field_partition(&term_offsets);
@@ -147,7 +148,7 @@ impl MultiFieldPostingsWriter {
let unordered_term_ids = term_offsets[start..stop]
.iter()
.map(|&(_, _, bucket)| bucket);
let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate()
.map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)

View File

@@ -1,6 +1,6 @@
use super::TermInfo;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment;
use crate::directory::WritePtr;
use crate::positions::PositionSerializer;
@@ -10,7 +10,6 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::CompositeWrite;
use crate::DocId;
use crate::Result;
use std::io::{self, Write};
@@ -142,10 +141,7 @@ impl<'a> FieldSerializer<'a> {
FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() {
let index_option = text_indexing_options.index_option();
(
index_option.is_termfreq_enabled(),
index_option.is_position_enabled(),
)
(index_option.has_freq(), index_option.has_positions())
} else {
(false, false)
}

View File

@@ -310,6 +310,7 @@ mod bench {
use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use byteorder::{NativeEndian, WriteBytesExt};
use std::iter;
use test::Bencher;
const NUM_STACK: usize = 10_000;
@@ -335,11 +336,10 @@ mod bench {
fn bench_push_stack(bench: &mut Bencher) {
bench.iter(|| {
let mut heap = MemoryArena::new();
let mut stacks = Vec::with_capacity(100);
for _ in 0..NUM_STACK {
let mut stack = ExpUnrolledLinkedList::new();
stacks.push(stack);
}
let mut stacks: Vec<ExpUnrolledLinkedList> =
iter::repeat_with(ExpUnrolledLinkedList::new)
.take(NUM_STACK)
.collect();
for s in 0..NUM_STACK {
for i in 0u32..STACK_SIZE {
let t = s * 392017 % NUM_STACK;

View File

@@ -45,7 +45,7 @@ impl BinarySerializable for TermInfo {
mod tests {
use super::TermInfo;
use crate::common::fixed_size_test;
use crate::common::test::fixed_size_test;
#[test]
fn test_fixed_size() {

View File

@@ -130,5 +130,4 @@ mod tests {
assert!(!scorer.advance());
}
}
}

View File

@@ -223,13 +223,12 @@ mod bench {
use super::BitSet;
use super::BitSetDocSet;
use test;
use tests;
use DocSet;
use crate::test;
use crate::tests;
use crate::DocSet;
#[bench]
fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
b.iter(|| {
let mut bitset = BitSet::with_max_value(1_000_000);
@@ -241,7 +240,6 @@ mod bench {
#[bench]
fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els {

View File

@@ -137,5 +137,4 @@ mod tests {
fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472);
}
}

View File

@@ -247,9 +247,7 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser
.parse_query("Оксана Лифенко")
.unwrap();
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
scorer.advance();

View File

@@ -175,5 +175,4 @@ mod tests {
sample_skip,
);
}
}

View File

@@ -45,7 +45,7 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
})
}
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
/// Creates a `DocSet` that iterate through the intersection of two or more `DocSet`s.
pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>> {
left: TDocSet,
right: TDocSet,

View File

@@ -5,7 +5,7 @@ use Score;
use SkipResult;
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
/// Creates a `DocSet` that iterate through the intersection of two `DocSet`s.
pub struct IntersectionTwoTerms<TDocSet> {
left: TDocSet,
right: TDocSet

View File

@@ -12,7 +12,6 @@ mod exclude;
mod explanation;
mod fuzzy_query;
mod intersection;
mod occur;
mod phrase_query;
mod query;
mod query_parser;
@@ -43,7 +42,6 @@ pub use self::exclude::Exclude;
pub use self::explanation::Explanation;
pub use self::fuzzy_query::FuzzyTermQuery;
pub use self::intersection::intersect_scorers;
pub use self::occur::Occur;
pub use self::phrase_query::PhraseQuery;
pub use self::query::Query;
pub use self::query_parser::QueryParser;
@@ -55,6 +53,7 @@ pub use self::scorer::ConstScorer;
pub use self::scorer::Scorer;
pub use self::term_query::TermQuery;
pub use self::weight::Weight;
pub use tantivy_query_grammar::Occur;
#[cfg(test)]
mod tests {

View File

@@ -1,6 +1,4 @@
mod query_grammar;
mod query_parser;
mod user_input_ast;
pub mod logical_ast;
pub use self::query_parser::QueryParser;

View File

@@ -1,9 +1,5 @@
use super::logical_ast::*;
use super::query_grammar::parse_to_ast;
use super::user_input_ast::*;
use crate::core::Index;
use crate::query::occur::compose_occur;
use crate::query::query_parser::logical_ast::LogicalAST;
use crate::query::AllQuery;
use crate::query::BooleanQuery;
use crate::query::EmptyQuery;
@@ -16,11 +12,11 @@ use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
use combine::Parser;
use std::borrow::Cow;
use std::num::{ParseFloatError, ParseIntError};
use std::ops::Bound;
use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
/// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Fail)]
@@ -222,9 +218,8 @@ impl QueryParser {
/// Parse the user query into an AST.
fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> {
let (user_input_ast, _remaining) = parse_to_ast()
.parse(query)
.map_err(|_| QueryParserError::SyntaxError)?;
let user_input_ast =
tantivy_query_grammar::parse_query(query).map_err(|_| QueryParserError::SyntaxError)?;
self.compute_logical_ast(user_input_ast)
}
@@ -399,7 +394,7 @@ impl QueryParser {
let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
for sub_query in sub_queries {
let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
let new_occur = compose_occur(default_occur, occur);
let new_occur = Occur::compose(default_occur, occur);
logical_sub_queries.push((new_occur, sub_ast));
}
Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
@@ -407,7 +402,7 @@ impl QueryParser {
UserInputAST::Unary(left_occur, subquery) => {
let (right_occur, logical_sub_queries) =
self.compute_logical_ast_with_occur(*subquery)?;
Ok((compose_occur(left_occur, right_occur), logical_sub_queries))
Ok((Occur::compose(left_occur, right_occur), logical_sub_queries))
}
UserInputAST::Leaf(leaf) => {
let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;

View File

@@ -479,5 +479,4 @@ mod tests {
91
);
}
}

View File

@@ -190,5 +190,4 @@ mod tests {
skip_docs,
);
}
}

View File

@@ -28,7 +28,7 @@ where
}
}
/// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
/// Creates a `DocSet` that iterate through the union of two or more `DocSet`s.
pub struct Union<TScorer, TScoreCombiner = DoNothingCombiner> {
docsets: Vec<TScorer>,
bitsets: Box<[TinySet; HORIZON_NUM_TINYBITSETS]>,
@@ -409,20 +409,17 @@ mod tests {
vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use query::score_combiner::DoNothingCombiner;
use query::ConstScorer;
use query::Union;
use query::VecDocSet;
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet};
use crate::tests;
use crate::DocId;
use crate::DocSet;
use test::Bencher;
use tests;
use DocId;
use DocSet;
#[bench]
fn bench_union_3_high(bench: &mut Bencher) {

View File

@@ -82,5 +82,4 @@ pub mod tests {
}
assert_eq!(postings.fill_buffer(&mut buffer[..]), 9);
}
}

View File

@@ -1,10 +1,9 @@
use super::*;
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::DateTime;
use itertools::Itertools;
use serde_derive::{Deserialize, Serialize};
use std::io::{self, Read, Write};
use tantivy_common::BinarySerializable;
use tantivy_common::DateTime;
use tantivy_common::VInt;
/// Tantivy's Document is the object that can
/// be indexed and then searched for.
@@ -169,7 +168,7 @@ impl BinarySerializable for Document {
#[cfg(test)]
mod tests {
use crate::*;
use crate::schema::*;
#[test]
fn test_doc() {
@@ -179,5 +178,4 @@ mod tests {
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1);
}
}

View File

@@ -1,3 +1,4 @@
use crate::common::BinarySerializable;
use once_cell::sync::Lazy;
use regex::Regex;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -7,7 +8,6 @@ use std::fmt::{self, Debug, Display, Formatter};
use std::io::{self, Read, Write};
use std::str;
use std::string::FromUtf8Error;
use tantivy_common::BinarySerializable;
const SLASH_BYTE: u8 = b'/';
const ESCAPE_BYTE: u8 = b'\\';
@@ -59,7 +59,7 @@ impl Facet {
&self.0
}
pub fn from_encoded_string(facet_string: String) -> Facet {
pub(crate) fn from_encoded_string(facet_string: String) -> Facet {
Facet(facet_string)
}
@@ -104,7 +104,7 @@ impl Facet {
}
/// Accessor for the inner buffer of the `Facet`.
pub fn set_facet_str(&mut self, facet_str: &str) {
pub(crate) fn set_facet_str(&mut self, facet_str: &str) {
self.0.clear();
self.0.push_str(facet_str);
}

View File

@@ -1,8 +1,7 @@
use serde_derive::{Deserialize, Serialize};
use crate::common::BinarySerializable;
use std::io;
use std::io::Read;
use std::io::Write;
use tantivy_common::BinarySerializable;
/// `Field` is actually a `u32` identifying a field.
/// The schema is in charge of holding the mapping between field names

View File

@@ -1,8 +1,7 @@
use serde_derive::*;
use crate::schema::IntOptions;
use crate::schema::TextOptions;
use crate::FieldType;
use crate::IntOptions;
use crate::TextOptions;
use crate::schema::FieldType;
use serde::de::{self, MapAccess, Visitor};
use serde::ser::SerializeStruct;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -266,7 +265,7 @@ impl<'de> Deserialize<'de> for FieldEntry {
#[cfg(test)]
mod tests {
use super::*;
use crate::TEXT;
use crate::schema::TEXT;
use serde_json;
#[test]

View File

@@ -1,10 +1,11 @@
use base64::decode;
use crate::Facet;
use crate::IndexRecordOption;
use crate::TextFieldIndexing;
use crate::Value;
use crate::{IntOptions, TextOptions};
use crate::schema::{IntOptions, TextOptions};
use crate::schema::Facet;
use crate::schema::IndexRecordOption;
use crate::schema::TextFieldIndexing;
use crate::schema::Value;
use serde_json::Value as JsonValue;
/// Possible error that may occur while parsing a field value
@@ -182,9 +183,8 @@ impl FieldType {
#[cfg(test)]
mod tests {
use super::FieldType;
use crate::field_type::ValueParsingError;
use crate::Value;
use serde_json::json;
use crate::schema::field_type::ValueParsingError;
use crate::schema::Value;
#[test]
fn test_bytes_value_from_json() {

View File

@@ -1,11 +1,9 @@
use crate::Field;
use crate::Value;
//use serde::Deserialize;
use serde_derive::{Deserialize, Serialize};
use crate::common::BinarySerializable;
use crate::schema::Field;
use crate::schema::Value;
use std::io;
use std::io::Read;
use std::io::Write;
use tantivy_common::BinarySerializable;
/// `FieldValue` holds together a `Field` and its `Value`.
#[derive(Debug, Clone, Ord, PartialEq, Eq, PartialOrd, Serialize, Deserialize)]

View File

@@ -1,5 +1,5 @@
use crate::IntOptions;
use crate::TextOptions;
use crate::schema::IntOptions;
use crate::schema::TextOptions;
use std::ops::BitOr;
#[derive(Clone)]

View File

@@ -1,5 +1,3 @@
use serde_derive::{Deserialize, Serialize};
/// `IndexRecordOption` describes the amount of information associated
/// with a given indexed field.
///
@@ -31,22 +29,6 @@ pub enum IndexRecordOption {
}
impl IndexRecordOption {
/// Returns true iff the term frequency will be encoded.
pub fn is_termfreq_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions | IndexRecordOption::WithFreqs => true,
_ => false,
}
}
/// Returns true iff the term positions within the document are stored as well.
pub fn is_position_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions => true,
_ => false,
}
}
/// Returns true iff this option includes encoding
/// term frequencies.
pub fn has_freq(self) -> bool {

View File

@@ -1,5 +1,4 @@
use crate::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
use serde_derive::{Deserialize, Serialize};
use crate::schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
use std::ops::BitOr;
/// Express whether a field is single-value or multi-valued.

View File

@@ -26,7 +26,7 @@ directory.
### Example
```
use tantivy_schema::*;
use tantivy::schema::*;
let mut schema_builder = Schema::builder();
let title_options = TextOptions::default()
.set_stored()
@@ -59,7 +59,7 @@ when [`searcher.doc(doc_address)`](../struct.Searcher.html#method.doc) is called
### Example
```
use tantivy_schema::*;
use tantivy::schema::*;
let mut schema_builder = Schema::builder();
let num_stars_options = IntOptions::default()
.set_stored()
@@ -93,7 +93,7 @@ using the `|` operator.
For instance, a schema containing the two fields defined in the example above could be rewritten :
```
use tantivy_schema::*;
use tantivy::schema::*;
let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("num_stars", INDEXED | STORED);
schema_builder.add_text_field("title", TEXT | STORED);
@@ -126,6 +126,7 @@ pub use self::schema::{Schema, SchemaBuilder};
pub use self::value::Value;
pub use self::facet::Facet;
pub(crate) use self::facet::FACET_SEP_BYTE;
pub use self::document::Document;
pub use self::field::Field;
@@ -173,5 +174,4 @@ mod tests {
assert!(!is_valid_field_name("シャボン玉"));
assert!(is_valid_field_name("my_text_field"));
}
}

View File

@@ -1,5 +1,4 @@
use crate::Value;
use serde_derive::Serialize;
use crate::schema::Value;
use std::collections::BTreeMap;
/// Internal representation of a document used for JSON

View File

@@ -1,14 +1,14 @@
use super::*;
use crate::schema::field_type::ValueParsingError;
use failure::Fail;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::sync::Arc;
use super::*;
use serde::de::{SeqAccess, Visitor};
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::{self, Map as JsonObject, Value as JsonValue};
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
/// Tantivy has a very strict schema.
/// You need to specify in advance whether a field is indexed or not,
@@ -21,7 +21,7 @@ use std::sync::Arc;
/// # Examples
///
/// ```
/// use tantivy_schema::*;
/// use tantivy::schema::*;
///
/// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING);
@@ -208,7 +208,7 @@ impl Eq for InnerSchema {}
/// # Examples
///
/// ```
/// use tantivy_schema::*;
/// use tantivy::schema::*;
///
/// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING);

View File

@@ -1,11 +1,11 @@
use std::fmt;
use crate::Facet;
use crate::Field;
use super::Field;
use crate::common;
use crate::schema::Facet;
use crate::DateTime;
use byteorder::{BigEndian, ByteOrder};
use std::str;
use tantivy_common as common;
use tantivy_common::DateTime;
/// Size (in bytes) of the buffer of a int field.
const INT_TERM_LEN: usize = 4 + 8;
@@ -22,10 +22,10 @@ impl Term {
/// Builds a term given a field, and a i64-value
///
/// Assuming the term has a field id of 1, and a i64 value of 3234,
/// the Term will have 8 bytes.
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 4 following bytes are encoding the u64 value.
/// The 8 following bytes are encoding the u64 value.
pub fn from_field_i64(field: Field, val: i64) -> Term {
let val_u64: u64 = common::i64_to_u64(val);
Term::from_field_u64(field, val_u64)
@@ -33,11 +33,11 @@ impl Term {
/// Builds a term given a field, and a f64-value
///
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 8 bytes. <= this is wrong
/// Assuming the term has a field id of 1, and a f64 value of 1.5,
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 4 following bytes are encoding the u64 value.
/// The 8 following bytes are encoding the f64 as a u64 value.
pub fn from_field_f64(field: Field, val: f64) -> Term {
let val_u64: u64 = common::f64_to_u64(val);
Term::from_field_u64(field, val_u64)
@@ -46,10 +46,10 @@ impl Term {
/// Builds a term given a field, and a DateTime value
///
/// Assuming the term has a field id of 1, and a timestamp i64 value of 3234,
/// the Term will have 8 bytes.
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 4 following bytes are encoding the DateTime as i64 timestamp value.
/// The 8 following bytes are encoding the DateTime as i64 timestamp value.
pub fn from_field_date(field: Field, val: &DateTime) -> Term {
let val_timestamp = val.timestamp();
Term::from_field_i64(field, val_timestamp)
@@ -82,10 +82,10 @@ impl Term {
/// Builds a term given a field, and a u64-value
///
/// Assuming the term has a field id of 1, and a u64 value of 3234,
/// the Term will have 8 bytes.
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id as a u32.
/// The 4 following bytes are encoding the u64 value.
/// The 8 following bytes are encoding the u64 value.
pub fn from_field_u64(field: Field, val: u64) -> Term {
let mut term = Term(vec![0u8; INT_TERM_LEN]);
term.set_field(field);
@@ -94,7 +94,7 @@ impl Term {
}
/// Creates a new Term for a given field.
pub fn for_field(field: Field) -> Term {
pub(crate) fn for_field(field: Field) -> Term {
let mut term = Term(Vec::with_capacity(100));
term.set_field(field);
term
@@ -134,7 +134,7 @@ impl Term {
self.0.extend(bytes);
}
pub fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
pub(crate) fn from_field_bytes(field: Field, bytes: &[u8]) -> Term {
let mut term = Term::for_field(field);
term.set_bytes(bytes);
term
@@ -182,7 +182,7 @@ where
///
/// # Panics
/// ... or returns an invalid value
/// if the term is not a `i64` field.
/// if the term is not a `f64` field.
pub fn get_f64(&self) -> f64 {
common::u64_to_f64(BigEndian::read_u64(&self.0.as_ref()[4..]))
}
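As a rough illustration of the 4 + 8 byte layout described above (a standalone, hypothetical sketch rather than tantivy's `Term` API): the field id occupies the first four bytes and the big-endian value the following eight.

```rust
// Hypothetical sketch of the 12-byte integer term layout: 4 bytes of field id
// followed by 8 bytes of value, both big-endian.
fn encode_u64_term(field_id: u32, value: u64) -> [u8; 12] {
    let mut buf = [0u8; 12];
    buf[..4].copy_from_slice(&field_id.to_be_bytes());
    buf[4..].copy_from_slice(&value.to_be_bytes());
    buf
}

fn main() {
    let term = encode_u64_term(1, 3234);
    assert_eq!(term.len(), 12);
    // The value can be read back from the last 8 bytes.
    let mut val_bytes = [0u8; 8];
    val_bytes.copy_from_slice(&term[4..]);
    assert_eq!(u64::from_be_bytes(val_bytes), 3234);
}
```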
@@ -236,7 +236,7 @@ impl fmt::Debug for Term {
#[cfg(test)]
mod tests {
use crate::{Schema, Term, STRING};
use crate::schema::*;
#[test]
pub fn test_term() {

View File

@@ -1,7 +1,6 @@
use crate::flags::SchemaFlagList;
use crate::flags::StoredFlag;
use crate::IndexRecordOption;
use serde_derive::{Deserialize, Serialize};
use crate::schema::flags::SchemaFlagList;
use crate::schema::flags::StoredFlag;
use crate::schema::IndexRecordOption;
use std::borrow::Cow;
use std::ops::BitOr;
@@ -152,7 +151,7 @@ where
#[cfg(test)]
mod tests {
use crate::{FieldType, IndexRecordOption, Schema, STORED, TEXT};
use crate::schema::*;
#[test]
fn test_field_options() {

View File

@@ -1,11 +1,9 @@
use crate::Facet;
use chrono;
use crate::schema::Facet;
use crate::DateTime;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{cmp::Ordering, fmt};
pub(crate) type DateTime = chrono::DateTime<chrono::Utc>;
/// Value represents the value of any field.
/// It is an enum over all of the possible field types.
#[derive(Debug, Clone, PartialEq, PartialOrd)]
@@ -220,11 +218,11 @@ impl From<Vec<u8>> for Value {
}
mod binary_serialize {
use crate::Facet;
use crate::Value;
use super::Value;
use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
use crate::schema::Facet;
use chrono::{TimeZone, Utc};
use std::io::{self, Read, Write};
use tantivy_common::{f64_to_u64, u64_to_f64, BinarySerializable};
const TEXT_CODE: u8 = 0;
const U64_CODE: u8 = 1;

View File

@@ -120,17 +120,16 @@ pub mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::write_lorem_ipsum_store;
use directory::Directory;
use directory::RAMDirectory;
use crate::directory::Directory;
use crate::directory::RAMDirectory;
use crate::store::StoreReader;
use std::path::Path;
use store::StoreReader;
use test::Bencher;
#[bench]

View File

@@ -165,5 +165,4 @@ mod tests {
assert_eq!(output.len(), 65);
assert_eq!(output[0], 128u8 + 3u8);
}
}

View File

@@ -3,6 +3,7 @@ use super::skiplist::SkipListBuilder;
use super::StoreReader;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt};
use crate::directory::TerminatingWrite;
use crate::directory::WritePtr;
use crate::schema::Document;
use crate::DocId;
@@ -109,6 +110,6 @@ impl StoreWriter {
self.offset_index_writer.write(&mut self.writer)?;
header_offset.serialize(&mut self.writer)?;
self.doc.serialize(&mut self.writer)?;
self.writer.flush()
self.writer.terminate()
}
}
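The switch from `flush()` to `terminate()` above exists so a footer can be appended exactly once, after all payload has been written. A simplified, self-contained sketch of that idea (not tantivy's actual `TerminatingWrite` trait or footer format):

```rust
// Simplified sketch of a "terminating" writer: terminate() consumes the
// writer so a footer can be appended once, after all payload bytes.
use std::io::{self, Write};

trait Terminating: Write {
    fn terminate(self) -> io::Result<()>;
}

struct FooterWriter<W: Write> {
    inner: W,
    bytes_written: u64,
}

impl<W: Write> Write for FooterWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let n = self.inner.write(buf)?;
        self.bytes_written += n as u64;
        Ok(n)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

impl<W: Write> Terminating for FooterWriter<W> {
    fn terminate(mut self) -> io::Result<()> {
        // Append a tiny footer (here: the payload length) and flush last.
        let footer = self.bytes_written.to_le_bytes();
        self.inner.write_all(&footer)?;
        self.inner.flush()
    }
}

fn main() -> io::Result<()> {
    let mut writer = FooterWriter { inner: Vec::<u8>::new(), bytes_written: 0 };
    writer.write_all(b"doc store payload")?;
    writer.terminate()
}
```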

View File

@@ -268,7 +268,7 @@ mod tests {
#[test]
fn test_term_info_block() {
common::fixed_size_test::<TermInfoBlockMeta>();
common::test::fixed_size_test::<TermInfoBlockMeta>();
}
#[test]
@@ -328,5 +328,4 @@ mod tests {
assert_eq!(term_info_store.get(i as u64), term_infos[i]);
}
}
}

View File

@@ -1,6 +1,6 @@
//! # Example
//! ```rust
//! use tantivy_tokenizer::*;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//!
@@ -64,6 +64,14 @@ impl<TailTokenStream> TokenStream for AlphaNumOnlyFilterStream<TailTokenStream>
where
TailTokenStream: TokenStream,
{
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
fn advance(&mut self) -> bool {
while self.tail.advance() {
if self.predicate(self.tail.token()) {
@@ -73,12 +81,4 @@ where
false
}
fn token(&self) -> &Token {
self.tail.token()
}
fn token_mut(&mut self) -> &mut Token {
self.tail.token_mut()
}
}

View File

@@ -1558,11 +1558,11 @@ fn to_ascii(text: &mut String, output: &mut String) {
#[cfg(test)]
mod tests {
use super::to_ascii;
use crate::AsciiFoldingFilter;
use crate::RawTokenizer;
use crate::SimpleTokenizer;
use crate::TokenStream;
use crate::Tokenizer;
use crate::tokenizer::AsciiFoldingFilter;
use crate::tokenizer::RawTokenizer;
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::TokenStream;
use crate::tokenizer::Tokenizer;
use std::iter;
#[test]

View File

@@ -1,5 +1,5 @@
use super::{Token, TokenStream, Tokenizer};
use crate::FACET_SEP_BYTE;
use crate::schema::FACET_SEP_BYTE;
/// The `FacetTokenizer` processes a `Facet` binary representation
/// and emits a token for each of its parents.
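To make that behaviour concrete, a small standalone sketch (not the actual tokenizer, and using `/` instead of the internal 0-byte separator for readability) that emits one token per path prefix, from the root down:

```rust
/// For "/category/electronics/tv", emits
/// ["/category", "/category/electronics", "/category/electronics/tv"].
fn facet_prefix_tokens(facet: &str) -> Vec<String> {
    let mut tokens = Vec::new();
    let mut current = String::new();
    for segment in facet.split('/').filter(|s| !s.is_empty()) {
        current.push('/');
        current.push_str(segment);
        tokens.push(current.clone());
    }
    tokens
}

fn main() {
    assert_eq!(
        facet_prefix_tokens("/category/electronics/tv"),
        vec!["/category", "/category/electronics", "/category/electronics/tv"]
    );
}
```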
@@ -83,8 +83,8 @@ impl<'a> TokenStream for FacetTokenStream<'a> {
mod tests {
use super::FacetTokenizer;
use crate::schema::Facet;
use crate::tokenizer::{Token, TokenStream, Tokenizer};
use tantivy_schema::Facet;
#[test]
fn test_facet_tokenizer() {

View File

@@ -72,10 +72,10 @@ where
#[cfg(test)]
mod tests {
use crate::LowerCaser;
use crate::SimpleTokenizer;
use crate::TokenStream;
use crate::Tokenizer;
use crate::tokenizer::LowerCaser;
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::TokenStream;
use crate::tokenizer::Tokenizer;
#[test]
fn test_to_lower_case() {
@@ -98,10 +98,6 @@ mod tests {
#[test]
fn test_lowercaser() {
assert_eq!(lowercase_helper("Tree"), vec!["tree".to_string()]);
assert_eq!(
lowercase_helper("Русский"),
vec!["русский".to_string()]
);
assert_eq!(lowercase_helper("Русский"), vec!["русский".to_string()]);
}
}

View File

@@ -5,7 +5,7 @@
//! each of your fields :
//!
//! ```rust
//! use tantivy_schema::*;
//! use tantivy::schema::*;
//!
//! # fn main() {
//! let mut schema_builder = Schema::builder();
@@ -64,7 +64,7 @@
//! For instance, the `en_stem` is defined as follows.
//!
//! ```rust
//! use tantivy_tokenizer::*;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//! let en_stem = SimpleTokenizer
@@ -78,8 +78,8 @@
//! register it with a name in your index's [`TokenizerManager`](./struct.TokenizerManager.html).
//!
//! ```rust
//! # use tantivy_schema::Schema;
//! # use tantivy_tokenizer::*;
//! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*;
//! # use tantivy::Index;
//! # fn main() {
//! # let custom_en_tokenizer = SimpleTokenizer;
@@ -98,8 +98,8 @@
//! # Example
//!
//! ```rust
//! use tantivy_schema::{Schema, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy_tokenizer::*;
//! use tantivy::schema::{Schema, IndexRecordOption, TextOptions, TextFieldIndexing};
//! use tantivy::tokenizer::*;
//! use tantivy::Index;
//!
//! # fn main() {
@@ -150,10 +150,8 @@ pub use self::simple_tokenizer::SimpleTokenizer;
pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain;
pub use self::tokenizer::BoxedTokenizer;
pub(crate) const FACET_SEP_BYTE: u8 = 0u8;
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
pub use self::tokenizer_manager::TokenizerManager;
@@ -283,5 +281,4 @@ pub mod tests {
assert!(tokens.is_empty());
}
}
}

View File

@@ -30,7 +30,7 @@ use super::{Token, TokenStream, Tokenizer};
/// # Example
///
/// ```rust
/// use tantivy_tokenizer::*;
/// use tantivy::tokenizer::*;
/// # fn main() {
/// let tokenizer = NgramTokenizer::new(2, 3, false);
/// let mut stream = tokenizer.token_stream("hello");
@@ -308,9 +308,9 @@ mod tests {
use super::CodepointFrontiers;
use super::NgramTokenizer;
use super::StutteringIterator;
use crate::tests::assert_token;
use crate::tokenizer::{TokenStream, Tokenizer};
use crate::Token;
use crate::tokenizer::tests::assert_token;
use crate::tokenizer::tokenizer::{TokenStream, Tokenizer};
use crate::tokenizer::Token;
fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
let mut tokens: Vec<Token> = vec![];
@@ -460,5 +460,4 @@ mod tests {
assert_eq!(it.next(), Some((8, 9)));
assert_eq!(it.next(), None);
}
}

View File

@@ -1,6 +1,6 @@
//! # Example
//! ```rust
//! use tantivy_tokenizer::*;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//!

View File

@@ -1,6 +1,5 @@
use super::{Token, TokenFilter, TokenStream};
use rust_stemmers::{self, Algorithm};
use serde_derive::{Deserialize, Serialize};
/// Available stemmer languages.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]

View File

@@ -1,6 +1,6 @@
//! # Example
//! ```rust
//! use tantivy_tokenizer::*;
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//! let tokenizer = SimpleTokenizer

View File

@@ -1,4 +1,4 @@
use crate::{Token, TokenStream};
use crate::tokenizer::{Token, TokenStream};
const POSITION_GAP: usize = 2;
@@ -97,5 +97,4 @@ mod tests {
assert!(!token_chain.advance());
}
}

View File

@@ -1,4 +1,4 @@
use crate::TokenStreamChain;
use crate::tokenizer::TokenStreamChain;
/// The tokenizer module contains all of the tools used to process
/// text in `tantivy`.
use std::borrow::{Borrow, BorrowMut};
@@ -56,7 +56,7 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// # Example
///
/// ```rust
/// use tantivy_tokenizer::*;
/// use tantivy::tokenizer::*;
///
/// # fn main() {
/// let en_stem = SimpleTokenizer
@@ -186,7 +186,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// # Example
///
/// ```
/// use tantivy_tokenizer::*;
/// use tantivy::tokenizer::*;
///
/// # fn main() {
/// let tokenizer = SimpleTokenizer
@@ -227,7 +227,7 @@ pub trait TokenStream {
/// and `.token()`.
///
/// ```
/// # use tantivy_tokenizer::*;
/// # use tantivy::tokenizer::*;
/// #
/// # fn main() {
/// # let tokenizer = SimpleTokenizer

View File

@@ -1,11 +1,11 @@
use crate::stemmer::Language;
use crate::BoxedTokenizer;
use crate::LowerCaser;
use crate::RawTokenizer;
use crate::RemoveLongFilter;
use crate::SimpleTokenizer;
use crate::Stemmer;
use crate::Tokenizer;
use crate::tokenizer::stemmer::Language;
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::LowerCaser;
use crate::tokenizer::RawTokenizer;
use crate::tokenizer::RemoveLongFilter;
use crate::tokenizer::SimpleTokenizer;
use crate::tokenizer::Stemmer;
use crate::tokenizer::Tokenizer;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
@@ -32,10 +32,11 @@ impl TokenizerManager {
where
A: Into<BoxedTokenizer>,
{
let boxed_tokenizer = tokenizer.into();
self.tokenizers
.write()
.expect("Acquiring the lock should never fail")
.insert(tokenizer_name.to_string(), tokenizer.into());
.insert(tokenizer_name.to_string(), boxed_tokenizer);
}
/// Accessing a tokenizer given its name.

View File

@@ -1,10 +0,0 @@
[package]
name = "tantivy-common"
version = "0.1.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
edition = "2018"
workspace = ".."
[dependencies]
byteorder = "*"
chrono = "*"

View File

@@ -1,235 +0,0 @@
use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;
use std::io::Write;
use std::io::{self, Read};
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
pub struct FileAddr {
field: Field,
idx: usize,
}
impl FileAddr {
fn new(field: Field, idx: usize) -> FileAddr {
FileAddr { field, idx }
}
}
impl BinarySerializable for FileAddr {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.field.serialize(writer)?;
VInt(self.idx as u64).serialize(writer)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let field = Field::deserialize(reader)?;
let idx = VInt::deserialize(reader)?.0 as usize;
Ok(FileAddr { field, idx })
}
}
/// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> {
write: CountingWriter<W>,
offsets: HashMap<FileAddr, u64>,
}
impl<W: Write> CompositeWrite<W> {
/// Create a new writer that writes a composite file
/// in a given write.
pub fn wrap(w: W) -> CompositeWrite<W> {
CompositeWrite {
write: CountingWriter::wrap(w),
offsets: HashMap::new(),
}
}
/// Start writing a new field.
pub fn for_field(&mut self, field: Field) -> &mut CountingWriter<W> {
self.for_field_with_idx(field, 0)
}
/// Start writing a new field.
pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
let offset = self.write.written_bytes();
let file_addr = FileAddr::new(field, idx);
assert!(!self.offsets.contains_key(&file_addr));
self.offsets.insert(file_addr, offset);
&mut self.write
}
/// Close the composite file
///
/// An index of the different field offsets
/// will be written as a footer.
pub fn close(mut self) -> io::Result<()> {
let footer_offset = self.write.written_bytes();
VInt(self.offsets.len() as u64).serialize(&mut self.write)?;
let mut offset_fields: Vec<_> = self
.offsets
.iter()
.map(|(file_addr, offset)| (*offset, *file_addr))
.collect();
offset_fields.sort();
let mut prev_offset = 0;
for (offset, file_addr) in offset_fields {
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
file_addr.serialize(&mut self.write)?;
prev_offset = offset;
}
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?;
self.write.flush()?;
Ok(())
}
}
/// A composite file is an abstraction to store a
/// file partitioned by field.
///
/// The file needs to be written field by field.
/// A footer describes the start and stop offsets
/// for each field.
#[derive(Clone)]
pub struct CompositeFile {
data: ReadOnlySource,
offsets_index: HashMap<FileAddr, (usize, usize)>,
}
impl CompositeFile {
/// Opens a composite file stored in a given
/// `ReadOnlySource`.
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
let end = data.len();
let footer_len_data = data.slice_from(end - 4);
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
let footer_start = end - 4 - footer_len;
let footer_data = data.slice(footer_start, footer_start + footer_len);
let mut footer_buffer = footer_data.as_slice();
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
let mut file_addrs = vec![];
let mut offsets = vec![];
let mut field_index = HashMap::new();
let mut offset = 0;
for _ in 0..num_fields {
offset += VInt::deserialize(&mut footer_buffer)?.0 as usize;
let file_addr = FileAddr::deserialize(&mut footer_buffer)?;
offsets.push(offset);
file_addrs.push(file_addr);
}
offsets.push(footer_start);
for i in 0..num_fields {
let file_addr = file_addrs[i];
let start_offset = offsets[i];
let end_offset = offsets[i + 1];
field_index.insert(file_addr, (start_offset, end_offset));
}
Ok(CompositeFile {
data: data.slice_to(footer_start),
offsets_index: field_index,
})
}
/// Returns a composite file that stores
/// no fields.
pub fn empty() -> CompositeFile {
CompositeFile {
offsets_index: HashMap::new(),
data: ReadOnlySource::empty(),
}
}
/// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
self.open_read_with_idx(field, 0)
}
/// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
self.offsets_index
.get(&FileAddr { field, idx })
.map(|&(from, to)| self.data.slice(from, to))
}
pub fn space_usage(&self) -> PerFieldSpaceUsage {
let mut fields = HashMap::new();
for (&field_addr, &(start, end)) in self.offsets_index.iter() {
fields
.entry(field_addr.field)
.or_insert_with(|| FieldUsage::empty(field_addr.field))
.add_field_idx(field_addr.idx, end - start);
}
PerFieldSpaceUsage::new(fields)
}
}
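The `CompositeFile` above relies on a self-describing footer: the last four bytes give the footer length, so a reader can peel the footer off the end of the data without any external metadata. A minimal sketch of that trick (assuming a little-endian 4-byte length purely for illustration; the real serialization goes through `BinarySerializable`):

```rust
// Split a buffer laid out as [payload][footer][footer_len: u32] into
// (payload, footer), reading the length from the last four bytes.
fn split_footer(data: &[u8]) -> Option<(&[u8], &[u8])> {
    if data.len() < 4 {
        return None;
    }
    let end = data.len() - 4;
    let mut len_bytes = [0u8; 4];
    len_bytes.copy_from_slice(&data[end..]);
    let footer_len = u32::from_le_bytes(len_bytes) as usize;
    if footer_len > end {
        return None;
    }
    Some((&data[..end - footer_len], &data[end - footer_len..end]))
}

fn main() {
    // 5 payload bytes, 3 footer bytes, then the footer length.
    let mut file = b"hello".to_vec();
    file.extend_from_slice(b"ftr");
    file.extend_from_slice(&3u32.to_le_bytes());
    assert_eq!(split_footer(&file), Some((&b"hello"[..], &b"ftr"[..])));
}
```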
#[cfg(test)]
mod test {
use super::{CompositeFile, CompositeWrite};
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::directory::{Directory, RAMDirectory};
use crate::schema::Field;
use std::io::Write;
use std::path::Path;
#[test]
fn test_composite_file() {
let path = Path::new("test_path");
let mut directory = RAMDirectory::create();
{
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
{
let mut write_0 = composite_write.for_field(Field(0u32));
VInt(32431123u64).serialize(&mut write_0).unwrap();
write_0.flush().unwrap();
}
{
let mut write_4 = composite_write.for_field(Field(4u32));
VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap();
}
composite_write.close().unwrap();
}
{
let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r).unwrap();
{
let file0 = composite_file.open_read(Field(0u32)).unwrap();
let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file4 = composite_file.open_read(Field(4u32)).unwrap();
let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0);
assert_eq!(payload_4, 2u64);
}
}
}
}

View File

@@ -1,33 +0,0 @@
[package]
name = "tantivy-schema"
version = "0.1.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
edition = "2018"
workspace = ".."
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
once_cell = "0.2"
regex = "1.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
num_cpus = "1.2"
itertools = "0.8"
notify = {version="4", optional=true}
crossbeam = "0.7"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
downcast-rs = { version="1.0" }
census = "0.2"
failure = "0.1"
fail = "0.3"
scoped-pool = "1.0"
tantivy-common = {path="../tantivy-common"}
chrono = "*"
[dev-dependencies]
matches = "0.1.8"

View File

@@ -1,13 +0,0 @@
[package]
name = "tantivy-tokenizer"
version = "0.1.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
edition = "2018"
workspace = ".."
[dependencies]
fnv = "*"
rust-stemmers = "*"
serde = "*"
serde_derive = "*"
tantivy-schema = {path="../tantivy-schema"}

View File

@@ -1,7 +1,7 @@
use fail;
use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory};
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
use tantivy::doc;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term};
@@ -17,7 +17,7 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
managed_directory
.open_write(test_path)
.unwrap()
.flush()
.terminate()
.unwrap();
assert!(managed_directory.exists(test_path));
// triggering gc and setting the delete operation to fail.