Compare commits


1 Commit

Author:  Paul Masurel
SHA1:    f2b8c030d5
Message: macos ci
Date:    2019-09-13 10:16:42 +09:00
45 changed files with 143 additions and 463 deletions

View File

@@ -41,8 +41,8 @@ matrix:
- env: TARGET=x86_64-unknown-linux-gnu CODECOV=1 #UPLOAD_DOCS=1
# - env: TARGET=x86_64-unknown-linux-musl CODECOV=1
# OSX
#- env: TARGET=x86_64-apple-darwin
# os: osx
- env: TARGET=x86_64-apple-darwin
os: osx
before_install:
- set -e

View File

@@ -7,8 +7,7 @@ Tantivy 0.11.0
- Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- Avoid rebuilding Regex automaton whenever a regex query is reused. #630 (@brainlock)
## How to update?
@@ -16,12 +15,6 @@ Tantivy 0.11.0
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required.
Tantivy 0.10.2
=====================
- Closes #656. Solving memory leak.
Tantivy 0.10.1
=====================
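
The 0.11.0 note above means building a `RegexQuery` is now fallible: the pattern is compiled up front, so a bad pattern surfaces as an error at construction time rather than at search time. A minimal sketch of handling that, assuming a `RegexQuery::from_pattern(pattern, field)` constructor returning tantivy's `Result` (treat the exact constructor name as an assumption):

use tantivy::query::RegexQuery;
use tantivy::schema::Field;

// The regex is compiled here, so an invalid pattern shows up as an Err
// at construction instead of failing later when the query is executed.
fn build_regex_query(pattern: &str, field: Field) -> tantivy::Result<RegexQuery> {
    RegexQuery::from_pattern(pattern, field)
}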

View File

@@ -15,7 +15,6 @@ edition = "2018"
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]}
tantivy-fst = "0.1"

View File

@@ -123,4 +123,5 @@ mod tests {
assert_eq!(count_collector.harvest(), 2);
}
}
}

View File

@@ -599,18 +599,19 @@ mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use crate::collector::FacetCollector;
use crate::query::AllQuery;
use crate::schema::{Facet, Schema};
use crate::Index;
use rand::seq::SliceRandom;
use rand::thread_rng;
use collector::FacetCollector;
use query::AllQuery;
use rand::{thread_rng, Rng};
use schema::Facet;
use schema::Schema;
use test::Bencher;
use Index;
#[bench]
fn bench_facet_collector(b: &mut Bencher) {
@@ -627,7 +628,7 @@ mod bench {
}
}
// 40425 docs
docs[..].shuffle(&mut thread_rng());
thread_rng().shuffle(&mut docs[..]);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs {
@@ -636,7 +637,7 @@ mod bench {
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
b.iter(|| {
let searcher = reader.searcher();
let searcher = index.searcher();
let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap();
});
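
The two shuffle calls above are the same operation through two rand APIs: the older `Rng::shuffle` and the slice-based `SliceRandom::shuffle`. A standalone sketch of the slice-based form, assuming rand 0.7:

use rand::seq::SliceRandom;
use rand::thread_rng;

fn main() {
    let mut docs: Vec<u32> = (0..1_000).collect();
    // SliceRandom adds shuffle() directly on slices; the RNG is passed in.
    docs[..].shuffle(&mut thread_rng());
    println!("first doc after shuffle: {}", docs[0]);
}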

View File

@@ -592,4 +592,5 @@ mod tests {
let query = query_parser.parse_query(query).unwrap();
(index, query)
}
}

View File

@@ -2,7 +2,7 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
offsets: HashMap<FileAddr, u64>,
}
impl<W: TerminatingWrite + Write> CompositeWrite<W> {
impl<W: Write> CompositeWrite<W> {
/// Create a new API writer that writes a composite file
/// into a given writer.
pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,7 +91,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?;
self.write.terminate()
self.write.flush()?;
Ok(())
}
}
@@ -230,4 +231,5 @@ mod test {
}
}
}
}

View File

@@ -1,5 +1,3 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io;
use std::io::Write;
@@ -44,13 +42,6 @@ impl<W: Write> Write for CountingWriter<W> {
}
}
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token)
}
}
#[cfg(test)]
mod test {

View File

@@ -199,7 +199,10 @@ pub mod test {
fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8);
assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
}
#[test]

View File

@@ -26,10 +26,9 @@ use crate::IndexWriter;
use crate::Result;
use num_cpus;
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt;
#[cfg(feature = "mmap")]
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;
fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
@@ -369,11 +368,6 @@ impl Index {
.map(SegmentMeta::id)
.collect())
}
/// Returns the set of corrupted files
pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into)
}
}
impl fmt::Debug for Index {
@@ -601,4 +595,5 @@ mod tests {
assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit);
}
}

View File

@@ -118,8 +118,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files created with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file
@@ -159,8 +157,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files created with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data.
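
A minimal sketch of the contract those doc comments describe, pairing `atomic_write` with `atomic_read` on a `RAMDirectory` (illustration only; error handling is simplified):

use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory};

fn main() -> std::io::Result<()> {
    let mut directory = RAMDirectory::create();
    let path = Path::new("meta.json");
    // Small payloads only: the whole file content is replaced in one atomic step.
    directory.atomic_write(path, b"random_test_data")?;
    let payload = directory.atomic_read(path).expect("file was just written");
    assert_eq!(&payload[..], &b"random_test_data"[..]);
    Ok(())
}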

View File

@@ -1,213 +0,0 @@
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
const COMMON_FOOTER_SIZE: usize = 4 * 5;
#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
pub tantivy_version: (u32, u32, u32),
pub meta: String,
pub versioned_footer: VersionedFooter,
}
impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self {
let tantivy_version = (
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer {
tantivy_version,
meta: format!(
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer,
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut res = self.versioned_footer.to_bytes();
res.extend_from_slice(self.meta.as_bytes());
let len = res.len();
res.resize(len + COMMON_FOOTER_SIZE, 0);
let mut common_footer = &mut res[len..];
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
}
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The footer len is {}, while the entire file len is {}",
size, len
),
));
}
let footer = &data[len - size as usize..];
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
Ok(Footer {
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
}
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
let footer = Footer::from_bytes(source.as_slice())?;
let reader = source.slice_to(source.as_slice().len() - footer.size());
Ok((footer, reader))
}
pub fn size(&self) -> usize {
self.versioned_footer.size() as usize + self.meta.len() + 20
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion { version: u32, size: u32 },
V0(u32), // crc
}
impl VersionedFooter {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
}
}
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
assert!(footer.len() >= 4);
let version = LittleEndian::read_u32(footer);
match version {
0 => {
if footer.len() == 8 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
} else {
Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The versioned footer len is {}, while it should be 8",
footer.len()
),
))
}
}
version => Ok(VersionedFooter::UnknownVersion {
version,
size: footer.len() as u32,
}),
}
}
pub fn size(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 8,
VersionedFooter::UnknownVersion { size, .. } => *size,
}
}
pub fn version(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 0,
VersionedFooter::UnknownVersion { version, .. } => *version,
}
}
pub fn crc(&self) -> Option<u32> {
match self {
VersionedFooter::V0(crc) => Some(*crc),
VersionedFooter::UnknownVersion { .. } => None,
}
}
}
pub(crate) struct FooterProxy<W: TerminatingWrite> {
/// always Some except after terminate call
hasher: Option<Hasher>,
/// always Some except after terminate call
writer: Option<W>,
}
impl<W: TerminatingWrite> FooterProxy<W> {
pub fn new(writer: W) -> Self {
FooterProxy {
hasher: Some(Hasher::new()),
writer: Some(writer),
}
}
}
impl<W: TerminatingWrite> Write for FooterProxy<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let count = self.writer.as_mut().unwrap().write(buf)?;
self.hasher.as_mut().unwrap().update(&buf[..count]);
Ok(count)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.as_mut().unwrap().flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
let mut writer = self.writer.take().unwrap();
writer.write_all(&footer)?;
writer.terminate()
}
}
#[cfg(test)]
mod tests {
use crate::directory::footer::{Footer, VersionedFooter};
#[test]
fn test_serialize_deserialize_footer() {
let crc = 123456;
let footer = Footer::new(VersionedFooter::V0(crc));
let footer_bytes = footer.to_bytes();
assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
}
}
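
For context on the file removed above: `FooterProxy` hashes every byte written through it and appends the CRC inside a footer when the writer is terminated. The crc32fast half of that pattern, stripped of tantivy's types, looks roughly like this (a standalone sketch, not the crate's API):

use crc32fast::Hasher;
use std::io::{self, Write};

/// Writes through to `inner` while hashing everything written,
/// mirroring the checksum half of the removed FooterProxy.
struct ChecksumWriter<W: Write> {
    inner: W,
    hasher: Hasher,
}

impl<W: Write> ChecksumWriter<W> {
    fn new(inner: W) -> Self {
        ChecksumWriter { inner, hasher: Hasher::new() }
    }

    /// Flushes the inner writer and returns it with the CRC32 of all bytes written.
    fn finish(mut self) -> io::Result<(W, u32)> {
        self.inner.flush()?;
        Ok((self.inner, self.hasher.finalize()))
    }
}

impl<W: Write> Write for ChecksumWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        // Only hash the bytes the inner writer actually accepted.
        self.hasher.update(&buf[..written]);
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}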

View File

@@ -1,6 +1,5 @@
use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock;
use crate::directory::Lock;
use crate::directory::META_LOCK;
@@ -9,7 +8,6 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;
use crc32fast::Hasher;
use serde_json;
use std::collections::HashSet;
use std::io;
@@ -209,59 +207,17 @@ impl ManagedDirectory {
}
Ok(())
}
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
Ok(footer
.versioned_footer
.crc()
.map(|v| v == crc)
.unwrap_or(false))
}
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
hashset.insert(path);
}
}
Ok(hashset)
}
}
impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
self.directory.open_read(path)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new(
self.directory
.open_write(path)?
.into_inner()
.map_err(|_| ())
.expect("buffer should be empty"),
))))
self.directory.open_write(path)
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -303,9 +259,8 @@ impl Clone for ManagedDirectory {
#[cfg(test)]
mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
use crate::directory::{Directory, ManagedDirectory, MmapDirectory};
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
@@ -320,8 +275,8 @@ mod tests_mmap_specific {
{
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let write_file = managed_directory.open_write(test_path1).unwrap();
write_file.terminate().unwrap();
let mut write_file = managed_directory.open_write(test_path1).unwrap();
write_file.flush().unwrap();
managed_directory
.atomic_write(test_path2, &[0u8, 1u8])
.unwrap();
@@ -355,9 +310,9 @@ mod tests_mmap_specific {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
managed_directory
.atomic_write(test_path1, &vec![0u8, 1u8])
.unwrap();
assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap();
@@ -376,38 +331,4 @@ mod tests_mmap_specific {
}
}
#[test]
fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2);
let mut file = OpenOptions::new()
.write(true)
.open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file);
let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2));
}
}

View File

@@ -11,7 +11,6 @@ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
};
use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory;
use crate::directory::DirectoryLock;
use crate::directory::Lock;
@@ -19,7 +18,7 @@ use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr};
use crate::directory::WritePtr;
use atomicwrites;
use memmap::Mmap;
use std::collections::HashMap;
@@ -142,28 +141,42 @@ impl MmapCache {
}
}
struct WatcherWrapper {
struct InnerWatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>,
watcher_router: WatchCallbackList,
}
impl InnerWatcherWrapper {
pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let mut watcher = notify::raw_watcher(tx)?;
watcher.watch(path, RecursiveMode::Recursive)?;
let inner = InnerWatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router: Default::default(),
};
Ok((inner, watcher_recv))
}
}
#[derive(Clone)]
struct WatcherWrapper {
inner: Arc<InnerWatcherWrapper>,
}
impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let watcher = notify::raw_watcher(tx)
.and_then(|mut watcher| {
watcher.watch(path, RecursiveMode::Recursive)?;
Ok(watcher)
})
.map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_wrapper = WatcherWrapper {
inner: Arc::new(inner),
};
let watcher_wrapper_clone = watcher_wrapper.clone();
thread::Builder::new()
.name("meta-file-watch-thread".to_string())
.spawn(move || {
@@ -174,7 +187,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH {
watcher_router_clone.broadcast();
watcher_wrapper_clone.inner.watcher_router.broadcast();
}
}
}
@@ -187,15 +200,13 @@ impl WatcherWrapper {
}
}
}
})?;
Ok(WatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router,
})
})
.expect("Failed to spawn thread to watch meta.json");
Ok(watcher_wrapper)
}
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.watcher_router.subscribe(watch_callback)
self.inner.watcher_router.subscribe(watch_callback)
}
}
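
The watcher refactor above folds the `notify` watcher and the callback list into one `Arc`-backed inner struct, so the spawned watch thread and the directory handle share a single piece of state. A stripped-down sketch of that ownership pattern using only std types in place of `notify` and `WatchCallbackList` (all names here are illustrative, not tantivy's):

use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;

type Callback = Box<dyn Fn() + Send>;

struct Inner {
    callbacks: Mutex<Vec<Callback>>,
}

#[derive(Clone)]
struct Watcher {
    inner: Arc<Inner>,
}

impl Watcher {
    /// Spawns the listening thread; the Sender stands in for the
    /// file-system event source that notify provides in the real code.
    fn new() -> (Watcher, Sender<()>) {
        let (tx, rx) = channel::<()>();
        let watcher = Watcher {
            inner: Arc::new(Inner { callbacks: Mutex::new(Vec::new()) }),
        };
        // The thread keeps its own clone of the Arc, so the shared state
        // lives as long as either the thread or the directory handle does.
        let thread_handle = watcher.clone();
        thread::spawn(move || {
            while rx.recv().is_ok() {
                for callback in thread_handle.inner.callbacks.lock().unwrap().iter() {
                    callback();
                }
            }
        });
        (watcher, tx)
    }

    fn subscribe(&self, callback: Callback) {
        self.inner.callbacks.lock().unwrap().push(callback);
    }
}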
@@ -401,12 +412,6 @@ impl Seek for SafeFileWriter {
}
}
impl TerminatingWrite for SafeFileWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);

View File

@@ -9,7 +9,6 @@ mod mmap_directory;
mod directory;
mod directory_lock;
mod footer;
mod managed_directory;
mod ram_directory;
mod read_only_source;
@@ -25,49 +24,18 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::io::{BufWriter, Write};
#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write
/// and Seek.
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>;
pub type WritePtr = BufWriter<Box<dyn Write>>;
#[cfg(test)]
mod tests;
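
For reference on what this file loses: `terminate` consumes the writer and delegates to `terminate_ref`, and the private `AntiCallToken` keeps callers from invoking the by-reference method directly. A self-contained sketch of that pattern, re-declaring a trimmed copy of the trait rather than using the removed tantivy one:

use std::io::{self, Write};

/// Token with a private constructor: outside this module, code can only
/// obtain one inside `terminate`, so `terminate_ref` cannot be called directly.
pub struct AntiCallToken(());

pub trait TerminatingWrite: Write {
    /// Consume the writer once no more data will be written.
    fn terminate(mut self) -> io::Result<()>
    where
        Self: Sized,
    {
        self.terminate_ref(AntiCallToken(()))
    }

    /// Flush buffers and do any end-of-stream work (e.g. append a footer).
    fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()>;
}

/// Example implementor: a writer that only needs a flush on termination.
struct PlainWriter<W: Write>(W);

impl<W: Write> Write for PlainWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.0.flush()
    }
}

impl<W: Write> TerminatingWrite for PlainWriter<W> {
    fn terminate_ref(&mut self, _token: AntiCallToken) -> io::Result<()> {
        self.0.flush()
    }
}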

View File

@@ -1,9 +1,8 @@
use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point;
use std::collections::HashMap;
use std::fmt;
@@ -72,12 +71,6 @@ impl Write for VecWriter {
}
}
impl TerminatingWrite for VecWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[derive(Default)]
struct InnerDirectory {
fs: HashMap<PathBuf, ReadOnlySource>,

View File

@@ -127,7 +127,7 @@ fn test_watch(directory: &mut dyn Directory) {
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
for _ in 0..1_000 {
for _ in 0..100 {
if counter.load(Ordering::SeqCst) > i {
break;
}

View File

@@ -152,4 +152,5 @@ mod tests {
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
}
}

View File

@@ -429,6 +429,7 @@ mod tests {
}
}
}
}
#[cfg(all(test, feature = "unstable"))]
@@ -436,9 +437,9 @@ mod bench {
use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA};
use super::*;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader;
use common::CompositeFile;
use directory::{Directory, RAMDirectory, WritePtr};
use fastfield::FastFieldReader;
use std::collections::HashMap;
use std::path::Path;
use test::{self, Bencher};
@@ -536,4 +537,5 @@ mod bench {
});
}
}
}

View File

@@ -8,7 +8,6 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite;
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
@@ -169,7 +168,6 @@ pub(crate) fn advance_deletes(
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, &mut delete_file)?;
delete_file.terminate()?;
}
}
segment_entry.set_meta(segment.meta().clone());
@@ -1179,4 +1177,5 @@ mod tests {
assert!(clear_again.is_ok());
assert!(commit_again.is_ok());
}
}

View File

@@ -134,4 +134,5 @@ mod tests {
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
}
}

View File

@@ -296,4 +296,5 @@ mod tests {
assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
}
}

View File

@@ -274,15 +274,13 @@ pub mod tests {
mod bench {
use super::*;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use rand::{Rng, XorShiftRng};
use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let mut seed: [u8; 32] = [0; 32];
seed[31] = seed_val;
let mut rng = StdRng::from_seed(seed);
let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
}

View File

@@ -622,23 +622,23 @@ pub mod tests {
assert!(!postings_unopt.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::*;
use crate::docset::SkipResult;
use crate::query::Intersection;
use crate::schema::IndexRecordOption;
use crate::tests;
use crate::DocSet;
use docset::SkipResult;
use query::Intersection;
use schema::IndexRecordOption;
use test::{self, Bencher};
use tests;
use DocSet;
#[bench]
fn bench_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
@@ -652,8 +652,7 @@ mod bench {
#[bench]
fn bench_segment_intersection(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let segment_postings_a = segment_reader
@@ -683,8 +682,7 @@ mod bench {
}
fn bench_skip_next(p: f64, b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p);
@@ -739,8 +737,7 @@ mod bench {
#[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap();
let searcher = reader.searcher();
let searcher = INDEX.searcher();
let segment_reader = searcher.segment_reader(0);
b.iter(|| {
let n: u32 = test::black_box(17);

View File

@@ -310,7 +310,6 @@ mod bench {
use super::super::MemoryArena;
use super::ExpUnrolledLinkedList;
use byteorder::{NativeEndian, WriteBytesExt};
use std::iter;
use test::Bencher;
const NUM_STACK: usize = 10_000;
@@ -336,10 +335,11 @@ mod bench {
fn bench_push_stack(bench: &mut Bencher) {
bench.iter(|| {
let mut heap = MemoryArena::new();
let mut stacks: Vec<ExpUnrolledLinkedList> =
iter::repeat_with(ExpUnrolledLinkedList::new)
.take(NUM_STACK)
.collect();
let mut stacks = Vec::with_capacity(100);
for _ in 0..NUM_STACK {
let mut stack = ExpUnrolledLinkedList::new();
stacks.push(stack);
}
for s in 0..NUM_STACK {
for i in 0u32..STACK_SIZE {
let t = s * 392017 % NUM_STACK;

View File

@@ -130,4 +130,5 @@ mod tests {
assert!(!scorer.advance());
}
}
}

View File

@@ -216,6 +216,7 @@ mod tests {
assert!(!docset.advance());
}
}
}
#[cfg(all(test, feature = "unstable"))]
@@ -223,12 +224,13 @@ mod bench {
use super::BitSet;
use super::BitSetDocSet;
use crate::test;
use crate::tests;
use crate::DocSet;
use test;
use tests;
use DocSet;
#[bench]
fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
b.iter(|| {
let mut bitset = BitSet::with_max_value(1_000_000);
@@ -240,6 +242,7 @@ mod bench {
#[bench]
fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
let mut bitset = BitSet::with_max_value(1_000_000);
for el in els {

View File

@@ -137,4 +137,5 @@ mod tests {
fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472);
}
}

View File

@@ -247,7 +247,9 @@ mod tests {
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap();
let query = query_parser
.parse_query("Оксана Лифенко")
.unwrap();
let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
scorer.advance();

View File

@@ -175,4 +175,5 @@ mod tests {
sample_skip,
);
}
}

View File

@@ -479,4 +479,5 @@ mod tests {
91
);
}
}

View File

@@ -190,4 +190,5 @@ mod tests {
skip_docs,
);
}
}

View File

@@ -409,17 +409,20 @@ mod tests {
vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use crate::query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet};
use crate::tests;
use crate::DocId;
use crate::DocSet;
use query::score_combiner::DoNothingCombiner;
use query::ConstScorer;
use query::Union;
use query::VecDocSet;
use test::Bencher;
use tests;
use DocId;
use DocSet;
#[bench]
fn bench_union_3_high(bench: &mut Bencher) {

View File

@@ -82,4 +82,5 @@ pub mod tests {
}
assert_eq!(postings.fill_buffer(&mut buffer[..]), 9);
}
}

View File

@@ -178,4 +178,5 @@ mod tests {
doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1);
}
}

View File

@@ -174,4 +174,5 @@ mod tests {
assert!(!is_valid_field_name("シャボン玉"));
assert!(is_valid_field_name("my_text_field"));
}
}

View File

@@ -120,16 +120,17 @@ pub mod tests {
);
}
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::tests::write_lorem_ipsum_store;
use crate::directory::Directory;
use crate::directory::RAMDirectory;
use crate::store::StoreReader;
use directory::Directory;
use directory::RAMDirectory;
use std::path::Path;
use store::StoreReader;
use test::Bencher;
#[bench]

View File

@@ -165,4 +165,5 @@ mod tests {
assert_eq!(output.len(), 65);
assert_eq!(output[0], 128u8 + 3u8);
}
}

View File

@@ -3,7 +3,6 @@ use super::skiplist::SkipListBuilder;
use super::StoreReader;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt};
use crate::directory::TerminatingWrite;
use crate::directory::WritePtr;
use crate::schema::Document;
use crate::DocId;
@@ -110,6 +109,6 @@ impl StoreWriter {
self.offset_index_writer.write(&mut self.writer)?;
header_offset.serialize(&mut self.writer)?;
self.doc.serialize(&mut self.writer)?;
self.writer.terminate()
self.writer.flush()
}
}

View File

@@ -328,4 +328,5 @@ mod tests {
assert_eq!(term_info_store.get(i as u64), term_infos[i]);
}
}
}

View File

@@ -98,6 +98,10 @@ mod tests {
#[test]
fn test_lowercaser() {
assert_eq!(lowercase_helper("Tree"), vec!["tree".to_string()]);
assert_eq!(lowercase_helper("Русский"), vec!["русский".to_string()]);
assert_eq!(
lowercase_helper("Русский"),
vec!["русский".to_string()]
);
}
}

View File

@@ -281,4 +281,5 @@ pub mod tests {
assert!(tokens.is_empty());
}
}
}

View File

@@ -460,4 +460,5 @@ mod tests {
assert_eq!(it.next(), Some((8, 9)));
assert_eq!(it.next(), None);
}
}

View File

@@ -97,4 +97,5 @@ mod tests {
assert!(!token_chain.advance());
}
}

View File

@@ -1,7 +1,7 @@
use fail;
use std::io::Write;
use std::path::Path;
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory};
use tantivy::doc;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term};
@@ -17,7 +17,7 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
managed_directory
.open_write(test_path)
.unwrap()
.terminate()
.flush()
.unwrap();
assert!(managed_directory.exists(test_path));
// triggering gc and setting the delete operation to fail.