Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-03 07:42:54 +00:00
Compare commits
24 Commits
bug/merge-...issue/483
| SHA1 |
|---|
| 31543bdd90 |
| ea1402bd82 |
| c7bddc5fe3 |
| 7b97dde335 |
| 644b4bd0a1 |
| bf94fd77db |
| 097eaf4aa6 |
| 1fd46c1e9b |
| 2fb219d017 |
| 63b593bd0a |
| 286bb75a0c |
| 222b7f2580 |
| 5292e78860 |
| c0cc6aac83 |
| 0b0bf59a32 |
| 74f70a5c2c |
| 1acfb2ebb5 |
| 4dfd091e67 |
| 8eba4ab807 |
| 5e8e03882b |
| 7df3260a15 |
| 176f67a266 |
| 19babff849 |
| bf2576adf9 |
CHANGELOG.md (15 lines changed)
@@ -1,3 +1,18 @@
Tantivy 0.9.0
=====================
- Removed most unsafe code (@fulmicoton)
- Indexer memory footprint improved (VInt compression, inlining of the first block) (@fulmicoton)
- Stemming in other languages possible (@pentlander)
- Segments with no docs are deleted earlier (@barrotsteindev)

Tantivy 0.8.1
=====================
Hotfix of #476.

Merge was reflecting deletes before the commit was passed.
Thanks @barrotsteindev for reporting the bug.

Tantivy 0.8.0
=====================
*No change in the index format*
Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "tantivy"
version = "0.8.0"
version = "0.9.0-dev"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
@@ -29,6 +29,7 @@ serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
num_cpus = "1.2"
fs2={version="0.4", optional=true}
itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]}
bit-set = "0.5"
@@ -39,10 +40,10 @@ futures-cpupool = "0.1"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
rust-stemmers = "1"
downcast = { version="0.9" }
downcast-rs = { version="1.0" }
matches = "0.1"
bitpacking = "0.5"
census = "0.1"
census = "0.2"
fnv = "1.0.6"
owned-read = "0.4"
failure = "0.1"
@@ -70,7 +71,7 @@ overflow-checks = true
[features]
# by default no-fail is disabled. We manually enable it when running test.
default = ["mmap", "no_fail"]
mmap = ["fst/mmap", "atomicwrites"]
mmap = ["fst/mmap", "atomicwrites", "fs2"]
lz4-compression = ["lz4"]
no_fail = ["fail/no_fail"]
unstable = [] # useful for benches.
@@ -88,7 +88,7 @@ mod tests {
let index = Index::create_in_ram(schema.clone());

{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
for i in 0u64..10u64 {
index_writer.add_document(doc!(
@@ -85,7 +85,7 @@ See the `custom_collector` example.

*/

use downcast;
use downcast_rs;
use DocId;
use Result;
use Score;
@@ -111,9 +111,9 @@ pub use self::facet_collector::FacetCollector;

/// `Fruit` is the type for the result of our collection.
/// e.g. `usize` for the `Count` collector.
pub trait Fruit: Send + downcast::Any {}
pub trait Fruit: Send + downcast_rs::Downcast {}

impl<T> Fruit for T where T: Send + downcast::Any {}
impl<T> Fruit for T where T: Send + downcast_rs::Downcast {}

/// Collectors are in charge of collecting and retaining relevant
/// information from the document found and scored by the query.
@@ -358,10 +358,7 @@ where
}
}

#[allow(missing_docs)]
mod downcast_impl {
downcast!(super::Fruit);
}
impl_downcast!(Fruit);

#[cfg(test)]
pub mod tests;
@@ -1,7 +1,6 @@
use super::Collector;
use super::SegmentCollector;
use collector::Fruit;
use downcast::Downcast;
use std::marker::PhantomData;
use DocId;
use Result;
@@ -37,11 +36,10 @@ impl<TCollector: Collector> Collector for CollectorWrapper<TCollector> {
let typed_fruit: Vec<TCollector::Fruit> = children
.into_iter()
.map(|untyped_fruit| {
Downcast::<TCollector::Fruit>::downcast(untyped_fruit)
untyped_fruit.downcast::<TCollector::Fruit>()
.map(|boxed_but_typed| *boxed_but_typed)
.map_err(|e| {
let err_msg = format!("Failed to cast child collector fruit. {:?}", e);
TantivyError::InvalidArgument(err_msg)
.map_err(|_| {
TantivyError::InvalidArgument("Failed to cast child fruit.".to_string())
})
})
.collect::<Result<_>>()?;
@@ -89,7 +87,7 @@ pub struct FruitHandle<TFruit: Fruit> {
impl<TFruit: Fruit> FruitHandle<TFruit> {
pub fn extract(self, fruits: &mut MultiFruit) -> TFruit {
let boxed_fruit = fruits.sub_fruits[self.pos].take().expect("");
*Downcast::<TFruit>::downcast(boxed_fruit).expect("Failed")
*boxed_fruit.downcast::<TFruit>().map_err(|_| ()).expect("Failed to downcast collector fruit.")
}
}
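This hunk swaps the `downcast` crate for `downcast-rs`. As a rough sketch of the pattern the new code relies on (the `Fruit` trait name comes from the diff; the `CountFruit` type below is a made-up example, assuming downcast-rs 1.0), a boxed trait object is recovered into its concrete type like this:

```rust
// Sketch only: illustrates the downcast-rs pattern used in the hunk above.
#[macro_use]
extern crate downcast_rs;
use downcast_rs::Downcast;

pub trait Fruit: Send + Downcast {}
impl_downcast!(Fruit); // generates `downcast::<T>()` on `Box<dyn Fruit>`

#[derive(Debug, PartialEq)]
struct CountFruit(usize);
impl Fruit for CountFruit {}

fn main() {
    let boxed: Box<dyn Fruit> = Box::new(CountFruit(42));
    // `downcast` returns Err(original_box) when the concrete type does not match.
    let typed: CountFruit = *boxed.downcast::<CountFruit>().expect("wrong fruit type");
    assert_eq!(typed, CountFruit(42));
}
```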
@@ -142,7 +142,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"Hello happy tax payer."));
index_writer.add_document(doc!(text_field=>"Droopy says hello happy tax payer"));
index_writer.add_document(doc!(text_field=>"I like Droopy"));
@@ -69,8 +69,8 @@ pub fn serialize_vint_u32(val: u32) -> (u64, usize) {
/// # May Panic
/// If the payload does not start with a valid `vint`
fn vint_len(data: &[u8]) -> usize {
for i in 0..5.min(data.len()) {
if data[i] >= STOP_BIT {
for (i, &val) in data.iter().enumerate().take(5) {
if val >= STOP_BIT {
return i + 1;
}
}
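The hunk above rewrites `vint_len` to iterate instead of indexing. As a hedged illustration of the encoding it scans (a standalone sketch, not tantivy's actual serializer; the 7-bit-per-byte layout with a high `STOP_BIT` on the last byte is the assumption), a variable-length u32 could be written and measured like this:

```rust
// Sketch of a VInt layout: each byte carries 7 payload bits and the last byte
// has its high bit (STOP_BIT) set.
const STOP_BIT: u8 = 128;

fn serialize_vint_u32(mut val: u32, out: &mut Vec<u8>) {
    loop {
        let byte = (val & 127) as u8;
        val >>= 7;
        if val == 0 {
            out.push(byte | STOP_BIT); // last byte: mark it with the stop bit
            return;
        }
        out.push(byte);
    }
}

// Same scanning logic as the new `vint_len`: the length is the index of the
// first byte carrying the stop bit, plus one.
fn vint_len(data: &[u8]) -> usize {
    for (i, &val) in data.iter().enumerate().take(5) {
        if val >= STOP_BIT {
            return i + 1;
        }
    }
    panic!("payload does not start with a valid vint");
}

fn main() {
    let mut buf = Vec::new();
    serialize_vint_u32(300, &mut buf); // 300 needs two 7-bit groups
    assert_eq!(vint_len(&buf), 2);
}
```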
@@ -12,13 +12,14 @@ use core::META_FILEPATH;
use directory::ManagedDirectory;
#[cfg(feature = "mmap")]
use directory::MmapDirectory;
use directory::INDEX_WRITER_LOCK;
use directory::META_LOCK;
use directory::{Directory, RAMDirectory};
use error::DataCorruption;
use error::TantivyError;
use indexer::index_writer::open_index_writer;
use indexer::index_writer::HEAP_SIZE_MIN;
use indexer::segment_updater::save_new_metas;
use indexer::LockType;
use num_cpus;
use schema::Field;
use schema::FieldType;
@@ -150,7 +151,7 @@ impl Index {
///
/// This will overwrite existing meta.json
fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
save_new_metas(schema.clone(), 0, directory.borrow_mut())?;
save_new_metas(schema.clone(), directory.borrow_mut())?;
let metas = IndexMeta::with_schema(schema);
Index::create_from_metas(directory, &metas)
}
@@ -232,7 +233,8 @@ impl Index {
/// Each thread will receive a budget of `overall_heap_size_in_bytes / num_threads`.
///
/// # Errors
/// If the lockfile already exists, returns `Error::FileAlreadyExists`.
/// If the lockfile already exists, returns `Error::DirectoryLockBusy` or an `Error::IOError`.
///
/// # Panics
/// If the heap size per thread is too small, panics.
pub fn writer_with_num_threads(
@@ -240,7 +242,21 @@ impl Index {
num_threads: usize,
overall_heap_size_in_bytes: usize,
) -> Result<IndexWriter> {
let directory_lock = LockType::IndexWriterLock.acquire_lock(&self.directory)?;
let directory_lock = self
.directory
.acquire_lock(&INDEX_WRITER_LOCK)
.map_err(|err| {
TantivyError::LockFailure(
err,
Some(
"Failed to acquire index lock. If you are using\
a regular directory, this means there is already an \
`IndexWriter` working on this `Directory`, in this process \
or in a different process."
.to_string(),
),
)
})?;
let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
open_index_writer(
self,
@@ -339,7 +355,7 @@ impl Index {
/// get the freshest `index` at all times, is to watch `meta.json` and
/// call `load_searchers` whenever a change happens.
pub fn load_searchers(&self) -> Result<()> {
let _meta_lock = LockType::MetaLock.acquire_lock(self.directory())?;
let _meta_lock = self.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
let segment_readers: Vec<SegmentReader> = searchable_segments
.iter()
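With this change, failing to grab the writer lock surfaces as `TantivyError::LockFailure(LockError, Option<String>)` instead of a `LockType`-based error. A minimal sketch of how a caller might react (the schema and heap budget below are placeholder values, following the tests in this diff):

```rust
// Sketch: opening a writer and reacting to a busy writer lock,
// assuming the 0.9-dev API introduced by this diff.
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, TantivyError};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let _writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
    // A second writer on the same index cannot acquire INDEX_WRITER_LOCK.
    match index.writer_with_num_threads(1, 3_000_000) {
        Err(TantivyError::LockFailure(lock_err, hint)) => {
            eprintln!("writer lock busy: {:?} ({:?})", lock_err, hint);
        }
        Ok(_) => panic!("expected the second writer to fail"),
        Err(other) => panic!("unexpected error: {:?}", other),
    }
}
```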
@@ -41,6 +41,6 @@ impl SegmentComponent {
SegmentComponent::STORE,
SegmentComponent::DELETE,
];
SEGMENT_COMPONENTS.into_iter()
SEGMENT_COMPONENTS.iter()
}
}
@@ -1,11 +1,102 @@
use directory::directory_lock::Lock;
use directory::error::LockError;
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::{ReadOnlySource, WritePtr};
use std::fmt;
use std::io;
use std::io::Write;
use std::marker::Send;
use std::marker::Sync;
use std::path::Path;
use std::path::PathBuf;
use std::result;
use std::thread;
use std::time::Duration;

/// The retry logic for acquiring locks is pretty simple.
/// We just retry `n` times after a given `duration`, both
/// depending on the type of lock.
struct RetryPolicy {
num_retries: usize,
wait_in_ms: u64,
}

impl RetryPolicy {
fn no_retry() -> RetryPolicy {
RetryPolicy {
num_retries: 0,
wait_in_ms: 0,
}
}

fn wait_and_retry(&mut self) -> bool {
if self.num_retries == 0 {
false
} else {
self.num_retries -= 1;
let wait_duration = Duration::from_millis(self.wait_in_ms);
thread::sleep(wait_duration);
true
}
}
}

/// The `DirectoryLock` is an object that represents a file lock.
/// See [`LockType`](struct.LockType.html)
///
/// It is transparently associated to a lock file, that gets deleted
/// on `Drop`. The lock is released automatically on `Drop`.
pub struct DirectoryLock(Box<Drop + Send + 'static>);

struct DirectoryLockGuard {
directory: Box<Directory>,
path: PathBuf,
}

impl<T: Drop + Send + 'static> From<Box<T>> for DirectoryLock {
fn from(underlying: Box<T>) -> Self {
DirectoryLock(underlying)
}
}

impl Drop for DirectoryLockGuard {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&*self.path) {
error!("Failed to remove the lock file. {:?}", e);
}
}
}

enum TryAcquireLockError {
FileExists,
IOError(io::Error),
}

fn try_acquire_lock(
filepath: &Path,
directory: &mut Directory,
) -> Result<DirectoryLock, TryAcquireLockError> {
let mut write = directory.open_write(filepath).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TryAcquireLockError::FileExists,
OpenWriteError::IOError(io_error) => TryAcquireLockError::IOError(io_error.into()),
})?;
write.flush().map_err(TryAcquireLockError::IOError)?;
Ok(DirectoryLock::from(Box::new(DirectoryLockGuard {
directory: directory.box_clone(),
path: filepath.to_owned(),
})))
}

fn retry_policy(is_blocking: bool) -> RetryPolicy {
if is_blocking {
RetryPolicy {
num_retries: 100,
wait_in_ms: 100,
}
} else {
RetryPolicy::no_retry()
}
}

/// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored.
@@ -73,6 +164,29 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
///
/// The file may or may not previously exist.
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()>;

/// Acquire a lock in the given directory.
///
/// The method is blocking or not depending on the `Lock` object.
fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
let mut box_directory = self.box_clone();
let mut retry_policy = retry_policy(lock.is_blocking);
loop {
match try_acquire_lock(&lock.filepath, &mut *box_directory) {
Ok(result) => {
return Ok(result);
}
Err(TryAcquireLockError::FileExists) => {
if !retry_policy.wait_and_retry() {
return Err(LockError::LockBusy);
}
}
Err(TryAcquireLockError::IOError(io_error)) => {
return Err(LockError::IOError(io_error));
}
}
}
}
}

/// DirectoryClone
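The default `acquire_lock` above implements file-creation-based locking with retries driven by the `Lock`'s blocking flag. A hedged sketch of calling it from client code with a custom lock file (the lock name is arbitrary; `RAMDirectory` and `Lock` come from this diff):

```rust
// Sketch: acquiring a custom, non-blocking lock through the Directory trait.
// "custom-job.lock" is an example name, not a tantivy lock file.
use std::path::PathBuf;
use tantivy::directory::{Directory, Lock, RAMDirectory};

fn main() {
    let directory = RAMDirectory::create();
    let job_lock = Lock {
        filepath: PathBuf::from("custom-job.lock"),
        is_blocking: false, // fail fast instead of retrying every 100ms
    };
    {
        let _guard = directory.acquire_lock(&job_lock).expect("lock should be free");
        // While `_guard` is alive, a second acquire_lock on the same path returns LockError::LockBusy.
        assert!(directory.acquire_lock(&job_lock).is_err());
    } // `_guard` dropped here: the lock file is deleted and the lock is released.
    assert!(directory.acquire_lock(&job_lock).is_ok());
}
```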
src/directory/directory_lock.rs (new file, 56 lines)
@@ -0,0 +1,56 @@
use std::path::PathBuf;

/// A directory lock.
///
/// A lock is associated to a specific path and some
/// [`LockParams`](./enum.LockParams.html).
/// Tantivy itself uses only two locks but client applications
/// can use the directory facility to define their own locks.
/// - [INDEX_WRITER_LOCK](./struct.INDEX_WRITER_LOCK.html)
/// - [META_LOCK](./struct.META_LOCK.html)
///
/// Check out these locks' documentation for more information.
///
#[derive(Debug)]
pub struct Lock {
/// The lock needs to be associated with its own file `path`.
/// Depending on the platform, the lock might rely on the creation
/// and deletion of this filepath.
pub filepath: PathBuf,
/// `lock_params` describes whether acquiring the lock is meant
/// to be a blocking operation or a non-blocking one.
///
/// Acquiring a blocking lock blocks until the lock is
/// available.
/// Acquiring a non-blocking lock returns rapidly, either successfully
/// or with an error signifying that someone is already holding
/// the lock.
pub is_blocking: bool,
}

lazy_static! {
/// Only one process should be able to write tantivy's index at a time.
/// This lock file, when present, is in charge of preventing other processes from opening an IndexWriter.
///
/// If the process is killed and this file remains, it is safe to remove it manually.
///
/// Failing to acquire this lock usually means a misuse of tantivy's API
/// (creating more than one instance of the `IndexWriter`), or a spurious
/// lock file remaining after a crash. In the latter case, removing the file after
/// checking that no process running tantivy is running is safe.
pub static ref INDEX_WRITER_LOCK: Lock = Lock {
filepath: PathBuf::from(".tantivy-writer.lock"),
is_blocking: false
};
/// The meta lock file is here to protect the segment files being opened by
/// `.load_searchers()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.
pub static ref META_LOCK: Lock = Lock {
filepath: PathBuf::from(".tantivy-meta.lock"),
is_blocking: true
};
}
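The two lazy_static locks above differ only in their blocking behavior. A brief sketch of what that means for callers (a sketch against the API in this diff, using a RAMDirectory purely for illustration):

```rust
// Sketch: the writer lock fails fast, the meta lock waits.
use tantivy::directory::{Directory, RAMDirectory, INDEX_WRITER_LOCK, META_LOCK};

fn main() {
    let directory = RAMDirectory::create();

    // INDEX_WRITER_LOCK is non-blocking: if another IndexWriter holds it,
    // acquire_lock returns Err(LockError::LockBusy) immediately.
    let writer_guard = directory.acquire_lock(&INDEX_WRITER_LOCK).unwrap();
    assert!(directory.acquire_lock(&INDEX_WRITER_LOCK).is_err());

    // META_LOCK is blocking: a second caller would retry (100 times, 100ms apart
    // in the default Directory::acquire_lock) until the holder drops its guard.
    let meta_guard = directory.acquire_lock(&META_LOCK).unwrap();

    // Dropping the guards deletes the lock files and releases the locks.
    drop(writer_guard);
    drop(meta_guard);
}
```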
@@ -3,6 +3,22 @@ use std::fmt;
use std::io;
use std::path::PathBuf;

/// Error while trying to acquire a directory lock.
#[derive(Debug, Fail)]
pub enum LockError {
/// Failed to acquire a lock as it is already held by another
/// client.
/// - In the context of a blocking lock, this means the lock was not released within some `timeout` period.
/// - In the context of a non-blocking lock, this means the lock was busy at the moment of the call.
#[fail(
display = "Could not acquire lock as it is already held, possibly by a different process."
)]
LockBusy,
/// Trying to acquire a lock failed with an `IOError`
#[fail(display = "Failed to acquire the lock due to an io::Error.")]
IOError(io::Error),
}

/// General IO error with an optional path to the offending file.
#[derive(Debug)]
pub struct IOError {
@@ -10,6 +26,12 @@ pub struct IOError {
err: io::Error,
}

impl Into<io::Error> for IOError {
fn into(self) -> io::Error {
self.err
}
}

impl fmt::Display for IOError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.path {
@@ -1,8 +1,10 @@
use core::MANAGED_FILEPATH;
use directory::error::{DeleteError, IOError, OpenReadError, OpenWriteError};
use directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use directory::DirectoryLock;
use directory::Lock;
use directory::META_LOCK;
use directory::{ReadOnlySource, WritePtr};
use error::DataCorruption;
use indexer::LockType;
use serde_json;
use std::collections::HashSet;
use std::io;
@@ -92,6 +94,9 @@ impl ManagedDirectory {
///
/// * `living_files` - List of files that are still used by the index.
///
/// The use of a callback ensures that the list of living_files is computed
/// while we hold the lock on meta.
///
/// This method does not panic nor return errors.
/// If a file cannot be deleted (for permission reasons for instance)
/// an error is simply logged, and the file remains in the list of managed
@@ -122,7 +127,7 @@ impl ManagedDirectory {
// 2) writer changes meta.json (for instance after a merge or a commit)
// 3) gc kicks in.
// 4) gc removes a file that was useful for process B, before process B opened it.
if let Ok(_meta_lock) = LockType::MetaLock.acquire_lock(self) {
if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
let living_files = get_living_files();
for managed_path in &meta_informations_rlock.managed_paths {
if !living_files.contains(managed_path) {
@@ -232,6 +237,10 @@ impl Directory for ManagedDirectory {
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}

fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock)
}
}

impl Clone for ManagedDirectory {
@@ -1,8 +1,14 @@
extern crate fs2;

use self::fs2::FileExt;
use atomicwrites;
use common::make_io_err;
use directory::error::LockError;
use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use directory::shared_vec_slice::SharedVecSlice;
use directory::Directory;
use directory::DirectoryLock;
use directory::Lock;
use directory::ReadOnlySource;
use directory::WritePtr;
use fst::raw::MmapReadOnly;
@@ -115,6 +121,14 @@ impl MmapCache {
///
/// The Mmap objects are cached to limit the
/// system calls.
///
/// In the `MmapDirectory`, locks are implemented using the `fs2` crate definition of locks.
///
/// On MacOS & Linux, it relies on `flock` (aka `BSD Lock`). These locks solve most of the
/// problems related to POSIX Locks, but their contract may not be respected on `NFS`
/// depending on the implementation.
///
/// On Windows the semantics are again different.
#[derive(Clone)]
pub struct MmapDirectory {
root_path: PathBuf,
@@ -213,6 +227,21 @@ impl MmapDirectory {
}
}

/// We rely on fs2 for file locking. On Windows & MacOS this
/// uses BSD locks (`flock`). The lock is actually released when
/// the `File` object is dropped and its associated file descriptor
/// is closed.
struct ReleaseLockFile {
_file: File,
path: PathBuf,
}

impl Drop for ReleaseLockFile {
fn drop(&mut self) {
debug!("Releasing lock {:?}", self.path);
}
}

/// This Write wraps a File, but has the specificity of
/// calling `sync_all` on flush.
struct SafeFileWriter(File);
@@ -354,6 +383,26 @@ impl Directory for MmapDirectory {
meta_file.write(|f| f.write_all(data))?;
Ok(())
}

fn acquire_lock(&self, lock: &Lock) -> Result<DirectoryLock, LockError> {
let full_path = self.resolve_path(&lock.filepath);
// We make sure that the file exists.
let file: File = OpenOptions::new()
.write(true)
.create(true) //< if the file does not exist yet, create it.
.open(&full_path)
.map_err(LockError::IOError)?;
if lock.is_blocking {
file.lock_exclusive().map_err(LockError::IOError)?;
} else {
file.try_lock_exclusive().map_err(|_| LockError::LockBusy)?
}
// dropping the file handle will release the lock.
Ok(DirectoryLock::from(Box::new(ReleaseLockFile {
path: lock.filepath.clone(),
_file: file,
})))
}
}

#[cfg(test)]
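The MmapDirectory implementation above delegates locking to fs2's `flock`-style API. A standalone sketch of that primitive, assuming fs2 0.4 as pinned in Cargo.toml (the lock-file name is arbitrary):

```rust
// Sketch: advisory file locking with fs2, the primitive behind MmapDirectory::acquire_lock.
extern crate fs2;

use fs2::FileExt;
use std::fs::OpenOptions;

fn main() -> std::io::Result<()> {
    let file = OpenOptions::new()
        .write(true)
        .create(true) // make sure the lock file exists
        .open("example.lock")?;

    // Non-blocking attempt: errors immediately if another descriptor holds the lock.
    file.try_lock_exclusive()?;

    // ... critical section ...

    // Explicit unlock is optional: dropping `file` closes the descriptor
    // and releases the flock-style lock anyway.
    file.unlock()?;
    Ok(())
}
```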
@@ -8,6 +8,7 @@ WORM directory abstraction.
mod mmap_directory;

mod directory;
mod directory_lock;
mod managed_directory;
mod ram_directory;
mod read_only_source;
@@ -16,11 +17,12 @@ mod shared_vec_slice;
/// Errors specific to the directory module.
pub mod error;

use std::io::{BufWriter, Seek, Write};

pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
use std::io::{BufWriter, Seek, Write};

#[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory;
@@ -38,128 +40,4 @@ impl<T: Seek + Write> SeekableWrite for T {}
pub type WritePtr = BufWriter<Box<SeekableWrite>>;

#[cfg(test)]
mod tests {

use super::*;
use std::io::{Seek, SeekFrom, Write};
use std::path::Path;

lazy_static! {
static ref TEST_PATH: &'static Path = Path::new("some_path_for_test");
}

#[test]
fn test_ram_directory() {
let mut ram_directory = RAMDirectory::create();
test_directory(&mut ram_directory);
}

#[test]
#[cfg(feature = "mmap")]
fn test_mmap_directory() {
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
test_directory(&mut mmap_directory);
}

#[test]
#[should_panic]
fn ram_directory_panics_if_flush_forgotten() {
let mut ram_directory = RAMDirectory::create();
let mut write_file = ram_directory.open_write(*TEST_PATH).unwrap();
assert!(write_file.write_all(&[4]).is_ok());
}

fn test_simple(directory: &mut Directory) {
{
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
write_file.write_all(&[4]).unwrap();
write_file.write_all(&[3]).unwrap();
write_file.write_all(&[7, 3, 5]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
}

assert!(directory.delete(*TEST_PATH).is_ok());
assert!(!directory.exists(*TEST_PATH));
}

fn test_seek(directory: &mut Directory) {
{
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[4, 3, 7, 3, 5]).unwrap();
write_file.seek(SeekFrom::Start(0)).unwrap();
write_file.write_all(&[3, 1]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[3u8, 1u8, 7u8, 3u8, 5u8]);
}

assert!(directory.delete(*TEST_PATH).is_ok());
}

fn test_rewrite_forbidden(directory: &mut Directory) {
{
directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
}
{
assert!(directory.open_write(*TEST_PATH).is_err());
}
assert!(directory.delete(*TEST_PATH).is_ok());
}

fn test_write_create_the_file(directory: &mut Directory) {
{
assert!(directory.open_read(*TEST_PATH).is_err());
let _w = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
assert!(directory.open_read(*TEST_PATH).is_ok());
assert!(directory.delete(*TEST_PATH).is_ok());
}
}

fn test_directory_delete(directory: &mut Directory) {
assert!(directory.open_read(*TEST_PATH).is_err());
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[1, 2, 3, 4]).unwrap();
write_file.flush().unwrap();
{
let read_handle = directory.open_read(*TEST_PATH).unwrap();
{
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);

// Mapped files can't be deleted on Windows
if !cfg!(windows) {
assert!(directory.delete(*TEST_PATH).is_ok());
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
}

assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
}
}

if cfg!(windows) {
assert!(directory.delete(*TEST_PATH).is_ok());
}

assert!(directory.open_read(*TEST_PATH).is_err());
assert!(directory.delete(*TEST_PATH).is_err());
}

fn test_directory(directory: &mut Directory) {
test_simple(directory);
test_seek(directory);
test_rewrite_forbidden(directory);
test_write_create_the_file(directory);
test_directory_delete(directory);
}

}
mod tests;
src/directory/tests.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
use super::*;
use std::io::{Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::time;

lazy_static! {
static ref TEST_PATH: &'static Path = Path::new("some_path_for_test");
}

#[test]
fn test_ram_directory() {
let mut ram_directory = RAMDirectory::create();
test_directory(&mut ram_directory);
}

#[test]
#[cfg(feature = "mmap")]
fn test_mmap_directory() {
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
test_directory(&mut mmap_directory);
}

#[test]
#[should_panic]
fn ram_directory_panics_if_flush_forgotten() {
let mut ram_directory = RAMDirectory::create();
let mut write_file = ram_directory.open_write(*TEST_PATH).unwrap();
assert!(write_file.write_all(&[4]).is_ok());
}

fn test_simple(directory: &mut Directory) {
{
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
write_file.write_all(&[4]).unwrap();
write_file.write_all(&[3]).unwrap();
write_file.write_all(&[7, 3, 5]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
}

assert!(directory.delete(*TEST_PATH).is_ok());
assert!(!directory.exists(*TEST_PATH));
}

fn test_seek(directory: &mut Directory) {
{
{
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[4, 3, 7, 3, 5]).unwrap();
write_file.seek(SeekFrom::Start(0)).unwrap();
write_file.write_all(&[3, 1]).unwrap();
write_file.flush().unwrap();
}
let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file;
assert_eq!(data, &[3u8, 1u8, 7u8, 3u8, 5u8]);
}

assert!(directory.delete(*TEST_PATH).is_ok());
}

fn test_rewrite_forbidden(directory: &mut Directory) {
{
directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
}
{
assert!(directory.open_write(*TEST_PATH).is_err());
}
assert!(directory.delete(*TEST_PATH).is_ok());
}

fn test_write_create_the_file(directory: &mut Directory) {
{
assert!(directory.open_read(*TEST_PATH).is_err());
let _w = directory.open_write(*TEST_PATH).unwrap();
assert!(directory.exists(*TEST_PATH));
assert!(directory.open_read(*TEST_PATH).is_ok());
assert!(directory.delete(*TEST_PATH).is_ok());
}
}

fn test_directory_delete(directory: &mut Directory) {
assert!(directory.open_read(*TEST_PATH).is_err());
let mut write_file = directory.open_write(*TEST_PATH).unwrap();
write_file.write_all(&[1, 2, 3, 4]).unwrap();
write_file.flush().unwrap();
{
let read_handle = directory.open_read(*TEST_PATH).unwrap();
{
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);

// Mapped files can't be deleted on Windows
if !cfg!(windows) {
assert!(directory.delete(*TEST_PATH).is_ok());
assert_eq!(&*read_handle, &[1u8, 2u8, 3u8, 4u8]);
}

assert!(directory.delete(Path::new("SomeOtherPath")).is_err());
}
}

if cfg!(windows) {
assert!(directory.delete(*TEST_PATH).is_ok());
}

assert!(directory.open_read(*TEST_PATH).is_err());
assert!(directory.delete(*TEST_PATH).is_err());
}

fn test_directory(directory: &mut Directory) {
test_simple(directory);
test_seek(directory);
test_rewrite_forbidden(directory);
test_write_create_the_file(directory);
test_directory_delete(directory);
test_lock_non_blocking(directory);
test_lock_blocking(directory);
}

fn test_lock_non_blocking(directory: &mut Directory) {
{
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: false,
});
assert!(lock_a_res.is_ok());
let lock_b_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("b.lock"),
is_blocking: false,
});
assert!(lock_b_res.is_ok());
let lock_a_res2 = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: false,
});
assert!(lock_a_res2.is_err());
}
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: false,
});
assert!(lock_a_res.is_ok());
}

fn test_lock_blocking(directory: &mut Directory) {
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: true,
});
assert!(lock_a_res.is_ok());
std::thread::spawn(move || {
//< lock_a_res is sent to the thread.
std::thread::sleep(time::Duration::from_millis(10));
// explicitly dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way.
drop(lock_a_res);
});
{
// A non-blocking call should fail, as the thread is running and holding the lock.
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: false,
});
assert!(lock_a_res.is_err());
}
{
// the blocking call should wait for at least 10ms.
let start = time::Instant::now();
let lock_a_res = directory.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"),
is_blocking: true,
});
assert!(lock_a_res.is_ok());
assert!(start.elapsed().subsec_millis() >= 10);
}
}
src/error.rs (15 lines changed)
@@ -2,9 +2,9 @@

use std::io;

use directory::error::LockError;
use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use fastfield::FastFieldNotAvailableError;
use indexer::LockType;
use query;
use schema;
use serde_json;
@@ -57,11 +57,8 @@ pub enum TantivyError {
#[fail(display = "Index already exists")]
IndexAlreadyExists,
/// Failed to acquire file lock
#[fail(
display = "Failed to acquire Lockfile: {:?}. Possible causes: another IndexWriter instance or panic during previous lock drop.",
_0
)]
LockFailure(LockType),
#[fail(display = "Failed to acquire Lockfile: {:?}. {:?}", _0, _1)]
LockFailure(LockError, Option<String>),
/// IO Error.
#[fail(display = "An IO error occurred: '{}'", _0)]
IOError(#[cause] IOError),
@@ -100,6 +97,12 @@ impl From<FastFieldNotAvailableError> for TantivyError {
}
}

impl From<LockError> for TantivyError {
fn from(lock_error: LockError) -> TantivyError {
TantivyError::LockFailure(lock_error, None)
}
}

impl From<IOError> for TantivyError {
fn from(io_error: IOError) -> TantivyError {
TantivyError::IOError(io_error)
@@ -1,131 +0,0 @@
use directory::error::OpenWriteError;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::thread;
use std::time::Duration;
use Directory;
use TantivyError;

#[derive(Debug, Clone, Copy)]
pub enum LockType {
/// Only one process should be able to write tantivy's index at a time.
/// This lock file, when present, is in charge of preventing other processes to open an IndexWriter.
///
/// If the process is killed and this file remains, it is safe to remove it manually.
///
/// Failing to acquire this lock usually means a misuse of tantivy's API,
/// (creating more than one instance of the `IndexWriter`), are a spurious
/// lock file remaining after a crash. In the latter case, removing the file after
/// checking no process running tantivy is running is safe.
IndexWriterLock,
/// The meta lock file is here to protect the segment files being opened by
/// `.load_searchers()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have prefered `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.
/// Right now if the lock cannot be acquire on the first attempt, the logic
/// is very simplistic. We retry after `100ms` until we effectively
/// acquire the lock.
/// This lock should not have much contention in normal usage.
MetaLock,
}

/// Retry the logic of acquiring locks is pretty simple.
/// We just retry `n` times after a given `duratio`, both
/// depending on the type of lock.
struct RetryPolicy {
num_retries: usize,
wait_in_ms: u64,
}

impl RetryPolicy {
fn no_retry() -> RetryPolicy {
RetryPolicy {
num_retries: 0,
wait_in_ms: 0,
}
}

fn wait_and_retry(&mut self) -> bool {
if self.num_retries == 0 {
false
} else {
self.num_retries -= 1;
let wait_duration = Duration::from_millis(self.wait_in_ms);
thread::sleep(wait_duration);
true
}
}
}

impl LockType {
fn retry_policy(self) -> RetryPolicy {
match self {
LockType::IndexWriterLock => RetryPolicy::no_retry(),
LockType::MetaLock => RetryPolicy {
num_retries: 100,
wait_in_ms: 100,
},
}
}

fn try_acquire_lock(self, directory: &mut Directory) -> Result<DirectoryLock, TantivyError> {
let path = self.filename();
let mut write = directory.open_write(path).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TantivyError::LockFailure(self),
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
})?;
write.flush()?;
Ok(DirectoryLock {
directory: directory.box_clone(),
path: path.to_owned(),
})
}

/// Acquire a lock in the given directory.
pub fn acquire_lock(self, directory: &Directory) -> Result<DirectoryLock, TantivyError> {
let mut box_directory = directory.box_clone();
let mut retry_policy = self.retry_policy();
loop {
let lock_result = self.try_acquire_lock(&mut *box_directory);
match lock_result {
Ok(result) => {
return Ok(result);
}
Err(TantivyError::LockFailure(ref filepath)) => {
if !retry_policy.wait_and_retry() {
return Err(TantivyError::LockFailure(filepath.to_owned()));
}
}
Err(_) => {}
}
}
}

fn filename(&self) -> &Path {
match *self {
LockType::MetaLock => Path::new(".tantivy-meta.lock"),
LockType::IndexWriterLock => Path::new(".tantivy-indexer.lock"),
}
}
}

/// The `DirectoryLock` is an object that represents a file lock.
/// See [`LockType`](struct.LockType.html)
///
/// It is transparently associated to a lock file, that gets deleted
/// on `Drop.` The lock is release automatically on `Drop`.
pub struct DirectoryLock {
directory: Box<Directory>,
path: PathBuf,
}

impl Drop for DirectoryLock {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&*self.path) {
error!("Failed to remove the lock file. {:?}", e);
}
}
}
@@ -9,15 +9,15 @@ use core::SegmentId;
use core::SegmentMeta;
use core::SegmentReader;
use crossbeam::channel;
use directory::DirectoryLock;
use docset::DocSet;
use error::TantivyError;
use fastfield::write_delete_bitset;
use futures::sync::oneshot::Receiver;
use futures::{Canceled, Future};
use indexer::delete_queue::{DeleteCursor, DeleteQueue};
use indexer::doc_opstamp_mapping::DocToOpstampMapping;
use indexer::operation::DeleteOperation;
use indexer::stamper::Stamper;
use indexer::DirectoryLock;
use indexer::MergePolicy;
use indexer::SegmentEntry;
use indexer::SegmentWriter;
@@ -26,6 +26,7 @@ use schema::Document;
use schema::IndexRecordOption;
use schema::Term;
use std::mem;
use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;
use Result;
@@ -366,13 +367,16 @@ impl IndexWriter {
.add_segment(self.generation, segment_entry);
}

/// *Experimental & Advanced API* Creates a new segment.
/// and marks it as currently in write.
/// Creates a new segment.
///
/// This method is useful only for users trying to do complex
/// operations, like converting an index format to another.
///
/// It is safe to start writing file associated to the new `Segment`.
/// These will not be garbage collected as long as an instance object of
/// `SegmentMeta` object associated to the new `Segment` is "alive".
pub fn new_segment(&self) -> Segment {
self.segment_updater.new_segment()
self.index.new_segment()
}

/// Spawns a new worker thread for indexing.
@@ -387,6 +391,7 @@ impl IndexWriter {
let mut delete_cursor = self.delete_queue.cursor();

let mem_budget = self.heap_size_in_bytes_per_thread;
let index = self.index.clone();
let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!(
"thrd-tantivy-index{}-gen{}",
@@ -412,7 +417,7 @@ impl IndexWriter {
// was dropped.
return Ok(());
}
let segment = segment_updater.new_segment();
let segment = index.new_segment();
index_documents(
mem_budget,
&segment,
@@ -429,7 +434,7 @@ impl IndexWriter {
}

/// Accessor to the merge policy.
pub fn get_merge_policy(&self) -> Box<MergePolicy> {
pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> {
self.segment_updater.get_merge_policy()
}

@@ -454,7 +459,10 @@ impl IndexWriter {
/// Merges a given list of segments
///
/// `segment_ids` is required to be non-empty.
pub fn merge(&mut self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
pub fn merge(
&mut self,
segment_ids: &[SegmentId],
) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> {
self.segment_updater.start_merge(segment_ids)
}

@@ -557,8 +565,7 @@ impl IndexWriter {
// and recreate a new one channels.
self.recreate_document_channel();

let former_workers_join_handle =
mem::replace(&mut self.workers_join_handle, Vec::new());
let former_workers_join_handle = mem::replace(&mut self.workers_join_handle, Vec::new());

for worker_handle in former_workers_join_handle {
let indexing_worker_result = worker_handle
@@ -648,6 +655,7 @@ impl IndexWriter {
mod tests {

use super::initial_table_size;
use directory::error::LockError;
use error::*;
use indexer::NoMergePolicy;
use schema::{self, Document};
@@ -658,10 +666,10 @@ mod tests {
fn test_lockfile_stops_duplicates() {
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer(40_000_000).unwrap();
match index.writer(40_000_000) {
Err(TantivyError::LockFailure(_)) => {}
_ => panic!("Expected FileAlreadyExists error"),
let _index_writer = index.writer(3_000_000).unwrap();
match index.writer(3_000_000) {
Err(TantivyError::LockFailure(LockError::LockBusy, _)) => {}
_ => panic!("Expected a `LockFailure` error"),
}
}

@@ -673,8 +681,7 @@ mod tests {
match index.writer_with_num_threads(1, 3_000_000) {
Err(err) => {
let err_msg = err.to_string();
assert!(err_msg.contains("Lockfile"));
assert!(err_msg.contains("Possible causes:"))
assert!(err_msg.contains("already an `IndexWriter`"));
}
_ => panic!("Expected LockfileAlreadyExists error"),
}
@@ -684,7 +691,7 @@ mod tests {
fn test_set_merge_policy() {
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
let index_writer = index.writer(40_000_000).unwrap();
let index_writer = index.writer(3_000_000).unwrap();
assert_eq!(
format!("{:?}", index_writer.get_merge_policy()),
"LogMergePolicy { min_merge_size: 8, min_layer_size: 10000, \
@@ -703,11 +710,11 @@ mod tests {
let schema_builder = schema::Schema::builder();
let index = Index::create_in_ram(schema_builder.build());
{
let _index_writer = index.writer(40_000_000).unwrap();
let _index_writer = index.writer(3_000_000).unwrap();
// the lock should be released when the
// index_writer leaves the scope.
}
let _index_writer_two = index.writer(40_000_000).unwrap();
let _index_writer_two = index.writer(3_000_000).unwrap();
}

#[test]
@@ -734,7 +741,7 @@ mod tests {
index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"c"));
}
assert_eq!(index_writer.commit().unwrap(), 2u64);
assert!(index_writer.commit().is_ok());
index.load_searchers().unwrap();
assert_eq!(num_docs_containing("a"), 0);
assert_eq!(num_docs_containing("b"), 1);
@@ -797,7 +804,6 @@ mod tests {
{
let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
prepared_commit.set_payload("first commit");
assert_eq!(prepared_commit.opstamp(), 100);
prepared_commit.commit().expect("commit failed");
}
{
@@ -831,7 +837,6 @@ mod tests {
{
let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
prepared_commit.set_payload("first commit");
assert_eq!(prepared_commit.opstamp(), 100);
prepared_commit.abort().expect("commit failed");
}
{
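In this hunk `IndexWriter::merge` now returns an `impl Future<Item = SegmentMeta, Error = Canceled>` instead of a bare `Receiver`. A hedged sketch of driving it synchronously, mirroring the test code in this diff (futures 0.1 style `.wait()`):

```rust
// Sketch: merging all searchable segments and blocking on the returned future.
// Assumes the 0.9-dev API from this diff.
use futures::Future; // futures 0.1: provides `.wait()`

fn merge_all(index: &tantivy::Index, index_writer: &mut tantivy::IndexWriter) {
    let segment_ids = index
        .searchable_segment_ids()
        .expect("Searchable segments failed.");
    if segment_ids.is_empty() {
        return; // `merge` requires a non-empty list of segment ids.
    }
    let segment_meta = index_writer
        .merge(&segment_ids)
        .expect("Failed to initiate merge")
        .wait()
        .expect("Merging failed");
    println!("merged into segment {:?}", segment_meta.id());
}
```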
src/indexer/merge_operation.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
use census::{Inventory, TrackedObject};
use std::collections::HashSet;
use SegmentId;

#[derive(Default)]
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);

impl MergeOperationInventory {
pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default();
for merge_op in self.0.list() {
for &segment_id in &merge_op.segment_ids {
segment_in_merge.insert(segment_id);
}
}
segment_in_merge
}
}

/// A `MergeOperation` has two roles.
/// It carries all of the information required to describe a merge:
/// - `target_opstamp` is the opstamp up to which we want to consume the
/// delete queue and reflect their deletes.
/// - `segment_ids` is the list of segments to be merged.
///
/// The second role is to keep track of the fact that these
/// segments are in merge and avoid starting a merge operation that
/// may conflict with this one.
///
/// This works by tracking merge operations. When considering computing
/// merge candidates, we simply list tracked merge operations and remove
/// their segments from possible merge candidates.
pub struct MergeOperation {
inner: TrackedObject<InnerMergeOperation>,
}

struct InnerMergeOperation {
target_opstamp: u64,
segment_ids: Vec<SegmentId>,
}

impl MergeOperation {
pub fn new(
inventory: &MergeOperationInventory,
target_opstamp: u64,
segment_ids: Vec<SegmentId>,
) -> MergeOperation {
let inner_merge_operation = InnerMergeOperation {
target_opstamp,
segment_ids,
};
MergeOperation {
inner: inventory.0.track(inner_merge_operation),
}
}

pub fn target_opstamp(&self) -> u64 {
self.inner.target_opstamp
}

pub fn segment_ids(&self) -> &[SegmentId] {
&self.inner.segment_ids[..]
}
}
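`MergeOperationInventory` above leans on the census crate: every `track`ed object stays visible through `list()` until it is dropped. A small hedged sketch of that mechanism in isolation (census 0.2 as pinned in Cargo.toml; the `Job` type is made up):

```rust
// Sketch: live-object tracking with census, the pattern behind MergeOperationInventory.
extern crate census;

use census::Inventory;

#[derive(Debug)]
struct Job {
    id: u64,
}

fn main() {
    let inventory: Inventory<Job> = Inventory::new();

    let tracked_a = inventory.track(Job { id: 1 });
    let tracked_b = inventory.track(Job { id: 2 });
    // `list()` returns handles to every object still alive.
    assert_eq!(inventory.list().len(), 2);

    drop(tracked_a);
    // Dropping the TrackedObject removes it from the inventory automatically,
    // just like a finished MergeOperation frees its segments for future merges.
    assert_eq!(inventory.list().len(), 1);
    println!("still tracked: {:?}", inventory.list()[0].id);

    drop(tracked_b);
}
```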
@@ -11,7 +11,7 @@ pub struct MergeCandidate(pub Vec<SegmentId>);
///
/// Every time the list of segments changes, the segment updater
/// asks the merge policy if some segments should be merged.
pub trait MergePolicy: MergePolicyClone + marker::Send + marker::Sync + Debug {
pub trait MergePolicy: marker::Send + marker::Sync + Debug {
/// Given the list of segment metas, returns the list of merge candidates.
///
/// This call happens on the segment updater thread, and will block
@@ -19,21 +19,6 @@ pub trait MergePolicy: MergePolicyClone + marker::Send + marker::Sync + Debug {
fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate>;
}

/// MergePolicyClone
pub trait MergePolicyClone {
/// Returns a boxed clone of the MergePolicy.
fn box_clone(&self) -> Box<MergePolicy>;
}

impl<T> MergePolicyClone for T
where
T: 'static + MergePolicy + Clone,
{
fn box_clone(&self) -> Box<MergePolicy> {
Box::new(self.clone())
}
}

/// Never merge segments.
#[derive(Debug, Clone)]
pub struct NoMergePolicy;
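With `MergePolicyClone` gone, a merge policy only has to implement `compute_merge_candidates`. A hedged sketch of a toy policy (the pairing heuristic is invented for illustration, and the exact re-export paths for `MergePolicy`/`MergeCandidate`/`SegmentMeta` are assumptions):

```rust
// Sketch: a custom MergePolicy that proposes merging segments two by two.
use tantivy::merge_policy::{MergeCandidate, MergePolicy};
use tantivy::SegmentMeta;

#[derive(Debug, Clone)]
struct PairwiseMergePolicy;

impl MergePolicy for PairwiseMergePolicy {
    fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> {
        segments
            .chunks(2)
            .filter(|pair| pair.len() == 2) // leave a lone trailing segment alone
            .map(|pair| MergeCandidate(pair.iter().map(|meta| meta.id()).collect()))
            .collect()
    }
}
```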
@@ -654,6 +654,7 @@ mod tests {
use schema::IntOptions;
use schema::Term;
use schema::TextFieldIndexing;
use schema::INT_INDEXED;
use std::io::Cursor;
use DocAddress;
use IndexWriter;
@@ -835,7 +836,7 @@ mod tests {
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();

let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field);
@@ -983,7 +984,7 @@ mod tests {
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
let ref searcher = *index.searcher();
let searcher = index.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
@@ -1029,7 +1030,7 @@ mod tests {
index_writer.commit().unwrap();

index.load_searchers().unwrap();
let ref searcher = *index.searcher();
let searcher = index.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
@@ -1125,18 +1126,15 @@ mod tests {
{
// Test removing all docs
index_writer.delete_term(Term::from_field_text(text_field, "g"));
index_writer.commit().unwrap();
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();

let ref searcher = *index.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert!(segment_ids.is_empty());
assert!(searcher.segment_readers().is_empty());
assert_eq!(searcher.num_docs(), 0);
}
}
@@ -1147,7 +1145,7 @@ mod tests {
let facet_field = schema_builder.add_facet_field("facet");
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
let mut doc = Document::default();
for facet in doc_facets {
@@ -1212,7 +1210,7 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
@@ -1235,7 +1233,7 @@ mod tests {

// Deleting one term
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term);
@@ -1255,6 +1253,34 @@ mod tests {
}
}

#[test]
fn test_bug_merge() {
let mut schema_builder = schema::Schema::builder();
let int_field = schema_builder.add_u64_field("intvals", INT_INDEXED);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(int_field => 1u64));
index_writer.commit().expect("commit failed");
index_writer.add_document(doc!(int_field => 1u64));
index_writer.commit().expect("commit failed");
index.load_searchers().unwrap();
let searcher = index.searcher();
assert_eq!(searcher.num_docs(), 2);
index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
// commit has not been called yet. The document should still be
// there.
assert_eq!(index.searcher().num_docs(), 2);
}

#[test]
fn test_merge_multivalued_int_fields_all_deleted() {
let mut schema_builder = schema::Schema::builder();
@@ -1265,7 +1291,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());

{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut doc = Document::default();
doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone());
@@ -1273,24 +1299,26 @@ mod tests {
index_writer.add_document(doc);
index_writer.commit().expect("commit failed");
index_writer.delete_term(Term::from_field_u64(int_field, 1));
index_writer.commit().expect("commit failed");
}
index.load_searchers().unwrap();
let searcher = index.searcher();
assert_eq!(searcher.num_docs(), 0);
// Merging the segments
{

let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");

// assert delete has not been committed
index.load_searchers().unwrap();
let searcher = index.searcher();
assert_eq!(searcher.num_docs(), 2);

index_writer.commit().unwrap();

index_writer.wait_merging_threads().unwrap();
}

index.load_searchers().unwrap();
let searcher = index.searcher();
assert_eq!(searcher.num_docs(), 0);
@@ -1306,7 +1334,7 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());

{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
let mut doc = Document::default();
for &val in int_vals {
@@ -1395,7 +1423,7 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
@@ -1,8 +1,9 @@
|
||||
pub mod delete_queue;
|
||||
mod directory_lock;
|
||||
|
||||
mod doc_opstamp_mapping;
|
||||
pub mod index_writer;
|
||||
mod log_merge_policy;
|
||||
mod merge_operation;
|
||||
pub mod merge_policy;
|
||||
pub mod merger;
|
||||
pub mod operation;
|
||||
@@ -15,14 +16,12 @@ pub mod segment_updater;
|
||||
mod segment_writer;
|
||||
mod stamper;
|
||||
|
||||
pub(crate) use self::directory_lock::DirectoryLock;
|
||||
pub use self::directory_lock::LockType;
|
||||
|
||||
pub use self::index_writer::IndexWriter;
|
||||
pub use self::log_merge_policy::LogMergePolicy;
|
||||
pub use self::merge_operation::{MergeOperation, MergeOperationInventory};
|
||||
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
|
||||
pub use self::prepared_commit::PreparedCommit;
|
||||
pub use self::segment_entry::{SegmentEntry, SegmentState};
|
||||
pub use self::segment_entry::SegmentEntry;
|
||||
pub use self::segment_manager::SegmentManager;
|
||||
pub use self::segment_serializer::SegmentSerializer;
|
||||
pub use self::segment_writer::SegmentWriter;
|
||||
|
||||
@@ -4,21 +4,6 @@ use core::SegmentMeta;
|
||||
use indexer::delete_queue::DeleteCursor;
|
||||
use std::fmt;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub enum SegmentState {
|
||||
Ready,
|
||||
InMerge,
|
||||
}
|
||||
|
||||
impl SegmentState {
|
||||
pub fn letter_code(self) -> char {
|
||||
match self {
|
||||
SegmentState::InMerge => 'M',
|
||||
SegmentState::Ready => 'R',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A segment entry describes the state of
|
||||
/// a given segment, at a given instant.
|
||||
///
|
||||
@@ -35,7 +20,6 @@ impl SegmentState {
|
||||
#[derive(Clone)]
|
||||
pub struct SegmentEntry {
|
||||
meta: SegmentMeta,
|
||||
state: SegmentState,
|
||||
delete_bitset: Option<BitSet>,
|
||||
delete_cursor: DeleteCursor,
|
||||
}
|
||||
@@ -49,7 +33,6 @@ impl SegmentEntry {
|
||||
) -> SegmentEntry {
|
||||
SegmentEntry {
|
||||
meta: segment_meta,
|
||||
state: SegmentState::Ready,
|
||||
delete_bitset,
|
||||
delete_cursor,
|
||||
}
|
||||
@@ -72,14 +55,6 @@ impl SegmentEntry {
|
||||
&mut self.delete_cursor
|
||||
}
|
||||
|
||||
/// Return the `SegmentEntry`.
|
||||
///
|
||||
/// The state describes whether the segment is available for
|
||||
/// a merge or not.
|
||||
pub fn state(&self) -> SegmentState {
|
||||
self.state
|
||||
}
|
||||
|
||||
/// Returns the segment id.
|
||||
pub fn segment_id(&self) -> SegmentId {
|
||||
self.meta.id()
|
||||
@@ -89,33 +64,10 @@ impl SegmentEntry {
|
||||
pub fn meta(&self) -> &SegmentMeta {
|
||||
&self.meta
|
||||
}
|
||||
|
||||
/// Mark the `SegmentEntry` as in merge.
///
/// Only segments that are not already
/// in a merge are eligible for future merges.
pub fn start_merge(&mut self) {
    self.state = SegmentState::InMerge;
}

/// Cancel a merge.
///
/// If a merge fails, it is important to switch
/// the segment back to an idle state, so that it
/// may be eligible for future merges.
|
||||
pub fn cancel_merge(&mut self) {
|
||||
self.state = SegmentState::Ready;
|
||||
}
|
||||
|
||||
/// Returns true iff a segment should
|
||||
/// be considered for a merge.
|
||||
pub fn is_ready(&self) -> bool {
|
||||
self.state == SegmentState::Ready
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SegmentEntry {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(formatter, "SegmentEntry({:?}, {:?})", self.meta, self.state)
|
||||
write!(formatter, "SegmentEntry({:?})", self.meta)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ use Result as TantivyResult;
|
||||
struct SegmentRegisters {
|
||||
uncommitted: SegmentRegister,
|
||||
committed: SegmentRegister,
|
||||
writing: HashSet<SegmentId>,
|
||||
}
|
||||
|
||||
/// The segment manager stores the list of segments
|
||||
@@ -41,12 +40,17 @@ impl Debug for SegmentManager {
|
||||
}
|
||||
|
||||
pub fn get_mergeable_segments(
|
||||
in_merge_segment_ids: &HashSet<SegmentId>,
|
||||
segment_manager: &SegmentManager,
|
||||
) -> (Vec<SegmentMeta>, Vec<SegmentMeta>) {
|
||||
let registers_lock = segment_manager.read();
|
||||
(
|
||||
registers_lock.committed.get_mergeable_segments(),
|
||||
registers_lock.uncommitted.get_mergeable_segments(),
|
||||
registers_lock
|
||||
.committed
|
||||
.get_mergeable_segments(in_merge_segment_ids),
|
||||
registers_lock
|
||||
.uncommitted
|
||||
.get_mergeable_segments(in_merge_segment_ids),
|
||||
)
|
||||
}
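// A simplified, self-contained sketch (plain u32 ids instead of tantivy's
// SegmentId/SegmentMeta) of the filtering introduced above: segments whose id
// already belongs to a running merge are excluded from the mergeable set.
use std::collections::HashSet;

#[derive(Clone, Debug, PartialEq)]
struct SegmentMetaSketch { id: u32, num_docs: u32 }

fn mergeable_segments(in_merge: &HashSet<u32>, all: &[SegmentMetaSketch]) -> Vec<SegmentMetaSketch> {
    all.iter()
        .filter(|meta| !in_merge.contains(&meta.id))
        .cloned()
        .collect()
}

fn main() {
    let segments = vec![
        SegmentMetaSketch { id: 1, num_docs: 10 },
        SegmentMetaSketch { id: 2, num_docs: 20 },
    ];
    let in_merge: HashSet<u32> = [1u32].iter().cloned().collect();
    // only segment 2 is still available for a new merge
    assert_eq!(
        mergeable_segments(&in_merge, &segments),
        vec![SegmentMetaSketch { id: 2, num_docs: 20 }]
    );
}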
|
||||
|
||||
@@ -59,7 +63,6 @@ impl SegmentManager {
|
||||
registers: RwLock::new(SegmentRegisters {
|
||||
uncommitted: SegmentRegister::default(),
|
||||
committed: SegmentRegister::new(segment_metas, delete_cursor),
|
||||
writing: HashSet::new(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
@@ -72,12 +75,6 @@ impl SegmentManager {
|
||||
segment_entries
|
||||
}
|
||||
|
||||
/// Returns the overall number of segments in the `SegmentManager`
|
||||
pub fn num_segments(&self) -> usize {
|
||||
let registers_lock = self.read();
|
||||
registers_lock.committed.len() + registers_lock.uncommitted.len()
|
||||
}
|
||||
|
||||
/// List the files that are useful to the index.
|
||||
///
|
||||
/// This does not include lock files, or files that are obsolete
|
||||
@@ -106,6 +103,21 @@ impl SegmentManager {
|
||||
.expect("Failed to acquire write lock on SegmentManager.")
|
||||
}
|
||||
|
||||
/// Deletes all empty segments
fn remove_empty_segments(&self) {
    let mut registers_lock = self.write();
    registers_lock
        .committed
        .segment_entries()
        .iter()
        .filter(|segment| segment.meta().num_docs() == 0)
        .for_each(|segment| {
            registers_lock
                .committed
                .remove_segment(&segment.segment_id())
        });
}
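// A minimal sketch (a plain HashMap standing in for the SegmentRegister) of the
// clean-up above: committed segments that no longer hold any document are
// dropped before the committed metas are listed.
use std::collections::HashMap;

fn remove_empty_segments(committed: &mut HashMap<u32, u32 /* num_docs */>) {
    committed.retain(|_, &mut num_docs| num_docs > 0);
}

fn main() {
    let mut committed: HashMap<u32, u32> = [(1, 0), (2, 5)].iter().cloned().collect();
    remove_empty_segments(&mut committed);
    assert!(!committed.contains_key(&1));
    assert!(committed.contains_key(&2));
}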
|
||||
|
||||
pub fn commit(&self, segment_entries: Vec<SegmentEntry>) {
|
||||
let mut registers_lock = self.write();
|
||||
registers_lock.committed.clear();
|
||||
@@ -121,25 +133,22 @@ impl SegmentManager {
|
||||
/// the `segment_ids` are not either all committed or all
|
||||
/// uncommitted.
|
||||
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
|
||||
let mut registers_lock = self.write();
|
||||
let registers_lock = self.read();
|
||||
let mut segment_entries = vec![];
|
||||
if registers_lock.uncommitted.contains_all(segment_ids) {
|
||||
for segment_id in segment_ids {
|
||||
let segment_entry = registers_lock.uncommitted
|
||||
.start_merge(segment_id)
|
||||
.get(segment_id)
|
||||
.expect("Segment id not found {}. Should never happen because of the contains all if-block.");
|
||||
segment_entries.push(segment_entry);
|
||||
}
|
||||
} else if registers_lock.committed.contains_all(segment_ids) {
|
||||
for segment_id in segment_ids {
|
||||
let segment_entry = registers_lock.committed
|
||||
.start_merge(segment_id)
|
||||
.get(segment_id)
|
||||
.expect("Segment id not found {}. Should never happen because of the contains all if-block.");
|
||||
segment_entries.push(segment_entry);
|
||||
}
|
||||
for segment_id in segment_ids {
|
||||
registers_lock.committed.start_merge(segment_id);
|
||||
}
|
||||
} else {
|
||||
let error_msg = "Merge operation sent for segments that are not \
|
||||
all uncommited or commited."
|
||||
@@ -149,50 +158,8 @@ impl SegmentManager {
|
||||
Ok(segment_entries)
|
||||
}
|
||||
|
||||
pub fn cancel_merge(
|
||||
&self,
|
||||
before_merge_segment_ids: &[SegmentId],
|
||||
after_merge_segment_id: SegmentId,
|
||||
) {
|
||||
let mut registers_lock = self.write();
|
||||
|
||||
// we mark all segments are ready for merge.
|
||||
{
|
||||
let target_segment_register: &mut SegmentRegister;
|
||||
target_segment_register = {
|
||||
if registers_lock
|
||||
.uncommitted
|
||||
.contains_all(before_merge_segment_ids)
|
||||
{
|
||||
&mut registers_lock.uncommitted
|
||||
} else if registers_lock
|
||||
.committed
|
||||
.contains_all(before_merge_segment_ids)
|
||||
{
|
||||
&mut registers_lock.committed
|
||||
} else {
|
||||
warn!("couldn't find segment in SegmentManager");
|
||||
return;
|
||||
}
|
||||
};
|
||||
for segment_id in before_merge_segment_ids {
|
||||
target_segment_register.cancel_merge(segment_id);
|
||||
}
|
||||
}
|
||||
|
||||
// ... and we make sure the target segment entry
|
||||
// can be garbage collected.
|
||||
registers_lock.writing.remove(&after_merge_segment_id);
|
||||
}
|
||||
|
||||
pub fn write_segment(&self, segment_id: SegmentId) {
|
||||
let mut registers_lock = self.write();
|
||||
registers_lock.writing.insert(segment_id);
|
||||
}
|
||||
|
||||
pub fn add_segment(&self, segment_entry: SegmentEntry) {
|
||||
let mut registers_lock = self.write();
|
||||
registers_lock.writing.remove(&segment_entry.segment_id());
|
||||
registers_lock.uncommitted.add_segment_entry(segment_entry);
|
||||
}
|
||||
|
||||
@@ -202,10 +169,6 @@ impl SegmentManager {
|
||||
after_merge_segment_entry: SegmentEntry,
|
||||
) {
|
||||
let mut registers_lock = self.write();
|
||||
registers_lock
|
||||
.writing
|
||||
.remove(&after_merge_segment_entry.segment_id());
|
||||
|
||||
let target_register: &mut SegmentRegister = {
|
||||
if registers_lock
|
||||
.uncommitted
|
||||
@@ -229,6 +192,7 @@ impl SegmentManager {
|
||||
}
|
||||
|
||||
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {
|
||||
self.remove_empty_segments();
|
||||
let registers_lock = self.read();
|
||||
registers_lock.committed.segment_metas()
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ use core::SegmentMeta;
|
||||
use indexer::delete_queue::DeleteCursor;
|
||||
use indexer::segment_entry::SegmentEntry;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt::{self, Debug, Formatter};
|
||||
|
||||
/// The segment register keeps track
|
||||
@@ -21,8 +22,8 @@ pub struct SegmentRegister {
|
||||
impl Debug for SegmentRegister {
|
||||
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
|
||||
write!(f, "SegmentRegister(")?;
|
||||
for (k, v) in &self.segment_states {
|
||||
write!(f, "{}:{}, ", k.short_uuid_string(), v.state().letter_code())?;
|
||||
for k in self.segment_states.keys() {
|
||||
write!(f, "{}, ", k.short_uuid_string())?;
|
||||
}
|
||||
write!(f, ")")?;
|
||||
Ok(())
|
||||
@@ -34,14 +35,13 @@ impl SegmentRegister {
|
||||
self.segment_states.clear();
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.segment_states.len()
|
||||
}
|
||||
|
||||
pub fn get_mergeable_segments(&self) -> Vec<SegmentMeta> {
|
||||
pub fn get_mergeable_segments(
|
||||
&self,
|
||||
in_merge_segment_ids: &HashSet<SegmentId>,
|
||||
) -> Vec<SegmentMeta> {
|
||||
self.segment_states
|
||||
.values()
|
||||
.filter(|segment_entry| segment_entry.is_ready())
|
||||
.filter(|segment_entry| !in_merge_segment_ids.contains(&segment_entry.segment_id()))
|
||||
.map(|segment_entry| segment_entry.meta().clone())
|
||||
.collect()
|
||||
}
|
||||
@@ -60,7 +60,7 @@ impl SegmentRegister {
|
||||
segment_ids
|
||||
}
|
||||
|
||||
pub fn contains_all(&mut self, segment_ids: &[SegmentId]) -> bool {
|
||||
pub fn contains_all(&self, segment_ids: &[SegmentId]) -> bool {
|
||||
segment_ids
|
||||
.iter()
|
||||
.all(|segment_id| self.segment_states.contains_key(segment_id))
|
||||
@@ -75,20 +75,8 @@ impl SegmentRegister {
|
||||
self.segment_states.remove(segment_id);
|
||||
}
|
||||
|
||||
pub fn cancel_merge(&mut self, segment_id: &SegmentId) {
|
||||
self.segment_states
|
||||
.get_mut(segment_id)
|
||||
.expect("Received a merge notification for a segment that is not registered")
|
||||
.cancel_merge();
|
||||
}
|
||||
|
||||
pub fn start_merge(&mut self, segment_id: &SegmentId) -> Option<SegmentEntry> {
|
||||
if let Some(segment_entry) = self.segment_states.get_mut(segment_id) {
|
||||
segment_entry.start_merge();
|
||||
Some(segment_entry.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
pub fn get(&self, segment_id: &SegmentId) -> Option<SegmentEntry> {
|
||||
self.segment_states.get(segment_id).cloned()
|
||||
}
|
||||
|
||||
pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister {
|
||||
@@ -100,11 +88,6 @@ impl SegmentRegister {
|
||||
}
|
||||
SegmentRegister { segment_states }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn segment_entry(&self, segment_id: &SegmentId) -> Option<SegmentEntry> {
|
||||
self.segment_states.get(segment_id).cloned()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -113,7 +96,6 @@ mod tests {
|
||||
use core::SegmentId;
|
||||
use core::SegmentMeta;
|
||||
use indexer::delete_queue::*;
|
||||
use indexer::SegmentState;
|
||||
|
||||
fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
|
||||
segment_register
|
||||
@@ -137,42 +119,12 @@ mod tests {
|
||||
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
|
||||
segment_register.add_segment_entry(segment_entry);
|
||||
}
|
||||
assert_eq!(
|
||||
segment_register
|
||||
.segment_entry(&segment_id_a)
|
||||
.unwrap()
|
||||
.state(),
|
||||
SegmentState::Ready
|
||||
);
|
||||
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
|
||||
{
|
||||
let segment_meta = SegmentMeta::new(segment_id_b, 0u32);
|
||||
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
|
||||
segment_register.add_segment_entry(segment_entry);
|
||||
}
|
||||
assert_eq!(
|
||||
segment_register
|
||||
.segment_entry(&segment_id_b)
|
||||
.unwrap()
|
||||
.state(),
|
||||
SegmentState::Ready
|
||||
);
|
||||
segment_register.start_merge(&segment_id_a);
|
||||
segment_register.start_merge(&segment_id_b);
|
||||
assert_eq!(
|
||||
segment_register
|
||||
.segment_entry(&segment_id_a)
|
||||
.unwrap()
|
||||
.state(),
|
||||
SegmentState::InMerge
|
||||
);
|
||||
assert_eq!(
|
||||
segment_register
|
||||
.segment_entry(&segment_id_b)
|
||||
.unwrap()
|
||||
.state(),
|
||||
SegmentState::InMerge
|
||||
);
|
||||
segment_register.remove_segment(&segment_id_a);
|
||||
segment_register.remove_segment(&segment_id_b);
|
||||
{
|
||||
|
||||
@@ -16,9 +16,10 @@ use futures_cpupool::CpuFuture;
|
||||
use futures_cpupool::CpuPool;
|
||||
use indexer::delete_queue::DeleteCursor;
|
||||
use indexer::index_writer::advance_deletes;
|
||||
use indexer::merge_operation::MergeOperationInventory;
|
||||
use indexer::merger::IndexMerger;
|
||||
use indexer::stamper::Stamper;
|
||||
use indexer::MergeCandidate;
|
||||
use indexer::MergeOperation;
|
||||
use indexer::SegmentEntry;
|
||||
use indexer::SegmentSerializer;
|
||||
use indexer::{DefaultMergePolicy, MergePolicy};
|
||||
@@ -26,6 +27,7 @@ use schema::Schema;
|
||||
use serde_json;
|
||||
use std::borrow::BorrowMut;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::io::Write;
|
||||
use std::mem;
|
||||
use std::ops::DerefMut;
|
||||
@@ -45,8 +47,16 @@ use Result;
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
pub fn save_new_metas(schema: Schema, opstamp: u64, directory: &mut Directory) -> Result<()> {
|
||||
save_metas(vec![], schema, opstamp, None, directory)
|
||||
pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> {
|
||||
save_metas(
|
||||
&IndexMeta {
|
||||
segments: Vec::new(),
|
||||
schema,
|
||||
opstamp: 0u64,
|
||||
payload: None,
|
||||
},
|
||||
directory,
|
||||
)
|
||||
}
|
||||
|
||||
/// Save the index meta file.
|
||||
@@ -58,20 +68,9 @@ pub fn save_new_metas(schema: Schema, opstamp: u64, directory: &mut Directory) -
|
||||
/// and flushed.
|
||||
///
|
||||
/// This method is not part of tantivy's public API
|
||||
pub fn save_metas(
|
||||
segment_metas: Vec<SegmentMeta>,
|
||||
schema: Schema,
|
||||
opstamp: u64,
|
||||
payload: Option<String>,
|
||||
directory: &mut Directory,
|
||||
) -> Result<()> {
|
||||
let metas = IndexMeta {
|
||||
segments: segment_metas,
|
||||
schema,
|
||||
opstamp,
|
||||
payload,
|
||||
};
|
||||
let mut buffer = serde_json::to_vec_pretty(&metas)?;
|
||||
fn save_metas(metas: &IndexMeta, directory: &mut Directory) -> Result<()> {
|
||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||
// Just adding a new line at the end of the buffer.
|
||||
writeln!(&mut buffer)?;
|
||||
directory.atomic_write(&META_FILEPATH, &buffer[..])?;
|
||||
debug!("Saved metas {:?}", serde_json::to_string_pretty(&metas));
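// A hedged sketch of the serialization step above: pretty-print the meta as
// JSON, append a trailing newline, then hand the buffer to an atomic write.
// `Meta` is a hypothetical stand-in, not tantivy's IndexMeta; it assumes serde
// with the "derive" feature and serde_json as dependencies.
use serde::Serialize;
use std::io::Write;

#[derive(Serialize)]
struct Meta {
    segments: Vec<String>,
    opstamp: u64,
    payload: Option<String>,
}

fn serialize_meta(meta: &Meta) -> serde_json::Result<Vec<u8>> {
    let mut buffer = serde_json::to_vec_pretty(meta)?;
    // just adding a new line at the end of the buffer, as save_metas does
    writeln!(&mut buffer).expect("writing to an in-memory buffer cannot fail");
    Ok(buffer)
}

fn main() {
    let meta = Meta { segments: vec![], opstamp: 0, payload: None };
    let json = serialize_meta(&meta).unwrap();
    assert!(json.ends_with(b"\n"));
}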
|
||||
@@ -83,16 +82,21 @@ pub fn save_metas(
|
||||
//
|
||||
// All this processing happens on a single thread
|
||||
// consuming a common queue.
|
||||
//
|
||||
// We voluntarily pass a merge_operation ref to guarantee that
|
||||
// the merge_operation is alive during the process
|
||||
#[derive(Clone)]
|
||||
pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
|
||||
|
||||
fn perform_merge(
|
||||
merge_operation: &MergeOperation,
|
||||
index: &Index,
|
||||
mut segment_entries: Vec<SegmentEntry>,
|
||||
mut merged_segment: Segment,
|
||||
target_opstamp: u64,
|
||||
) -> Result<SegmentEntry> {
|
||||
let target_opstamp = merge_operation.target_opstamp();
|
||||
|
||||
// first we need to apply deletes to our segment.
|
||||
let mut merged_segment = index.new_segment();
|
||||
|
||||
// TODO add logging
|
||||
let schema = index.schema();
|
||||
@@ -126,15 +130,23 @@ fn perform_merge(
|
||||
}
|
||||
|
||||
struct InnerSegmentUpdater {
|
||||
// we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the
// `SegmentUpdater`.
//
// This should be up to date as all updates happen through
// the unique active `SegmentUpdater`.
|
||||
active_metas: RwLock<Arc<IndexMeta>>,
|
||||
pool: CpuPool,
|
||||
index: Index,
|
||||
segment_manager: SegmentManager,
|
||||
merge_policy: RwLock<Box<MergePolicy>>,
|
||||
merge_policy: RwLock<Arc<Box<MergePolicy>>>,
|
||||
merging_thread_id: AtomicUsize,
|
||||
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
|
||||
generation: AtomicUsize,
|
||||
killed: AtomicBool,
|
||||
stamper: Stamper,
|
||||
merge_operations: MergeOperationInventory,
|
||||
}
|
||||
|
||||
impl SegmentUpdater {
|
||||
@@ -149,32 +161,29 @@ impl SegmentUpdater {
|
||||
.name_prefix("segment_updater")
|
||||
.pool_size(1)
|
||||
.create();
|
||||
let index_meta = index.load_metas()?;
|
||||
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
|
||||
active_metas: RwLock::new(Arc::new(index_meta)),
|
||||
pool,
|
||||
index,
|
||||
segment_manager,
|
||||
merge_policy: RwLock::new(Box::new(DefaultMergePolicy::default())),
|
||||
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
|
||||
merging_thread_id: AtomicUsize::default(),
|
||||
merging_threads: RwLock::new(HashMap::new()),
|
||||
generation: AtomicUsize::default(),
|
||||
killed: AtomicBool::new(false),
|
||||
stamper,
|
||||
merge_operations: Default::default(),
|
||||
})))
|
||||
}
|
||||
|
||||
pub fn new_segment(&self) -> Segment {
|
||||
let new_segment = self.0.index.new_segment();
|
||||
let segment_id = new_segment.id();
|
||||
self.0.segment_manager.write_segment(segment_id);
|
||||
new_segment
|
||||
}
|
||||
|
||||
pub fn get_merge_policy(&self) -> Box<MergePolicy> {
|
||||
self.0.merge_policy.read().unwrap().box_clone()
|
||||
pub fn get_merge_policy(&self) -> Arc<Box<MergePolicy>> {
|
||||
self.0.merge_policy.read().unwrap().clone()
|
||||
}
|
||||
|
||||
pub fn set_merge_policy(&self, merge_policy: Box<MergePolicy>) {
|
||||
*self.0.merge_policy.write().unwrap() = merge_policy;
|
||||
let arc_merge_policy = Arc::new(merge_policy);
|
||||
*self.0.merge_policy.write().unwrap() = arc_merge_policy;
|
||||
}
|
||||
|
||||
fn get_merging_thread_id(&self) -> usize {
|
||||
@@ -244,14 +253,15 @@ impl SegmentUpdater {
|
||||
//
|
||||
// Segment 1 from disk 1, Segment 1 from disk 2, etc.
|
||||
commited_segment_metas.sort_by_key(|segment_meta| -(segment_meta.max_doc() as i32));
|
||||
save_metas(
|
||||
commited_segment_metas,
|
||||
index.schema(),
|
||||
let index_meta = IndexMeta {
|
||||
segments: commited_segment_metas,
|
||||
schema: index.schema(),
|
||||
opstamp,
|
||||
commit_message,
|
||||
directory.box_clone().borrow_mut(),
|
||||
)
|
||||
.expect("Could not save metas.");
|
||||
payload: commit_message,
|
||||
};
|
||||
save_metas(&index_meta, directory.box_clone().borrow_mut())
|
||||
.expect("Could not save metas.");
|
||||
self.store_meta(&index_meta);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -286,51 +296,62 @@ impl SegmentUpdater {
|
||||
}
|
||||
|
||||
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
|
||||
//let future_merged_segment = */
|
||||
let segment_ids_vec = segment_ids.to_vec();
|
||||
self.run_async(move |segment_updater| {
|
||||
segment_updater.start_merge_impl(&segment_ids_vec[..])
|
||||
})
|
||||
.wait()?
|
||||
let commit_opstamp = self.load_metas().opstamp;
|
||||
let merge_operation = MergeOperation::new(
|
||||
&self.0.merge_operations,
|
||||
commit_opstamp,
|
||||
segment_ids.to_vec(),
|
||||
);
|
||||
self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
|
||||
.wait()?
|
||||
}
|
||||
|
||||
fn store_meta(&self, index_meta: &IndexMeta) {
|
||||
*self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone());
|
||||
}
|
||||
fn load_metas(&self) -> Arc<IndexMeta> {
|
||||
self.0.active_metas.read().unwrap().clone()
|
||||
}
|
||||
|
||||
// `segment_ids` is required to be non-empty.
|
||||
fn start_merge_impl(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
|
||||
assert!(!segment_ids.is_empty(), "Segment_ids cannot be empty.");
|
||||
fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> {
|
||||
assert!(
|
||||
!merge_operation.segment_ids().is_empty(),
|
||||
"Segment_ids cannot be empty."
|
||||
);
|
||||
|
||||
let segment_updater_clone = self.clone();
|
||||
let segment_entries: Vec<SegmentEntry> = self.0.segment_manager.start_merge(segment_ids)?;
|
||||
let segment_entries: Vec<SegmentEntry> = self
|
||||
.0
|
||||
.segment_manager
|
||||
.start_merge(merge_operation.segment_ids())?;
|
||||
|
||||
let segment_ids_vec = segment_ids.to_vec();
|
||||
// let segment_ids_vec = merge_operation.segment_ids.to_vec();
|
||||
|
||||
let merging_thread_id = self.get_merging_thread_id();
|
||||
info!(
|
||||
"Starting merge thread #{} - {:?}",
|
||||
merging_thread_id, segment_ids
|
||||
merging_thread_id,
|
||||
merge_operation.segment_ids()
|
||||
);
|
||||
let (merging_future_send, merging_future_recv) = oneshot();
|
||||
|
||||
let target_opstamp = self.0.stamper.stamp();
|
||||
|
||||
// first we need to apply deletes to our segment.
|
||||
let merging_join_handle = thread::Builder::new()
|
||||
.name(format!("mergingthread-{}", merging_thread_id))
|
||||
.spawn(move || {
|
||||
// first we need to apply deletes to our segment.
|
||||
let merged_segment = segment_updater_clone.new_segment();
|
||||
let merged_segment_id = merged_segment.id();
|
||||
let merge_result = perform_merge(
|
||||
&merge_operation,
|
||||
&segment_updater_clone.0.index,
|
||||
segment_entries,
|
||||
merged_segment,
|
||||
target_opstamp,
|
||||
);
|
||||
|
||||
match merge_result {
|
||||
Ok(after_merge_segment_entry) => {
|
||||
let merged_segment_meta = after_merge_segment_entry.meta().clone();
|
||||
segment_updater_clone
|
||||
.end_merge(segment_ids_vec, after_merge_segment_entry)
|
||||
.end_merge(merge_operation, after_merge_segment_entry)
|
||||
.expect("Segment updater thread is corrupted.");
|
||||
|
||||
// the future may fail if the listener of the oneshot future
|
||||
@@ -341,13 +362,18 @@ impl SegmentUpdater {
|
||||
let _merging_future_res = merging_future_send.send(merged_segment_meta);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Merge of {:?} was cancelled: {:?}", segment_ids_vec, e);
|
||||
warn!(
|
||||
"Merge of {:?} was cancelled: {:?}",
|
||||
merge_operation.segment_ids(),
|
||||
e
|
||||
);
|
||||
// ... cancel merge
|
||||
if cfg!(test) {
|
||||
panic!("Merge failed.");
|
||||
}
|
||||
segment_updater_clone.cancel_merge(&segment_ids_vec, merged_segment_id);
|
||||
// merging_future_send will be dropped, sending an error to the future.
|
||||
// As `merge_operation` will be dropped, the segment in merge state will
|
||||
// be available for merge again.
|
||||
// `merging_future_send` will be dropped, sending an error to the future.
|
||||
}
|
||||
}
|
||||
segment_updater_clone
|
||||
@@ -368,16 +394,34 @@ impl SegmentUpdater {
|
||||
}
|
||||
|
||||
fn consider_merge_options(&self) {
|
||||
let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge();
|
||||
let (committed_segments, uncommitted_segments) =
|
||||
get_mergeable_segments(&self.0.segment_manager);
|
||||
get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager);
|
||||
|
||||
// Committed segments cannot be merged with uncommitted_segments.
|
||||
// We therefore consider merges using these two sets of segments independently.
|
||||
let merge_policy = self.get_merge_policy();
|
||||
let mut merge_candidates = merge_policy.compute_merge_candidates(&uncommitted_segments);
|
||||
let committed_merge_candidates = merge_policy.compute_merge_candidates(&committed_segments);
|
||||
merge_candidates.extend_from_slice(&committed_merge_candidates[..]);
|
||||
for MergeCandidate(segment_metas) in merge_candidates {
|
||||
match self.start_merge_impl(&segment_metas) {
|
||||
|
||||
let current_opstamp = self.0.stamper.stamp();
|
||||
let mut merge_candidates: Vec<MergeOperation> = merge_policy
|
||||
.compute_merge_candidates(&uncommitted_segments)
|
||||
.into_iter()
|
||||
.map(|merge_candidate| {
|
||||
MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let commit_opstamp = self.load_metas().opstamp;
|
||||
let committed_merge_candidates = merge_policy
|
||||
.compute_merge_candidates(&committed_segments)
|
||||
.into_iter()
|
||||
.map(|merge_candidate| {
|
||||
MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
merge_candidates.extend(committed_merge_candidates.into_iter());
|
||||
for merge_operation in merge_candidates {
|
||||
match self.start_merge_impl(merge_operation) {
|
||||
Ok(merge_future) => {
|
||||
if let Err(e) = merge_future.fuse().poll() {
|
||||
error!("The merge task failed quickly after starting: {:?}", e);
|
||||
@@ -393,31 +437,16 @@ impl SegmentUpdater {
|
||||
}
|
||||
}
|
||||
|
||||
fn cancel_merge(
|
||||
&self,
|
||||
before_merge_segment_ids: &[SegmentId],
|
||||
after_merge_segment_entry: SegmentId,
|
||||
) {
|
||||
self.0
|
||||
.segment_manager
|
||||
.cancel_merge(before_merge_segment_ids, after_merge_segment_entry);
|
||||
}
|
||||
|
||||
fn end_merge(
|
||||
&self,
|
||||
before_merge_segment_ids: Vec<SegmentId>,
|
||||
merge_operation: MergeOperation,
|
||||
mut after_merge_segment_entry: SegmentEntry,
|
||||
) -> Result<()> {
|
||||
self.run_async(move |segment_updater| {
|
||||
info!("End merge {:?}", after_merge_segment_entry.meta());
|
||||
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
|
||||
if let Some(delete_operation) = delete_cursor.get() {
|
||||
let committed_opstamp = segment_updater
|
||||
.0
|
||||
.index
|
||||
.load_metas()
|
||||
.expect("Failed to read opstamp")
|
||||
.opstamp;
|
||||
let committed_opstamp = segment_updater.load_metas().opstamp;
|
||||
if delete_operation.opstamp < committed_opstamp {
|
||||
let index = &segment_updater.0.index;
|
||||
let segment = index.segment(after_merge_segment_entry.meta().clone());
|
||||
@@ -426,16 +455,15 @@ impl SegmentUpdater {
|
||||
{
|
||||
error!(
|
||||
"Merge of {:?} was cancelled (advancing deletes failed): {:?}",
|
||||
before_merge_segment_ids, e
|
||||
merge_operation.segment_ids(),
|
||||
e
|
||||
);
|
||||
// ... cancel merge
|
||||
if cfg!(test) {
|
||||
panic!("Merge failed.");
|
||||
}
|
||||
segment_updater.cancel_merge(
|
||||
&before_merge_segment_ids,
|
||||
after_merge_segment_entry.segment_id(),
|
||||
);
|
||||
// ... cancel merge
|
||||
// `merge_operations` are tracked. As it is dropped, the
|
||||
// the segment_ids will be available again for merge.
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -443,11 +471,11 @@ impl SegmentUpdater {
|
||||
segment_updater
|
||||
.0
|
||||
.segment_manager
|
||||
.end_merge(&before_merge_segment_ids, after_merge_segment_entry);
|
||||
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
|
||||
segment_updater.consider_merge_options();
|
||||
info!("save metas");
|
||||
let previous_metas = segment_updater.0.index.load_metas().unwrap();
|
||||
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload);
|
||||
let previous_metas = segment_updater.load_metas();
|
||||
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
|
||||
segment_updater.garbage_collect_files_exec();
|
||||
})
|
||||
.wait()
|
||||
@@ -469,32 +497,25 @@ impl SegmentUpdater {
|
||||
/// Obsolete files will eventually be cleaned up
|
||||
/// by the directory garbage collector.
|
||||
pub fn wait_merging_thread(&self) -> Result<()> {
|
||||
let mut num_segments: usize;
|
||||
loop {
|
||||
num_segments = self.0.segment_manager.num_segments();
|
||||
|
||||
let mut new_merging_threads = HashMap::new();
|
||||
{
|
||||
let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = {
|
||||
let mut merging_threads = self.0.merging_threads.write().unwrap();
|
||||
mem::swap(&mut new_merging_threads, merging_threads.deref_mut());
|
||||
mem::replace(merging_threads.deref_mut(), HashMap::new())
|
||||
};
|
||||
if merging_threads.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
debug!("wait merging thread {}", new_merging_threads.len());
|
||||
for (_, merging_thread_handle) in new_merging_threads {
|
||||
debug!("wait merging thread {}", merging_threads.len());
|
||||
for (_, merging_thread_handle) in merging_threads {
|
||||
merging_thread_handle
|
||||
.join()
|
||||
.map(|_| ())
|
||||
.map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
|
||||
}
|
||||
// Our merging thread may have queued their completed
|
||||
// Our merging thread may have queued their completed merged segment.
|
||||
// Let's wait for that too.
|
||||
self.run_async(move |_| {}).wait()?;
|
||||
|
||||
let new_num_segments = self.0.segment_manager.num_segments();
|
||||
|
||||
if new_num_segments >= num_segments {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -514,7 +535,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.set_merge_policy(Box::new(MergeWheneverPossible));
|
||||
|
||||
{
|
||||
@@ -558,4 +579,75 @@ mod tests {
|
||||
assert_eq!(index.searcher().segment_readers().len(), 1);
|
||||
assert_eq!(index.searcher().num_docs(), 302);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn delete_all_docs() {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
|
||||
{
|
||||
for _ in 0..100 {
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
index_writer.add_document(doc!(text_field=>"b"));
|
||||
}
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
for _ in 0..100 {
|
||||
index_writer.add_document(doc!(text_field=>"c"));
|
||||
index_writer.add_document(doc!(text_field=>"d"));
|
||||
}
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>"e"));
|
||||
index_writer.add_document(doc!(text_field=>"f"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
let seg_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
// docs exist, should have at least 1 segment
|
||||
assert!(seg_ids.len() > 0);
|
||||
}
|
||||
|
||||
{
|
||||
let term_vals = vec!["a", "b", "c", "d", "e", "f"];
|
||||
for term_val in term_vals {
|
||||
let term = Term::from_field_text(text_field, term_val);
|
||||
index_writer.delete_term(term);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
index_writer
|
||||
.wait_merging_threads()
|
||||
.expect("waiting for merging threads");
|
||||
}
|
||||
|
||||
index.load_searchers().unwrap();
|
||||
assert_eq!(index.searcher().num_docs(), 0);
|
||||
|
||||
let seg_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("Searchable segments failed.");
|
||||
assert!(seg_ids.is_empty());
|
||||
|
||||
index.load_searchers().unwrap();
|
||||
assert_eq!(index.searcher().num_docs(), 0);
|
||||
// empty segments should be erased
|
||||
assert!(index.searchable_segment_metas().unwrap().is_empty());
|
||||
assert!(index.searcher().segment_readers().is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,50 +1,66 @@
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
// AtomicU64 have not landed in stable.
|
||||
// For the moment let's just use AtomicUsize on
|
||||
// x86/64 bit platform, and a mutex on other platform.
|
||||
|
||||
#[cfg(target = "x86_64")]
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
mod archicture_impl {
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct Stamper(Arc<AtomicU64>);
|
||||
#[derive(Default)]
|
||||
pub struct AtomicU64Ersatz(AtomicUsize);
|
||||
|
||||
impl Stamper {
|
||||
pub fn new(first_opstamp: u64) -> Stamper {
|
||||
Stamper(Arc::new(AtomicU64::new(first_opstamp)))
|
||||
impl AtomicU64Ersatz {
|
||||
pub fn new(first_opstamp: u64) -> AtomicU64Ersatz {
|
||||
AtomicU64Ersatz(AtomicUsize::new(first_opstamp as usize))
|
||||
}
|
||||
|
||||
pub fn stamp(&self) -> u64 {
|
||||
self.0.fetch_add(1u64, Ordering::SeqCst) as u64
|
||||
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
|
||||
self.0.fetch_add(val as usize, order) as u64
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target = "x86_64"))]
|
||||
#[cfg(not(target_arch = "x86_64"))]
|
||||
mod archicture_impl {
|
||||
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::atomic::Ordering;
|
||||
/// Under other architecture, we rely on a mutex.
|
||||
use std::sync::RwLock;
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct Stamper(Arc<Mutex<u64>>);
|
||||
#[derive(Default)]
|
||||
pub struct AtomicU64Ersatz(RwLock<u64>);
|
||||
|
||||
impl Stamper {
|
||||
pub fn new(first_opstamp: u64) -> Stamper {
|
||||
Stamper(Arc::new(Mutex::new(first_opstamp)))
|
||||
impl AtomicU64Ersatz {
|
||||
pub fn new(first_opstamp: u64) -> AtomicU64Ersatz {
|
||||
AtomicU64Ersatz(RwLock::new(first_opstamp))
|
||||
}
|
||||
|
||||
pub fn stamp(&self) -> u64 {
|
||||
let mut guard = self.0.lock().expect("Failed to lock the stamper");
|
||||
let previous_val = *guard;
|
||||
*guard = previous_val + 1;
|
||||
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
|
||||
let mut lock = self.0.write().unwrap();
|
||||
let previous_val = *lock;
|
||||
*lock = previous_val + incr;
|
||||
previous_val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub use self::archicture_impl::Stamper;
use self::archicture_impl::AtomicU64Ersatz;

#[derive(Clone, Default)]
pub struct Stamper(Arc<AtomicU64Ersatz>);

impl Stamper {
    pub fn new(first_opstamp: u64) -> Stamper {
        Stamper(Arc::new(AtomicU64Ersatz::new(first_opstamp)))
    }

    pub fn stamp(&self) -> u64 {
        self.0.fetch_add(1u64, Ordering::SeqCst) as u64
    }
}
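// A self-contained sketch (standalone, not tantivy code) of the portable
// "AtomicU64" shim above: on targets where it is wide enough, a plain
// AtomicUsize backs the u64 opstamp counter; the non-x86_64 variant would use
// a lock instead. Shown here with a small usage example.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

#[derive(Default)]
struct AtomicU64Ersatz(AtomicUsize);

impl AtomicU64Ersatz {
    fn new(first: u64) -> AtomicU64Ersatz {
        AtomicU64Ersatz(AtomicUsize::new(first as usize))
    }
    fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
        self.0.fetch_add(val as usize, order) as u64
    }
}

#[derive(Clone, Default)]
struct Stamper(Arc<AtomicU64Ersatz>);

impl Stamper {
    fn new(first_opstamp: u64) -> Stamper {
        Stamper(Arc::new(AtomicU64Ersatz::new(first_opstamp)))
    }
    // returns the current opstamp and increments the shared counter
    fn stamp(&self) -> u64 {
        self.0.fetch_add(1, Ordering::SeqCst)
    }
}

fn main() {
    let stamper = Stamper::new(10);
    assert_eq!(stamper.stamp(), 10);
    assert_eq!(stamper.stamp(), 11);
    // clones share the same underlying counter
    let clone = stamper.clone();
    assert_eq!(clone.stamp(), 12);
}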
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
src/lib.rs
@@ -129,10 +129,7 @@ extern crate base64;
|
||||
extern crate bit_set;
|
||||
extern crate bitpacking;
|
||||
extern crate byteorder;
|
||||
extern crate scoped_pool;
|
||||
|
||||
extern crate combine;
|
||||
|
||||
extern crate crossbeam;
|
||||
extern crate fnv;
|
||||
extern crate fst;
|
||||
@@ -146,6 +143,7 @@ extern crate num_cpus;
|
||||
extern crate owning_ref;
|
||||
extern crate regex;
|
||||
extern crate rust_stemmers;
|
||||
extern crate scoped_pool;
|
||||
extern crate serde;
|
||||
extern crate stable_deref_trait;
|
||||
extern crate tempdir;
|
||||
@@ -170,7 +168,7 @@ extern crate maplit;
|
||||
extern crate test;
|
||||
|
||||
#[macro_use]
|
||||
extern crate downcast;
|
||||
extern crate downcast_rs;
|
||||
|
||||
#[macro_use]
|
||||
extern crate fail;
|
||||
@@ -231,11 +229,7 @@ pub use common::{i64_to_u64, u64_to_i64};
|
||||
/// Expose the current version of tantivy, as well
|
||||
/// whether it was compiled with the simd compression.
|
||||
pub fn version() -> &'static str {
|
||||
if cfg!(feature = "simdcompression") {
|
||||
concat!(env!("CARGO_PKG_VERSION"), "-simd")
|
||||
} else {
|
||||
concat!(env!("CARGO_PKG_VERSION"), "-nosimd")
|
||||
}
|
||||
env!("CARGO_PKG_VERSION")
|
||||
}
|
||||
|
||||
/// Defines tantivy's merging strategy
|
||||
@@ -348,7 +342,7 @@ mod tests {
|
||||
let index = Index::create_from_tempdir(schema).unwrap();
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"af b");
|
||||
index_writer.add_document(doc);
|
||||
@@ -370,7 +364,7 @@ mod tests {
|
||||
let mut schema_builder = Schema::builder();
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.commit().unwrap();
|
||||
@@ -412,7 +406,7 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"a b c");
|
||||
index_writer.add_document(doc);
|
||||
@@ -440,7 +434,7 @@ mod tests {
|
||||
let text_field = schema_builder.add_text_field("text", TEXT);
|
||||
let index = Index::create_in_ram(schema_builder.build());
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"a b c");
|
||||
index_writer.add_document(doc);
|
||||
@@ -487,7 +481,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// 0
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
// 1
|
||||
@@ -534,7 +528,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// 0
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
// 1
|
||||
@@ -571,7 +565,7 @@ mod tests {
|
||||
}
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
index_writer.delete_term(Term::from_field_text(text_field, "c"));
|
||||
index_writer.rollback().unwrap();
|
||||
@@ -620,7 +614,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(field=>1u64));
|
||||
index_writer.commit().unwrap();
|
||||
index.load_searchers().unwrap();
|
||||
@@ -643,7 +637,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let negative_val = -1i64;
|
||||
index_writer.add_document(doc!(value_field => negative_val));
|
||||
index_writer.commit().unwrap();
|
||||
@@ -667,7 +661,7 @@ mod tests {
|
||||
let absent_field = schema_builder.add_text_field("text", TEXT);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(2, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
assert!(index.load_searchers().is_ok());
|
||||
@@ -684,7 +678,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(2, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
|
||||
|
||||
let add_document = |index_writer: &mut IndexWriter, val: &'static str| {
|
||||
let doc = doc!(text_field=>val);
|
||||
@@ -720,7 +714,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"af af af bc bc");
|
||||
index_writer.add_document(doc);
|
||||
@@ -756,7 +750,7 @@ mod tests {
|
||||
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"af af af b"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
index_writer.add_document(doc!(text_field=>"a b c d"));
|
||||
@@ -809,7 +803,7 @@ mod tests {
|
||||
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field=>"af b");
|
||||
index_writer.add_document(doc);
|
||||
|
||||
@@ -280,7 +280,7 @@ pub mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let mut doc = Document::default();
|
||||
doc.add_text(text_field, "g b b d c g c");
|
||||
@@ -322,7 +322,7 @@ pub mod tests {
|
||||
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for i in 0..num_docs {
|
||||
let mut doc = Document::default();
|
||||
doc.add_u64(value_field, 2);
|
||||
@@ -399,7 +399,7 @@ pub mod tests {
|
||||
|
||||
// delete some of the documents
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.delete_term(term_0);
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
@@ -449,7 +449,7 @@ pub mod tests {
|
||||
|
||||
// delete everything else
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.delete_term(term_1);
|
||||
|
||||
assert!(index_writer.commit().is_ok());
|
||||
@@ -457,25 +457,14 @@ pub mod tests {
|
||||
index.load_searchers().unwrap();
|
||||
|
||||
let searcher = index.searcher();
|
||||
let segment_reader = searcher.segment_reader(0);
|
||||
|
||||
// finally, check that it's empty
|
||||
{
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(segment_postings.skip_next(0), SkipResult::Reached);
|
||||
assert_eq!(segment_postings.doc(), 0);
|
||||
assert!(segment_reader.is_deleted(0));
|
||||
|
||||
let mut segment_postings = segment_reader
|
||||
.inverted_index(term_2.field())
|
||||
.read_postings(&term_2, IndexRecordOption::Basic)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(segment_postings.skip_next(num_docs), SkipResult::End);
|
||||
let searchable_segment_ids = index
|
||||
.searchable_segment_ids()
|
||||
.expect("could not get index segment ids");
|
||||
assert!(searchable_segment_ids.is_empty());
|
||||
assert_eq!(searcher.num_docs(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -506,7 +495,7 @@ pub mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
let posting_list_size = 1_000_000;
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for _ in 0..posting_list_size {
|
||||
let mut doc = Document::default();
|
||||
if rng.gen_bool(1f64 / 15f64) {
|
||||
|
||||
@@ -124,21 +124,17 @@ impl SegmentPostings {
|
||||
}
|
||||
|
||||
fn exponential_search(target: u32, arr: &[u32]) -> (usize, usize) {
|
||||
let mut start = 0;
|
||||
let end = arr.len();
|
||||
debug_assert!(arr.len() <= 128);
|
||||
debug_assert!(target <= arr[end - 1]);
|
||||
let mut jump = 1;
|
||||
loop {
|
||||
let new = start + jump;
|
||||
if new >= end {
|
||||
return (start, end);
|
||||
let mut begin = 0;
|
||||
for &pivot in [1,3,7,15,31,63].iter().take_while(|&&el| el < end) {
|
||||
if arr[pivot] > target {
|
||||
return (begin, pivot);
|
||||
}
|
||||
if arr[new] > target {
|
||||
return (start, new);
|
||||
}
|
||||
start = new;
|
||||
jump *= 2;
|
||||
begin = pivot;
|
||||
}
|
||||
(begin, end)
|
||||
}
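// A standalone sketch (simplified, outside SegmentPostings) of the unrolled
// exponential search above: the fixed probe points 1, 3, 7, 15, 31, 63 bracket
// the target inside a sorted block of at most 128 doc ids, and the returned
// (begin, end) range is presumably then searched for the exact position.
fn exponential_search(target: u32, arr: &[u32]) -> (usize, usize) {
    let end = arr.len();
    debug_assert!(arr.len() <= 128);
    debug_assert!(target <= arr[end - 1]);
    let mut begin = 0;
    for &pivot in [1, 3, 7, 15, 31, 63].iter().take_while(|&&el| el < end) {
        if arr[pivot] > target {
            return (begin, pivot);
        }
        begin = pivot;
    }
    (begin, end)
}

fn main() {
    let arr: Vec<u32> = (0..100).map(|i| i * 2).collect();
    let (begin, end) = exponential_search(9, &arr);
    // the first element >= target lies within the returned range
    let idx = begin + arr[begin..end].iter().take_while(|&&v| v < 9).count();
    assert!(begin <= idx && idx <= end);
    assert_eq!(arr[idx], 10);
}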
|
||||
|
||||
/// Search the first index containing an element greater or equal to the target.
|
||||
@@ -752,7 +748,7 @@ mod tests {
|
||||
let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
let mut last_doc = 0u32;
|
||||
for &doc in docs {
|
||||
for _ in last_doc..doc {
|
||||
@@ -823,7 +819,7 @@ mod tests {
|
||||
let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
// create two postings lists, one containing even numbers,
// the other containing odd numbers.
|
||||
for i in 0..6 {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use core::SegmentReader;
|
||||
use downcast::Downcast;
|
||||
use downcast_rs::Downcast;
|
||||
use query::intersect_scorers;
|
||||
use query::score_combiner::{DoNothingCombiner, ScoreCombiner, SumWithCoordsCombiner};
|
||||
use query::term_query::TermScorer;
|
||||
@@ -10,7 +10,6 @@ use query::RequiredOptionalScorer;
|
||||
use query::Scorer;
|
||||
use query::Union;
|
||||
use query::Weight;
|
||||
use std::borrow::Borrow;
|
||||
use std::collections::HashMap;
|
||||
use Result;
|
||||
|
||||
@@ -25,13 +24,12 @@ where
|
||||
|
||||
{
|
||||
let is_all_term_queries = scorers.iter().all(|scorer| {
|
||||
let scorer_ref: &Scorer = scorer.borrow();
|
||||
Downcast::<TermScorer>::is_type(scorer_ref)
|
||||
scorer.is::<TermScorer>()
|
||||
});
|
||||
if is_all_term_queries {
|
||||
let scorers: Vec<TermScorer> = scorers
|
||||
.into_iter()
|
||||
.map(|scorer| *Downcast::<TermScorer>::downcast(scorer).unwrap())
|
||||
.map(|scorer| *(scorer.downcast::<TermScorer>().map_err(|_| ()).unwrap() ))
|
||||
.collect();
|
||||
let scorer: Box<Scorer> = Box::new(Union::<TermScorer, TScoreCombiner>::from(scorers));
|
||||
return scorer;
|
||||
|
||||
@@ -8,7 +8,7 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
use collector::tests::TestCollector;
|
||||
use downcast::Downcast;
|
||||
use downcast_rs::Downcast;
|
||||
use query::score_combiner::SumWithCoordsCombiner;
|
||||
use query::term_query::TermScorer;
|
||||
use query::Intersection;
|
||||
@@ -29,7 +29,7 @@ mod tests {
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
// writing the segment
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
{
|
||||
let doc = doc!(text_field => "a b c");
|
||||
index_writer.add_document(doc);
|
||||
@@ -72,7 +72,7 @@ mod tests {
|
||||
let searcher = index.searcher();
|
||||
let weight = query.weight(&searcher, true).unwrap();
|
||||
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
assert!(Downcast::<TermScorer>::is_type(&*scorer));
|
||||
assert!(scorer.is::<TermScorer>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -84,13 +84,13 @@ mod tests {
|
||||
let query = query_parser.parse_query("+a +b +c").unwrap();
|
||||
let weight = query.weight(&searcher, true).unwrap();
|
||||
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
assert!(Downcast::<Intersection<TermScorer>>::is_type(&*scorer));
|
||||
assert!(scorer.is::<Intersection<TermScorer>>());
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("+a +(b c)").unwrap();
|
||||
let weight = query.weight(&searcher, true).unwrap();
|
||||
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
assert!(Downcast::<Intersection<Box<Scorer>>>::is_type(&*scorer));
|
||||
assert!(scorer.is::<Intersection<Box<Scorer>>>());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,18 +103,16 @@ mod tests {
|
||||
let query = query_parser.parse_query("+a b").unwrap();
|
||||
let weight = query.weight(&searcher, true).unwrap();
|
||||
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
assert!(Downcast::<
|
||||
RequiredOptionalScorer<Box<Scorer>, Box<Scorer>, SumWithCoordsCombiner>,
|
||||
>::is_type(&*scorer));
|
||||
assert!(scorer.is::<RequiredOptionalScorer<Box<Scorer>, Box<Scorer>, SumWithCoordsCombiner>>());
|
||||
}
|
||||
{
|
||||
let query = query_parser.parse_query("+a b").unwrap();
|
||||
let weight = query.weight(&searcher, false).unwrap();
|
||||
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
|
||||
println!("{:?}", scorer.type_name());
|
||||
assert!(Downcast::<TermScorer>::is_type(&*scorer));
|
||||
assert!(scorer.is::<TermScorer>());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
pub fn test_boolean_query() {
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
use docset::{DocSet, SkipResult};
|
||||
use downcast::Downcast;
|
||||
use query::term_query::TermScorer;
|
||||
use query::EmptyScorer;
|
||||
use query::Scorer;
|
||||
use std::borrow::Borrow;
|
||||
use DocId;
|
||||
use Score;
|
||||
use query::term_query::TermScorer;
|
||||
|
||||
/// Returns the intersection scorer.
|
||||
///
|
||||
@@ -26,13 +24,12 @@ pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> {
|
||||
(Some(single_docset), None) => single_docset,
|
||||
(Some(left), Some(right)) => {
|
||||
{
|
||||
let all_term_scorers = [&left, &right].into_iter().all(|scorer| {
|
||||
let scorer_ref: &Scorer = (*scorer).borrow();
|
||||
Downcast::<TermScorer>::is_type(scorer_ref)
|
||||
let all_term_scorers = [&left, &right].iter().all(|&scorer| {
|
||||
scorer.is::<TermScorer>()
|
||||
});
|
||||
if all_term_scorers {
|
||||
let left = *Downcast::<TermScorer>::downcast(left).unwrap();
|
||||
let right = *Downcast::<TermScorer>::downcast(right).unwrap();
|
||||
let left = *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap());
|
||||
let right = *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap());
|
||||
return Box::new(Intersection {
|
||||
left,
|
||||
right,
|
||||
|
||||
@@ -24,7 +24,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
for &text in texts {
|
||||
let doc = doc!(text_field=>text);
|
||||
index_writer.add_document(doc);
|
||||
@@ -86,7 +86,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a b c"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
@@ -141,7 +141,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"b"));
|
||||
index_writer.add_document(doc!(text_field=>"a b"));
|
||||
index_writer.add_document(doc!(text_field=>"b a"));
|
||||
@@ -173,7 +173,7 @@ mod tests {
|
||||
let schema = schema_builder.build();
|
||||
let index = Index::create_in_ram(schema);
|
||||
{
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
index_writer.add_document(doc!(text_field=>"a b c d e f g h"));
|
||||
assert!(index_writer.commit().is_ok());
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
use super::Weight;
use core::searcher::Searcher;
use downcast;
use downcast_rs;
use std::collections::BTreeSet;
use std::fmt;
use Result;
@@ -39,7 +39,7 @@ use Term;
///
/// When implementing a new type of `Query`, it is normal to implement a
/// dedicated `Query`, `Weight` and `Scorer`.
pub trait Query: QueryClone + downcast::Any + fmt::Debug {
pub trait Query: QueryClone + downcast_rs::Downcast + fmt::Debug {
/// Create the weight associated to a query.
///
/// If scoring is not required, setting `scoring_enabled` to `false`
@@ -96,7 +96,4 @@ impl QueryClone for Box<Query> {
}
}

#[allow(missing_docs)]
mod downcast_impl {
downcast!(super::Query);
}
impl_downcast!(Query);
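
With `Query` now bounded on `downcast_rs::Downcast` and the old `downcast_impl` module replaced by `impl_downcast!(Query)`, the generated `is::<T>()` / `downcast_ref::<T>()` accessors become available on the trait object. A small illustrative sketch, assuming `TermQuery` implements `Query` (the `describe` helper is hypothetical):

    use tantivy::query::{Query, TermQuery};

    // Sketch only: inspect a type-erased query through the accessors that
    // impl_downcast!(Query) generates.
    fn describe(query: &Query) -> &'static str {
        if query.is::<TermQuery>() {
            "a term query"
        } else {
            "some other kind of query"
        }
    }
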
@@ -1,6 +1,6 @@
use common::BitSet;
use docset::{DocSet, SkipResult};
use downcast;
use downcast_rs;
use std::ops::DerefMut;
use DocId;
use Score;
@@ -8,7 +8,7 @@ use Score;
/// Scored set of documents matching a query within a specific segment.
///
/// See [`Query`](./trait.Query.html).
pub trait Scorer: downcast::Any + DocSet + 'static {
pub trait Scorer: downcast_rs::Downcast + DocSet + 'static {
/// Returns the score.
///
/// This method will perform a bit of computation and is not cached.
@@ -23,10 +23,8 @@ pub trait Scorer: downcast::Any + DocSet + 'static {
}
}

#[allow(missing_docs)]
mod downcast_impl {
downcast!(super::Scorer);
}
impl_downcast!(Scorer);

impl Scorer for Box<Scorer> {
fn score(&mut self) -> Score {
@@ -25,7 +25,7 @@ mod tests {
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
let doc = doc!(text_field => "a");
index_writer.add_document(doc);

@@ -523,7 +523,7 @@ Survey in 2016, 2017, and 2018."#;
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(text_field => "a"));
index_writer.add_document(doc!(text_field => "a"));
index_writer.add_document(doc!(text_field => "a b"));
@@ -580,7 +580,7 @@ Survey in 2016, 2017, and 2018."#;
let index = Index::create_in_ram(schema);
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
let doc = doc ! (text_field => TEST_TEXT);
index_writer.add_document(doc);

@@ -133,7 +133,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{
{
let mut doc = Document::default();
@@ -50,7 +50,6 @@ where
self.token_mut().text.make_ascii_lowercase();
} else {
to_lowercase_unicode(&mut self.tail.token_mut().text, &mut self.buffer);

mem::swap(&mut self.tail.token_mut().text, &mut self.buffer);
}
true
@@ -73,7 +73,7 @@
//! let en_stem = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser)
//! .filter(Stemmer::new());
//! .filter(Stemmer::new(Language::English));
//! # }
//! ```
//!
@@ -148,7 +148,7 @@ pub use self::ngram_tokenizer::NgramTokenizer;
pub use self::raw_tokenizer::RawTokenizer;
pub use self::remove_long::RemoveLongFilter;
pub use self::simple_tokenizer::SimpleTokenizer;
pub use self::stemmer::Stemmer;
pub use self::stemmer::{Language, Stemmer};
pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain;
pub(crate) use self::tokenizer::box_tokenizer;
@@ -159,8 +159,10 @@ pub use self::tokenizer_manager::TokenizerManager;

#[cfg(test)]
pub mod tests {
use super::Token;
use super::TokenizerManager;
use super::{
Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, Tokenizer,
TokenizerManager,
};
/// This is a function that can be used in tests and doc tests
/// to assert a token's correctness.
@@ -214,6 +216,7 @@ pub mod tests {
.token_stream("Hello, happy tax payer!")
.process(&mut add_token);
}

assert_eq!(tokens.len(), 4);
assert_token(&tokens[0], 0, "hello", 0, 5);
assert_token(&tokens[1], 1, "happi", 7, 12);
@@ -221,6 +224,33 @@ pub mod tests {
assert_token(&tokens[3], 3, "payer", 17, 22);
}

#[test]
fn test_non_en_tokenizer() {
let tokenizer_manager = TokenizerManager::default();
tokenizer_manager.register(
"es_stem",
SimpleTokenizer
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.filter(Stemmer::new(Language::Spanish)),
);
let en_tokenizer = tokenizer_manager.get("es_stem").unwrap();
let mut tokens: Vec<Token> = vec![];
{
let mut add_token = |token: &Token| {
tokens.push(token.clone());
};
en_tokenizer
.token_stream("Hola, feliz contribuyente!")
.process(&mut add_token);
}

assert_eq!(tokens.len(), 3);
assert_token(&tokens[0], 0, "hola", 0, 4);
assert_token(&tokens[1], 1, "feliz", 6, 11);
assert_token(&tokens[2], 2, "contribuyent", 12, 25);
}

#[test]
fn test_tokenizer_empty() {
let tokenizer_manager = TokenizerManager::default();
@@ -4,22 +4,76 @@ use super::{Token, TokenFilter, TokenStream};
use rust_stemmers::{self, Algorithm};
use std::sync::Arc;

/// `Stemmer` token filter. Currently only English is supported.
/// Tokens are expected to be lowercased beforehands.
/// Available stemmer languages.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]
#[allow(missing_docs)]
pub enum Language {
Arabic,
Danish,
Dutch,
English,
Finnish,
French,
German,
Hungarian,
Italian,
Portuguese,
Romanian,
Russian,
Spanish,
Swedish,
Tamil,
Turkish,
}

impl Language {
fn algorithm(self) -> Algorithm {
use self::Language::*;
match self {
Arabic => Algorithm::Arabic,
Danish => Algorithm::Danish,
Dutch => Algorithm::Dutch,
English => Algorithm::English,
Finnish => Algorithm::Finnish,
French => Algorithm::French,
German => Algorithm::German,
Hungarian => Algorithm::Hungarian,
Italian => Algorithm::Italian,
Portuguese => Algorithm::Portuguese,
Romanian => Algorithm::Romanian,
Russian => Algorithm::Russian,
Spanish => Algorithm::Spanish,
Swedish => Algorithm::Swedish,
Tamil => Algorithm::Tamil,
Turkish => Algorithm::Turkish,
}
}
}

/// `Stemmer` token filter. Several languages are supported, see `Language` for the available
/// languages.
/// Tokens are expected to be lowercased beforehand.
#[derive(Clone)]
pub struct Stemmer {
stemmer_algorithm: Arc<Algorithm>,
}

impl Stemmer {
/// Creates a new Stemmer `TokenFilter`.
pub fn new() -> Stemmer {
/// Creates a new Stemmer `TokenFilter` for a given language algorithm.
pub fn new(language: Language) -> Stemmer {
Stemmer {
stemmer_algorithm: Arc::new(Algorithm::English),
stemmer_algorithm: Arc::new(language.algorithm()),
}
}
}

impl Default for Stemmer {
/// Creates a new Stemmer `TokenFilter` for English.
fn default() -> Self {
Stemmer::new(Language::English)
}
}

impl<TailTokenStream> TokenFilter<TailTokenStream> for Stemmer
where
TailTokenStream: TokenStream,
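
Since `Language::algorithm` maps each variant onto the corresponding `rust_stemmers::Algorithm`, any of the listed languages can now be used in a tokenizer pipeline. A short sketch mirroring the `es_stem` test above (the pipeline name, function name, and sample text are illustrative):

    use tantivy::tokenizer::{Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer};
    use tantivy::tokenizer::TokenizerManager;

    // Sketch: register a Spanish stemming pipeline and collect the stems it produces.
    fn spanish_stems(text: &str) -> Vec<String> {
        let manager = TokenizerManager::default();
        manager.register(
            "es_stem",
            SimpleTokenizer
                .filter(RemoveLongFilter::limit(40))
                .filter(LowerCaser)
                .filter(Stemmer::new(Language::Spanish)),
        );
        let tokenizer = manager.get("es_stem").unwrap();
        let mut stems = Vec::new();
        tokenizer
            .token_stream(text)
            .process(&mut |token| stems.push(token.text.clone()));
        stems
    }
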
@@ -64,7 +64,7 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// let en_stem = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser)
/// .filter(Stemmer::new());
/// .filter(Stemmer::default());
/// # }
/// ```
///
@@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use tokenizer::box_tokenizer;
use tokenizer::stemmer::Language;
use tokenizer::BoxedTokenizer;
use tokenizer::LowerCaser;
use tokenizer::RawTokenizer;
@@ -71,7 +72,7 @@ impl Default for TokenizerManager {
SimpleTokenizer
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.filter(Stemmer::new(Language::English)),
);
manager
}
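
The default manager keeps registering the English pipeline under the name "en_stem", now spelled `Stemmer::new(Language::English)`, so existing setups keep their behaviour; `Stemmer::default()` is the equivalent shorthand. A trivial sketch of that expectation:

    use tantivy::tokenizer::TokenizerManager;

    fn main() {
        // Sketch: the stock manager still exposes the English stemming pipeline.
        let manager = TokenizerManager::default();
        assert!(manager.get("en_stem").is_some());
    }
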