Paul Masurel
2019-02-16 16:09:16 +09:00
39 changed files with 397 additions and 336 deletions

View File

@@ -170,10 +170,8 @@ fn main() -> tantivy::Result<()> {
//
// ### Searcher
//
// Let's search our index. Start by reloading
// searchers in the index. This should be done
// after every `commit()`.
index.load_searchers()?;
// A reader is our entry point to the index for search:
// it hands out `Searcher`s and lets us reload them after a commit.
let reader = index.reader();
// We now need to acquire a searcher.
// Some search experience might require more than
@@ -187,7 +185,7 @@ fn main() -> tantivy::Result<()> {
// You should acquire a searcher every time you
// start processing a request,
// and release it right after your query is finished.
let searcher = index.searcher();
let searcher = reader.searcher();
// ### Query
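For reference, here is the pattern this commit migrates to, as a minimal self-contained sketch. It only relies on calls visible in this diff (`Index::reader`, `IndexReader::searcher`); the schema and query are illustrative assumptions:

#[macro_use]
extern crate tantivy;
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let index = Index::create_in_ram(schema_builder.build());
    let mut index_writer = index.writer(3_000_000)?;
    index_writer.add_document(doc!(title => "Frankenstein"));
    index_writer.commit()?;
    // New API: one `IndexReader` per index, one `Searcher` per request.
    let reader = index.reader();
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title]);
    let query = query_parser.parse_query("frankenstein")?;
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;
    assert_eq!(top_docs.len(), 1);
    Ok(())
}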

View File

@@ -170,9 +170,8 @@ fn main() -> tantivy::Result<()> {
price => 5_200u64
));
index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher();
let searcher = index.reader().searcher();
let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);
// here we want to get a hit on the 'ken' in Frankenstein

View File

@@ -91,9 +91,8 @@ fn main() -> tantivy::Result<()> {
increasing confidence in the success of my undertaking."#
));
index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher();
let searcher = index.reader().searcher();
// The query parser can interpret human queries.
// Here, if the user does not specify which

View File

@@ -14,12 +14,16 @@ use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::IndexReader;
// A simple helper function to fetch a single document
// given its ISBN from our index.
// It will be useful for checking our work.
fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Option<Document>> {
let searcher = index.searcher();
fn extract_doc_given_isbn(
reader: &IndexReader,
isbn_term: &Term,
) -> tantivy::Result<Option<Document>> {
let searcher = reader.searcher();
// This is the simplest query you can think of.
// It matches all of the documents containing a specific term.
@@ -85,12 +89,12 @@ fn main() -> tantivy::Result<()> {
isbn => "978-9176370711",
));
index_writer.commit()?;
index.load_searchers()?;
let reader = index.reader();
let frankenstein_isbn = Term::from_field_text(isbn, "978-9176370711");
// Oops, our frankenstein doc seems misspelled.
let frankenstein_doc_misspelled = extract_doc_given_isbn(&index, &frankenstein_isbn)?.unwrap();
let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
schema.to_json(&frankenstein_doc_misspelled),
r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
@@ -129,10 +133,10 @@ fn main() -> tantivy::Result<()> {
// Everything happened as if the document was updated.
index_writer.commit()?;
// We reload our searcher to make our change available to clients.
index.load_searchers()?;
reader.load_searchers()?;
// No more typo!
let frankenstein_new_doc = extract_doc_given_isbn(&index, &frankenstein_isbn)?.unwrap();
let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!(
schema.to_json(&frankenstein_new_doc),
r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,

View File

@@ -55,9 +55,9 @@ fn main() -> tantivy::Result<()> {
index_writer.commit()?;
index.load_searchers()?;
let reader = index.reader();
let searcher = index.searcher();
let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field(tags);
facet_collector.add_facet("/pools");

View File

@@ -18,16 +18,16 @@ fn run() -> Result<()> {
let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
{
let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
for year in 1950u64..2019u64 {
index_writer.add_document(doc!(year_field => year));
}
index_writer.commit()?;
// The index will be a range of years
let reader = index.reader();
let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
for year in 1950u64..2019u64 {
index_writer.add_document(doc!(year_field => year));
}
index.load_searchers()?;
let searcher = index.searcher();
index_writer.commit()?;
// The index will be a range of years
let searcher = reader.searcher();
// The end is excluded, i.e. here we are searching up to 1969.
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
// Uses a Count collector to sum the total number of docs in the range
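The rearranged example above interleaves the new reader with the writer; assembled into a runnable sketch (assuming only the API shown in this diff), it looks like this:

#[macro_use]
extern crate tantivy;
use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INT_INDEXED};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let year_field = schema_builder.add_u64_field("year", INT_INDEXED);
    let index = Index::create_in_ram(schema_builder.build());
    // The reader can be created before any commit...
    let reader = index.reader();
    let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
    for year in 1950u64..2019u64 {
        index_writer.add_document(doc!(year_field => year));
    }
    index_writer.commit()?;
    // ...but it must be reloaded to observe the commit.
    reader.load_searchers()?;
    let searcher = reader.searcher();
    // The upper bound is excluded: this counts the years 1960..=1969.
    let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
    assert_eq!(searcher.search(&docs_in_the_sixties, &Count)?, 10);
    Ok(())
}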

View File

@@ -33,9 +33,9 @@ fn main() -> tantivy::Result<()> {
index_writer.add_document(doc!(title => "The Modern Prometheus"));
index_writer.commit()?;
index.load_searchers()?;
let reader = index.reader();
let searcher = index.searcher();
let searcher = reader.searcher();
// A tantivy index is actually a collection of segments.
// Similarly, a searcher just wraps a list of `SegmentReader`s.
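Since a searcher simply wraps one `SegmentReader` per searchable segment, walking over them is direct. A small sketch, using only methods that appear elsewhere in this diff (`segment_readers`, `num_docs`, `max_doc`):

use tantivy::IndexReader;

fn print_segment_stats(reader: &IndexReader) {
    let searcher = reader.searcher();
    // One `SegmentReader` per searchable segment.
    for (ord, segment_reader) in searcher.segment_readers().iter().enumerate() {
        println!(
            "segment {}: {} alive docs out of {}",
            ord,
            segment_reader.num_docs(),
            segment_reader.max_doc()
        );
    }
}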

View File

@@ -48,9 +48,7 @@ fn main() -> tantivy::Result<()> {
// ...
index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher();
let searcher = index.reader().searcher();
let query_parser = QueryParser::for_index(&index, vec![title, body]);
let query = query_parser.parse_query("sycamore spring")?;

View File

@@ -96,9 +96,9 @@ fn main() -> tantivy::Result<()> {
index_writer.commit()?;
index.load_searchers()?;
let reader = index.reader();
let searcher = index.searcher();
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, body]);

View File

@@ -483,8 +483,7 @@ mod tests {
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top1"));
let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
@@ -532,8 +531,7 @@ mod tests {
facet_field => Facet::from_text(&"/subjects/B/b"),
));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
assert_eq!(searcher.num_docs(), 1);
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/subjects");
@@ -579,9 +577,7 @@ mod tests {
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/facet");
@@ -635,8 +631,7 @@ mod bench {
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let reader = index.reader();
b.iter(|| {
let searcher = reader.searcher();
let facet_collector = FacetCollector::for_field(facet_field);

View File

@@ -101,8 +101,7 @@ mod tests {
assert_eq!(index_writer.commit().unwrap(), 10u64);
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);

View File

@@ -278,8 +278,7 @@ mod tests {
index_writer.add_document(doc!(text=>"abc"));
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let term = Term::from_field_text(text, "abc");
let query = TermQuery::new(term, IndexRecordOption::Basic);

View File

@@ -171,7 +171,7 @@ mod tests {
size => 16u64,
));
});
let searcher = index.searcher();
let searcher = index.reader().searcher();
let top_collector = TopDocs::with_limit(4).order_by_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
@@ -198,7 +198,7 @@ mod tests {
size => 12u64,
));
});
let searcher = index.searcher();
let searcher = index.reader().searcher();
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
let segment_reader = searcher.segment_reader(0u32);
top_collector
@@ -218,7 +218,7 @@ mod tests {
size => 12u64,
));
});
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment = searcher.segment_reader(0);
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size);
assert_matches!(
@@ -241,8 +241,6 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer);
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]);
let query = query_parser.parse_query(query).unwrap();
(index, query)

View File

@@ -148,7 +148,6 @@ mod tests {
index_writer.add_document(doc!(text_field=>"I like Droopy"));
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
}
@@ -159,6 +158,7 @@ mod tests {
let query_parser = QueryParser::for_index(&index, vec![field]);
let text_query = query_parser.parse_query("droopy tax").unwrap();
let score_docs: Vec<(Score, DocAddress)> = index
.reader()
.searcher()
.search(&text_query, &TopDocs::with_limit(4))
.unwrap();
@@ -179,6 +179,7 @@ mod tests {
let query_parser = QueryParser::for_index(&index, vec![field]);
let text_query = query_parser.parse_query("droopy tax").unwrap();
let score_docs: Vec<(Score, DocAddress)> = index
.reader()
.searcher()
.search(&text_query, &TopDocs::with_limit(2))
.unwrap();

View File

@@ -1,19 +1,14 @@
use super::pool::LeasedItem;
use super::pool::Pool;
use super::segment::create_segment;
use super::segment::Segment;
use core::searcher::Searcher;
use core::Executor;
use core::IndexMeta;
use core::SegmentId;
use core::SegmentMeta;
use core::SegmentReader;
use core::META_FILEPATH;
use directory::ManagedDirectory;
#[cfg(feature = "mmap")]
use directory::MmapDirectory;
use directory::INDEX_WRITER_LOCK;
use directory::META_LOCK;
use directory::{Directory, RAMDirectory};
use error::DataCorruption;
use error::TantivyError;
@@ -21,6 +16,8 @@ use indexer::index_writer::open_index_writer;
use indexer::index_writer::HEAP_SIZE_MIN;
use indexer::segment_updater::save_new_metas;
use num_cpus;
use reader::IndexReaderBuilder;
use reader::{IndexReader, ReloadPolicy};
use schema::Field;
use schema::FieldType;
use schema::Schema;
@@ -28,7 +25,6 @@ use serde_json;
use std::borrow::BorrowMut;
use std::fmt;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tokenizer::BoxedTokenizer;
use tokenizer::TokenizerManager;
@@ -52,8 +48,6 @@ fn load_metas(directory: &Directory) -> Result<IndexMeta> {
pub struct Index {
directory: ManagedDirectory,
schema: Schema,
num_searchers: Arc<AtomicUsize>,
searcher_pool: Arc<Pool<Searcher>>,
executor: Arc<Executor>,
tokenizers: TokenizerManager,
}
@@ -158,16 +152,12 @@ impl Index {
/// Creates a new index given a directory and an `IndexMeta`.
fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> {
let schema = metas.schema.clone();
let n_cpus = num_cpus::get();
let index = Index {
directory,
schema,
num_searchers: Arc::new(AtomicUsize::new(n_cpus)),
searcher_pool: Arc::new(Pool::new()),
tokenizers: TokenizerManager::default(),
executor: Arc::new(Executor::single_thread()),
};
index.load_searchers()?;
Ok(index)
}
@@ -197,6 +187,14 @@ impl Index {
}
}
pub fn reader(&self) -> IndexReader {
self.reader_builder().into()
}
pub fn reader_builder(&self) -> IndexReaderBuilder {
IndexReaderBuilder::new(self.clone())
}
/// Opens a new directory from an index path.
#[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
@@ -335,53 +333,6 @@ impl Index {
.map(|segment_meta| segment_meta.id())
.collect())
}
/// Sets the number of searchers to use
///
/// Only works after the next call to `load_searchers`
pub fn set_num_searchers(&mut self, num_searchers: usize) {
self.num_searchers.store(num_searchers, Ordering::Release);
}
/// Update searchers so that they reflect the state of the last
/// `.commit()`.
///
/// If indexing happens in the same process as searching,
/// you most likely want to call `.load_searchers()` right after each
/// successful call to `.commit()`.
///
/// If indexing and searching happen in different processes, the way to
/// get the freshest `index` at all times is to watch `meta.json` and
/// call `load_searchers` whenever a change happens.
pub fn load_searchers(&self) -> Result<()> {
let _meta_lock = self.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
let segment_readers: Vec<SegmentReader> = searchable_segments
.iter()
.map(SegmentReader::open)
.collect::<Result<_>>()?;
let schema = self.schema();
let num_searchers: usize = self.num_searchers.load(Ordering::Acquire);
let searchers = (0..num_searchers)
.map(|_| Searcher::new(schema.clone(), self.clone(), segment_readers.clone()))
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
/// Returns a searcher
///
/// This method should be called every single time a search
/// query is performed.
/// The searchers are taken from a pool of `num_searchers` searchers.
/// If no searcher is available
/// this may block.
///
/// The same searcher must be used for a given query, as it ensures
/// the use of a consistent segment set.
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}
impl fmt::Debug for Index {
@@ -395,8 +346,6 @@ impl Clone for Index {
Index {
directory: self.directory.clone(),
schema: self.schema.clone(),
num_searchers: Arc::clone(&self.num_searchers),
searcher_pool: Arc::clone(&self.searcher_pool),
tokenizers: self.tokenizers.clone(),
executor: self.executor.clone(),
}
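In short, every call site in this commit migrates along the same lines (a before/after sketch):

// Before this commit: searchers live on the Index itself.
index.load_searchers()?;
let searcher = index.searcher();

// After this commit: searchers live behind an IndexReader.
let reader = index.reader();
reader.load_searchers()?; // needed again after later commits
let searcher = reader.searcher();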

View File

@@ -2,7 +2,6 @@ mod executor;
pub mod index;
mod index_meta;
mod inverted_index_reader;
mod pool;
pub mod searcher;
mod segment;
mod segment_component;

View File

@@ -477,9 +477,7 @@ mod test {
// ok, now we should have a deleted doc
index_writer2.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
assert_eq!(vec![0u32, 2u32], docs);
}

View File

@@ -22,9 +22,7 @@ mod tests {
index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
index_writer.add_document(doc!(field=>vec![0u8; 1000]));
assert!(index_writer.commit().is_ok());
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let reader = searcher.segment_reader(0);
let bytes_reader = reader.bytes_fast_field_reader(field).unwrap();

View File

@@ -28,11 +28,12 @@ mod tests {
index_writer.add_document(doc!(field=>5u64, field=>20u64,field=>1u64));
assert!(index_writer.commit().is_ok());
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = searcher.segment_reader(0);
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = reader.multi_fast_field_reader::<u64>(field).unwrap();
let multi_value_reader = segment_reader
.multi_fast_field_reader::<u64>(field)
.unwrap();
{
multi_value_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[4u64]);
@@ -63,8 +64,7 @@ mod tests {
index_writer.add_document(doc!(field=> -5i64, field => -20i64, field=>1i64));
assert!(index_writer.commit().is_ok());
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let reader = searcher.segment_reader(0);
let mut vals = Vec::new();
let multi_value_reader = reader.multi_fast_field_reader::<i64>(field).unwrap();

View File

@@ -75,8 +75,7 @@ mod tests {
index_writer.add_document(doc);
}
index_writer.commit().expect("Commit failed");
index.load_searchers().expect("Reloading searchers");
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();

View File

@@ -22,6 +22,7 @@ fn test_indexing() {
let schema = schema_builder.build();
let index = Index::create_from_tempdir(schema).unwrap();
let reader = index.reader();
let mut rng = thread_rng();
@@ -36,8 +37,8 @@ fn test_indexing() {
index_writer.commit().expect("Commit failed");
committed_docs.extend(&uncommitted_docs);
uncommitted_docs.clear();
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
// check that everything is correct.
check_index_content(&searcher, &committed_docs);
} else {

View File

@@ -43,8 +43,8 @@ pub const HEAP_SIZE_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
// reaches `PIPELINE_MAX_SIZE_IN_DOCS`
const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
type DocumentSender = channel::Sender<AddOperation>;
type DocumentReceiver = channel::Receiver<AddOperation>;
type OperationSender = channel::Sender<AddOperation>;
type OperationReceiver = channel::Receiver<AddOperation>;
/// Split the thread memory budget into
/// - the heap size
@@ -84,8 +84,8 @@ pub struct IndexWriter {
workers_join_handle: Vec<JoinHandle<Result<()>>>,
document_receiver: DocumentReceiver,
document_sender: DocumentSender,
operation_receiver: OperationReceiver,
operation_sender: OperationSender,
segment_updater: SegmentUpdater,
@@ -132,7 +132,7 @@ pub fn open_index_writer(
let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
return Err(TantivyError::InvalidArgument(err_msg));
}
let (document_sender, document_receiver): (DocumentSender, DocumentReceiver) =
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
let delete_queue = DeleteQueue::new();
@@ -150,8 +150,8 @@ pub fn open_index_writer(
heap_size_in_bytes_per_thread,
index: index.clone(),
document_receiver,
document_sender,
operation_receiver: document_receiver,
operation_sender: document_sender,
segment_updater,
@@ -339,7 +339,7 @@ impl IndexWriter {
pub fn wait_merging_threads(mut self) -> Result<()> {
// this will stop the indexing thread,
// dropping the last reference to the segment_updater.
drop(self.document_sender);
drop(self.operation_sender);
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
for join_handle in former_workers_handles {
@@ -388,7 +388,7 @@ impl IndexWriter {
/// The thread consumes documents from the pipeline.
///
fn add_indexing_worker(&mut self) -> Result<()> {
let document_receiver_clone = self.document_receiver.clone();
let document_receiver_clone = self.operation_receiver.clone();
let mut segment_updater = self.segment_updater.clone();
let generation = self.generation;
@@ -479,11 +479,11 @@ impl IndexWriter {
/// when no documents are remaining.
///
/// Returns the former operation receiver.
fn recreate_document_channel(&mut self) -> DocumentReceiver {
let (document_sender, document_receiver): (DocumentSender, DocumentReceiver) =
fn recreate_document_channel(&mut self) -> OperationReceiver {
let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
mem::replace(&mut self.document_sender, document_sender);
mem::replace(&mut self.document_receiver, document_receiver)
mem::replace(&mut self.operation_sender, document_sender);
mem::replace(&mut self.operation_receiver, document_receiver)
}
/// Rollback to the last commit
@@ -501,7 +501,7 @@ impl IndexWriter {
// segment updates will be ignored.
self.segment_updater.kill();
let document_receiver = self.document_receiver.clone();
let document_receiver = self.operation_receiver.clone();
// take the directory lock to create a new index_writer.
let directory_lock = self
@@ -663,7 +663,7 @@ impl IndexWriter {
pub fn add_document(&mut self, document: Document) -> u64 {
let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document };
let send_result = self.document_sender.send(add_operation);
let send_result = self.operation_sender.send(add_operation);
if let Err(e) = send_result {
panic!("Failed to index document. Sending to indexing channel failed. This probably means all of the indexing threads have panicked. {:?}", e);
}
@@ -681,6 +681,8 @@ mod tests {
use schema::{self, Document};
use Index;
use Term;
use snap::Reader;
use IndexReader;
#[test]
fn test_lockfile_stops_duplicates() {
@@ -737,9 +739,9 @@ mod tests {
let _index_writer_two = index.writer(3_000_000).unwrap();
}
fn num_docs_containing_text(index: &Index, term: &str) -> u64 {
let searcher = index.searcher();
let text_field = index.schema().get_field("text").unwrap();
fn num_docs_containing_text(reader: &IndexReader, term: &str) -> u64 {
let searcher = reader.searcher();
let text_field = reader.schema().get_field("text").unwrap();
let term = Term::from_field_text(text_field, term);
searcher.doc_freq(&term)
}
@@ -749,6 +751,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
// writing the segment
let mut index_writer = index.writer(3_000_000).unwrap();
@@ -756,51 +759,50 @@ mod tests {
index_writer.rollback().unwrap();
assert_eq!(index_writer.commit_opstamp(), 0u64);
assert_eq!(num_docs_containing_text(&index, "a"), 0);
assert_eq!(num_docs_containing_text(&reader, "a"), 0);
{
index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"c"));
}
assert!(index_writer.commit().is_ok());
index.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&index, "a"), 0u64);
assert_eq!(num_docs_containing_text(&index, "b"), 1u64);
assert_eq!(num_docs_containing_text(&index, "c"), 1u64);
reader.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "b"), 1u64);
assert_eq!(num_docs_containing_text(&reader, "c"), 1u64);
index_writer.rollback().unwrap();
index.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&index, "a"), 0u64);
assert_eq!(num_docs_containing_text(&index, "b"), 1u64);
assert_eq!(num_docs_containing_text(&index, "c"), 1u64);
reader.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "b"), 1u64);
assert_eq!(num_docs_containing_text(&reader, "c"), 1u64);
}
#[test]
fn test_softcommit_and_rollback() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
// writing the segment
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a"));
index_writer.rollback().unwrap();
assert_eq!(index_writer.commit_opstamp(), 0u64);
assert_eq!(num_docs_containing_text(&index, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
{
index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"c"));
}
assert!(index_writer.soft_commit().is_ok());
index.load_searchers().unwrap(); // we need to load soft committed stuff.
assert_eq!(num_docs_containing_text(&index, "a"), 0u64);
assert_eq!(num_docs_containing_text(&index, "b"), 1u64);
assert_eq!(num_docs_containing_text(&index, "c"), 1u64);
reader.load_searchers().unwrap(); // we need to load soft committed stuff.
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "b"), 1u64);
assert_eq!(num_docs_containing_text(&reader, "c"), 1u64);
index_writer.rollback().unwrap();
index.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&index, "a"), 0u64);
assert_eq!(num_docs_containing_text(&index, "b"), 0u64);
assert_eq!(num_docs_containing_text(&index, "c"), 0u64);
reader.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "b"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "c"), 0u64);
}
#[test]
@@ -808,30 +810,27 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
{
// writing the segment
let mut index_writer = index.writer(12_000_000).unwrap();
// create 8 segments with 100 tiny docs
for _doc in 0..100 {
let mut doc = Document::default();
doc.add_text(text_field, "a");
index_writer.add_document(doc);
index_writer.add_document(doc!(text_field=>"a"));
}
index_writer.commit().expect("commit failed");
for _doc in 0..100 {
let mut doc = Document::default();
doc.add_text(text_field, "a");
index_writer.add_document(doc);
index_writer.add_document(doc!(text_field=>"a"));
}
// this should create 8 segments and trigger a merge.
index_writer.commit().expect("commit failed");
index_writer
.wait_merging_threads()
.expect("waiting merging thread failed");
index.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&index, "a"), 200);
reader.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&reader, "a"), 200);
assert!(index.searchable_segments().unwrap().len() < 8);
}
}
@@ -874,7 +873,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
@@ -896,9 +895,8 @@ mod tests {
}
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
assert_eq!(num_docs_containing_text(&index, "a"), 0);
assert_eq!(num_docs_containing_text(&index, "b"), 100);
assert_eq!(num_docs_containing_text(&reader, "a"), 0);
assert_eq!(num_docs_containing_text(&reader, "b"), 100);
}
#[test]
@@ -927,7 +925,6 @@ mod tests {
index_writer.add_document(doc!(text_field => "b"));
}
assert!(index_writer.commit().is_err());
index.load_searchers().unwrap();
assert_eq!(num_docs_containing(&index, "a"), 100);
assert_eq!(num_docs_containing(&index, "b"), 0);
fail::cfg("RAMDirectory::atomic_write", "off").unwrap();
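Aside from the searcher changes, this file renames the channel aliases: the pipeline carries `AddOperation`s, not bare `Document`s, so `OperationSender`/`OperationReceiver` is the honest name. The underlying pattern is a bounded multi-producer channel; a minimal sketch, assuming the crossbeam-channel crate that the `channel::bounded` call suggests:

extern crate crossbeam_channel;
use crossbeam_channel as channel;

struct AddOperation {
    opstamp: u64,
}

type OperationSender = channel::Sender<AddOperation>;
type OperationReceiver = channel::Receiver<AddOperation>;

fn main() {
    // A bounded channel applies back-pressure once the pipeline is full.
    let (sender, receiver): (OperationSender, OperationReceiver) = channel::bounded(10_000);
    sender.send(AddOperation { opstamp: 0 }).unwrap();
    // Each indexing worker clones the receiver and drains operations from it.
    let op = receiver.recv().unwrap();
    assert_eq!(op.opstamp, 0);
}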

View File

@@ -675,7 +675,7 @@ mod tests {
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
let add_score_bytes = |doc: &mut Document, score: u32| {
let mut bytes = Vec::new();
bytes
@@ -744,8 +744,8 @@ mod tests {
index_writer.wait_merging_threads().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
let top_docs = searcher.search(&query, &TestCollector).unwrap();
@@ -837,7 +837,7 @@ mod tests {
let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let reader = index.reader();
let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field);
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
@@ -874,8 +874,8 @@ mod tests {
bytes_score_field => vec![0u8, 0, 0, 3],
));
index_writer.commit().expect("committed");
index.load_searchers().unwrap();
let ref searcher = *index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
@@ -921,8 +921,8 @@ mod tests {
bytes_score_field => vec![0u8, 0, 27, 88],
));
index_writer.commit().expect("committed");
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
assert_eq!(searcher.num_docs(), 3);
@@ -983,8 +983,8 @@ mod tests {
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
@@ -1029,8 +1029,8 @@ mod tests {
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
@@ -1080,9 +1080,9 @@ mod tests {
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
let ref searcher = *index.searcher();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
@@ -1130,9 +1130,9 @@ mod tests {
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
let ref searcher = *index.searcher();
let searcher = reader.searcher();
assert!(segment_ids.is_empty());
assert!(searcher.segment_readers().is_empty());
assert_eq!(searcher.num_docs(), 0);
@@ -1144,6 +1144,7 @@ mod tests {
let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
@@ -1173,9 +1174,9 @@ mod tests {
index_writer.commit().expect("committed");
}
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
let searcher = index.searcher();
let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top"));
let (count, facet_counts) = searcher
@@ -1217,7 +1218,7 @@ mod tests {
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
test_searcher(
11,
&[
@@ -1238,7 +1239,7 @@ mod tests {
let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term);
index_writer.commit().unwrap();
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
test_searcher(
9,
&[
@@ -1263,8 +1264,8 @@ mod tests {
index_writer.commit().expect("commit failed");
index_writer.add_document(doc!(int_field => 1u64));
index_writer.commit().expect("commit failed");
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index
@@ -1275,10 +1276,10 @@ mod tests {
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
// commit has not been called yet. The document should still be
// there.
assert_eq!(index.searcher().num_docs(), 2);
assert_eq!(reader.searcher().num_docs(), 2);
}
#[test]
@@ -1289,7 +1290,7 @@ mod tests {
.set_indexed();
let int_field = schema_builder.add_u64_field("intvals", int_options);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader();
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut doc = Document::default();
@@ -1310,8 +1311,8 @@ mod tests {
.expect("Merging failed");
// assert delete has not been committed
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
index_writer.commit().unwrap();
@@ -1319,8 +1320,8 @@ mod tests {
index_writer.wait_merging_threads().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 0);
}
@@ -1360,9 +1361,10 @@ mod tests {
index_writer.commit().expect("committed");
}
index.load_searchers().unwrap();
let reader = index.reader();
reader.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = reader.searcher();
let mut vals: Vec<u64> = Vec::new();
@@ -1432,10 +1434,10 @@ mod tests {
index_writer.wait_merging_threads().unwrap();
}
index.load_searchers().unwrap();
reader.load_searchers().unwrap();
{
let searcher = index.searcher();
let searcher = reader.searcher();
println!(
"{:?}",
searcher

View File

@@ -580,9 +580,8 @@ mod tests {
index_writer.delete_term(term);
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
assert_eq!(index.searcher().num_docs(), 302);
let reader = index.reader();
assert_eq!(reader.searcher().num_docs(), 302);
{
index_writer
@@ -590,9 +589,9 @@ mod tests {
.expect("waiting for merging threads");
}
index.load_searchers().unwrap();
assert_eq!(index.searcher().segment_readers().len(), 1);
assert_eq!(index.searcher().num_docs(), 302);
reader.load_searchers().unwrap();
assert_eq!(reader.searcher().segment_readers().len(), 1);
assert_eq!(reader.searcher().num_docs(), 302);
}
#[test]
@@ -651,18 +650,18 @@ mod tests {
.expect("waiting for merging threads");
}
index.load_searchers().unwrap();
assert_eq!(index.searcher().num_docs(), 0);
let reader = index.reader();
assert_eq!(reader.searcher().num_docs(), 0);
let seg_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
assert!(seg_ids.is_empty());
index.load_searchers().unwrap();
assert_eq!(index.searcher().num_docs(), 0);
reader.load_searchers().unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
// empty segments should be erased
assert!(index.searchable_segment_metas().unwrap().is_empty());
assert!(index.searcher().segment_readers().is_empty());
assert!(reader.searcher().segment_readers().is_empty());
}
}

View File

@@ -186,6 +186,7 @@ pub use error::TantivyError as Error;
extern crate census;
extern crate owned_read;
extern crate snap;
/// Tantivy result.
pub type Result<T> = std::result::Result<T, error::TantivyError>;
@@ -210,6 +211,9 @@ pub mod space_usage;
pub mod store;
pub mod termdict;
mod reader;
pub use self::reader::{IndexReader, IndexReaderBuilder, ReloadPolicy};
mod snippet;
pub use self::snippet::{Snippet, SnippetGenerator};
@@ -298,6 +302,7 @@ mod tests {
use Index;
use IndexWriter;
use Postings;
use ReloadPolicy;
pub fn assert_nearly_equals(expected: f32, val: f32) {
assert!(
@@ -386,8 +391,8 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let term_a = Term::from_field_text(text_field, "a");
assert_eq!(searcher.doc_freq(&term_a), 3);
let term_b = Term::from_field_text(text_field, "b");
@@ -414,8 +419,8 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let index_reader = index.reader();
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
{
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
@@ -450,8 +455,8 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
@@ -500,8 +505,8 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field);
assert!(inverted_index
@@ -536,8 +541,8 @@ mod tests {
index_writer.rollback().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let index_reader = index.reader();
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(term_abcd.field());
@@ -573,10 +578,10 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(term_abcd.field());
let reader = index.reader();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(term_abcd.field());
assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none());
@@ -584,25 +589,25 @@ mod tests {
let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap();
assert!(!advance_undeleted(&mut postings, reader));
assert!(!advance_undeleted(&mut postings, segment_reader));
}
{
let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap();
assert!(advance_undeleted(&mut postings, reader));
assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3);
assert!(advance_undeleted(&mut postings, reader));
assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, reader));
assert!(!advance_undeleted(&mut postings, segment_reader));
}
{
let mut postings = inverted_index
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
.unwrap();
assert!(advance_undeleted(&mut postings, reader));
assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, reader));
assert!(!advance_undeleted(&mut postings, segment_reader));
}
}
}
@@ -617,8 +622,8 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let term = Term::from_field_u64(field, 1u64);
let mut postings = searcher
.segment_reader(0)
@@ -641,8 +646,8 @@ mod tests {
let negative_val = -1i64;
index_writer.add_document(doc!(value_field => negative_val));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let term = Term::from_field_i64(value_field, negative_val);
let mut postings = searcher
.segment_reader(0)
@@ -664,8 +669,9 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a"));
assert!(index_writer.commit().is_ok());
assert!(index.load_searchers().is_ok());
let searcher = index.searcher();
let reader = index.reader();
assert!(reader.load_searchers().is_ok());
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0);
segment_reader.inverted_index(absent_field); //< should not panic
}
@@ -676,6 +682,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let reader = index.reader();
// writing the segment
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
@@ -701,8 +708,8 @@ mod tests {
remove_document(&mut index_writer, "38");
remove_document(&mut index_writer, "34");
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 6);
}
@@ -722,8 +729,8 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
let index_reader = index.reader();
let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field);
let term_abcd = Term::from_field_text(text_field, "abcd");
@@ -747,7 +754,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let reader = index.reader();
{
// writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -757,8 +764,8 @@ mod tests {
index_writer.commit().unwrap();
}
{
index.load_searchers().unwrap();
let searcher = index.searcher();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
let topdocs = searcher.search(&query, &TestCollector).unwrap();
@@ -818,7 +825,7 @@ mod tests {
}
index_writer.commit().unwrap();
}
index.searcher();
index.reader().searcher();
}
#[test]
@@ -855,9 +862,8 @@ mod tests {
index_writer.add_document(document);
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0);
{
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field);

View File

@@ -101,9 +101,8 @@ pub mod tests {
}
index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
let term = Term::from_field_text(title, "abc");
@@ -293,9 +292,8 @@ pub mod tests {
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let term_a = Term::from_field_text(text_field, "a");
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
let mut postings = segment_reader
.inverted_index(text_field)
@@ -332,10 +330,9 @@ pub mod tests {
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
};
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
// check that the basic usage works
@@ -403,8 +400,7 @@ pub mod tests {
index_writer.delete_term(term_0);
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
// make sure seeking still works
@@ -451,12 +447,9 @@ pub mod tests {
{
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.delete_term(term_1);
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
// finally, check that it's empty
{
@@ -512,7 +505,6 @@ pub mod tests {
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
};
}

View File

@@ -775,8 +775,7 @@ mod tests {
last_doc = doc + 1;
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(int_field);
let term = Term::from_field_u64(int_field, 0u64);
@@ -844,8 +843,7 @@ mod tests {
index_writer.add_document(doc);
}
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let segment_reader = searcher.segment_reader(0);
let mut block_segments;

View File

@@ -101,8 +101,9 @@ mod tests {
index_writer.commit().unwrap();
index_writer.add_document(doc!(field=>"ccc"));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
reader.load_searchers().unwrap();
let searcher = reader.searcher();
let weight = AllQuery.weight(&searcher, false).unwrap();
{
let reader = searcher.segment_reader(0);

View File

@@ -51,7 +51,6 @@ mod tests {
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
(index, text_field)
}
@@ -60,7 +59,8 @@ mod tests {
let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("(+a +b) d").unwrap();
assert_eq!(query.count(&*index.searcher()).unwrap(), 3);
let searcher = index.reader().searcher();
assert_eq!(query.count(&searcher).unwrap(), 3);
}
#[test]
@@ -68,7 +68,7 @@ mod tests {
let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("+a").unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
assert!(scorer.is::<TermScorer>());
@@ -78,7 +78,7 @@ mod tests {
pub fn test_boolean_termonly_intersection() {
let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let searcher = index.searcher();
let searcher = index.reader().searcher();
{
let query = query_parser.parse_query("+a +b +c").unwrap();
let weight = query.weight(&searcher, true).unwrap();
@@ -97,7 +97,7 @@ mod tests {
pub fn test_boolean_reqopt() {
let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let searcher = index.searcher();
let searcher = index.reader().searcher();
{
let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap();
@@ -126,10 +126,13 @@ mod tests {
query
};
let reader = index.reader();
let matching_docs = |boolean_query: &Query| {
let searcher = index.searcher();
let test_docs = searcher.search(boolean_query, &TestCollector).unwrap();
test_docs
reader
.searcher()
.search(boolean_query, &TestCollector)
.unwrap()
.docs()
.iter()
.cloned()
@@ -185,10 +188,12 @@ mod tests {
let query: Box<Query> = Box::new(term_query);
query
};
let reader = index.reader();
let score_docs = |boolean_query: &Query| {
let searcher = index.searcher();
let fruit = searcher.search(boolean_query, &TestCollector).unwrap();
let fruit = reader
.searcher()
.search(boolean_query, &TestCollector)
.unwrap();
fruit.scores().to_vec()
};

View File

@@ -141,8 +141,8 @@ mod test {
));
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
{
let term = Term::from_field_text(country_field, "japon");

View File

@@ -31,7 +31,6 @@ mod tests {
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
index
}
@@ -46,8 +45,7 @@ mod tests {
]);
let schema = index.schema();
let text_field = schema.get_field("text").unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let test_query = |texts: Vec<&str>| {
let terms: Vec<Term> = texts
.iter()
@@ -90,8 +88,7 @@ mod tests {
index_writer.add_document(doc!(text_field=>"a b c"));
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let phrase_query = PhraseQuery::new(vec![
Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"),
@@ -115,8 +112,7 @@ mod tests {
let index = create_index(&["a b c", "a b c a b"]);
let schema = index.schema();
let text_field = schema.get_field("text").unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let test_query = |texts: Vec<&str>| {
let terms: Vec<Term> = texts
.iter()
@@ -148,8 +144,7 @@ mod tests {
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let test_query = |texts: Vec<&str>| {
let terms: Vec<Term> = texts
.iter()
@@ -177,8 +172,7 @@ mod tests {
index_writer.add_document(doc!(text_field=>"a b c d e f g h"));
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let test_query = |texts: Vec<(usize, &str)>| {
let terms: Vec<(usize, Term)> = texts
.iter()

View File

@@ -316,8 +316,8 @@ mod tests {
}
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
@@ -355,8 +355,8 @@ mod tests {
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let count_multiples =
|range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();

View File

@@ -108,8 +108,8 @@ mod test {
));
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
{
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
let scored_docs = searcher

View File

@@ -32,9 +32,7 @@ mod tests {
}
assert!(index_writer.commit().is_ok());
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let term_query = TermQuery::new(
Term::from_field_text(text_field, "a"),
IndexRecordOption::Basic,
@@ -65,8 +63,7 @@ mod tests {
index_writer.add_document(doc!(left_field => "left4 left1"));
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
{
let term = Term::from_field_text(left_field, "left2");
let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs);

src/reader/mod.rs (new file, 142 lines)
View File

@@ -0,0 +1,142 @@
mod pool;
use self::pool::{LeasedItem, Pool};
use core::Segment;
use directory::Directory;
use directory::META_LOCK;
use num_cpus;
use std::sync::Arc;
use Index;
use Result;
use Searcher;
use SegmentReader;
use schema::Schema;
#[derive(Clone, Copy)]
pub enum ReloadPolicy {
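/// The index is reloaded manually, by calling `load_searchers()`.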
MANUAL,
// NEAR_REAL_TIME(target_ms),
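/// Intended: reload automatically as soon as a new commit is available
/// (not yet wired up in this commit).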
ON_COMMIT,
}
#[derive(Clone)]
pub struct IndexReaderBuilder {
num_searchers: usize,
reload_policy: ReloadPolicy,
index: Index,
}
impl IndexReaderBuilder {
pub(crate) fn new(index: Index) -> IndexReaderBuilder {
IndexReaderBuilder {
num_searchers: num_cpus::get(),
reload_policy: ReloadPolicy::MANUAL,
index,
}
}
pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
self.reload_policy = reload_policy;
self
}
pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
self.num_searchers = num_searchers;
self
}
}
impl Into<IndexReader> for IndexReaderBuilder {
fn into(self) -> IndexReader {
let reader = IndexReader::new(self.index, self.num_searchers, self.reload_policy);
if let Err(err) = reader.load_searchers() {
error!("Failed to load searchers: {:?}", err);
}
reader
}
}
struct InnerIndexReader {
num_searchers: usize,
searcher_pool: Pool<Searcher>,
reload_policy: ReloadPolicy,
index: Index,
}
impl InnerIndexReader {
fn load_searchers(&self) -> Result<()> {
let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
let segment_readers: Vec<SegmentReader> = searchable_segments
.iter()
.map(SegmentReader::open)
.collect::<Result<_>>()?;
let schema = self.index.schema();
let searchers = (0..self.num_searchers)
.map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
/// Returns the list of segments that are searchable
fn searchable_segments(&self) -> Result<Vec<Segment>> {
self.index.searchable_segments()
}
fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}
pub struct IndexReader {
inner: Arc<InnerIndexReader>,
}
impl IndexReader {
/// Update searchers so that they reflect the state of the last
/// `.commit()`.
///
/// If indexing happens in the same process as searching,
/// you most likely want to call `.load_searchers()` right after each
/// successful call to `.commit()`.
///
/// If indexing and searching happen in different processes, the way to
/// get the freshest `index` at all times is to watch `meta.json` and
/// call `load_searchers` whenever a change happens.
pub fn load_searchers(&self) -> Result<()> {
self.inner.load_searchers()
}
/// Returns a searcher
///
/// This method should be called every single time a search
/// query is performed.
/// The searchers are taken from a pool of `num_searchers` searchers.
/// If no searcher is available
/// this may block.
///
/// The same searcher must be used for a given query, as it ensures
/// the use of a consistent segment set.
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.inner.searcher()
}
pub fn schema(&self) -> Schema {
self.inner.index.schema()
}
pub(crate) fn new(
index: Index,
num_searchers: usize,
reload_policy: ReloadPolicy,
) -> IndexReader {
IndexReader {
inner: Arc::new(InnerIndexReader {
index,
num_searchers,
searcher_pool: Pool::new(),
reload_policy,
}),
}
}
}
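Putting the new surface together, configuring a reader through the builder would look like this fragment (a sketch; `into()` is how the builder currently materializes the `IndexReader`, and `index` is assumed to exist):

let reader: IndexReader = index
    .reader_builder()
    .num_searchers(4)
    .reload_policy(ReloadPolicy::MANUAL)
    .into();
// Call load_searchers() after each commit you want to observe.
reader.load_searchers()?;
let searcher = reader.searcher();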

View File

@@ -528,9 +528,8 @@ Survey in 2016, 2017, and 2018."#;
index_writer.add_document(doc!(text_field => "a"));
index_writer.add_document(doc!(text_field => "a b"));
index_writer.commit().unwrap();
index.load_searchers().unwrap();
}
let searcher = index.searcher();
let searcher = index.reader().searcher();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
{
let query = query_parser.parse_query("e").unwrap();
@@ -587,8 +586,7 @@ Survey in 2016, 2017, and 2018."#;
}
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("rust design").unwrap();
let mut snippet_generator =

View File

@@ -305,9 +305,8 @@ mod test {
fn test_empty() {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema.clone());
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert_eq!(0, searcher_space_usage.total());
}
@@ -345,8 +344,8 @@ mod test {
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());
@@ -385,8 +384,8 @@ mod test {
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());
@@ -424,9 +423,8 @@ mod test {
index_writer.add_document(doc!(name => "hello hi goodbye"));
index_writer.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());
@@ -472,9 +470,8 @@ mod test {
index_writer2.commit().unwrap();
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let reader = index.reader();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());

View File

@@ -159,8 +159,7 @@ mod tests {
index_writer.commit().unwrap();
}
}
index.load_searchers().unwrap();
let searcher = index.searcher();
let searcher = index.reader().searcher();
let field_searcher = searcher.field(text_field);
let mut term_it = field_searcher.terms();