Compare commits

1 Commit

Author: Paul Masurel
SHA1: 4fc8712f1a
Message: SegmentUpdater.add_segment does not need to return true
Date: 2019-11-09 20:30:40 +09:00
75 changed files with 1337 additions and 2564 deletions

View File

@@ -1,17 +1,3 @@
-Tantivy 0.11.3
-=======================
-- Fixed DateTime as a fast field (#735)
-Tantivy 0.11.2
-=======================
-- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
-- Exposing a constructor for `WatchHandle` (#731)
-Tantivy 0.11.1
-=====================
-- Bug fix #729
 Tantivy 0.11.0
 =====================
@@ -23,7 +9,6 @@ Tantivy 0.11.0
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
 - Add footer with some metadata to index files. #605 (@fdb-hiroshima)
-- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
 - TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
 - Added handling of pre-tokenized text fields (#642), which will enable users to
   load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
@@ -31,11 +16,10 @@ Tantivy 0.11.0
 ## How to update?
-- The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
   an error and handling the `Result` is required.
-- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
 Tantivy 0.10.2
 =====================
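
The "How to update?" note above says that `RegexQuery` now compiles its regex when the query is built and therefore returns a `Result`. A minimal, hedged sketch of what handling that looks like (not part of this diff; the `RegexQuery::from_pattern(pattern, field)` constructor name is an assumption and may differ slightly in the 0.11 API):

```rust
use tantivy::query::RegexQuery;
use tantivy::schema::{Schema, TEXT};

// Building the query can fail if the pattern is invalid, so the caller
// handles a Result instead of getting a panic at search time.
fn build_regex_query() -> tantivy::Result<RegexQuery> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let _schema = schema_builder.build();
    // The regex automaton is built here and reused across searches (#639).
    RegexQuery::from_pattern("diar.*", title)
}
```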

View File

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.11.3"
+version = "0.11.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -33,16 +33,18 @@ fs2={version="0.4", optional=true}
 itertools = "0.8"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 notify = {version="4", optional=true}
+bit-set = "0.5"
 uuid = { version = "0.8", features = ["v4", "serde"] }
 crossbeam = "0.7"
-futures = {version = "0.3", features=["thread-pool"] }
+futures = "0.1"
+futures-cpupool = "0.1"
 owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
-rust-stemmers = "1.2"
+rust-stemmers = "1.1"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { version="0.11", path="./query-grammar" }
+tantivy-query-grammar = { path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.4"
+census = "0.2"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"

View File

@@ -13,100 +13,63 @@
// --- // ---
// Importing tantivy... // Importing tantivy...
use tantivy::collector::FacetCollector; use tantivy::collector::FacetCollector;
use tantivy::query::{AllQuery, TermQuery}; use tantivy::query::AllQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index}; use tantivy::{doc, Index};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the sake of this example // Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("felin_name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
// this is our faceted field: its scientific classification
let classification = schema_builder.add_facet_field("classification"); // this is our faceted field
schema_builder.add_facet_field("tags");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(30_000_000)?; let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
let name = schema.get_field("name").unwrap();
let tags = schema.get_field("tags").unwrap();
// For convenience, tantivy also comes with a macro to // For convenience, tantivy also comes with a macro to
// reduce the boilerplate above. // reduce the boilerplate above.
index_writer.add_document(doc!( index_writer.add_document(doc!(
name => "Cat", name => "the ditch",
classification => Facet::from("/Felidae/Felinae/Felis") tags => Facet::from("/pools/north")
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
name => "Canada lynx", name => "little stacey",
classification => Facet::from("/Felidae/Felinae/Lynx") tags => Facet::from("/pools/south")
));
index_writer.add_document(doc!(
name => "Cheetah",
classification => Facet::from("/Felidae/Felinae/Acinonyx")
));
index_writer.add_document(doc!(
name => "Tiger",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Lion",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Jaguar",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Sunda clouded leopard",
classification => Facet::from("/Felidae/Pantherinae/Neofelis")
));
index_writer.add_document(doc!(
name => "Fossa",
classification => Facet::from("/Eupleridae/Cryptoprocta")
)); ));
index_writer.commit()?; index_writer.commit()?;
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
{
let mut facet_collector = FacetCollector::for_field(classification);
facet_collector.add_facet("/Felidae");
let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
// This lists all of the facet counts, right below "/Felidae".
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
assert_eq!(
facets,
vec![
(&Facet::from("/Felidae/Felinae"), 3),
(&Facet::from("/Felidae/Pantherinae"), 4),
]
);
}
// Facets are also searchable. let mut facet_collector = FacetCollector::for_field(tags);
// facet_collector.add_facet("/pools");
// For instance, a common UI pattern is to allow the user to click on a facet link
// (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
//
// The search would then look as follows.
// Check the reference doc for different ways to create a `Facet` object. let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
{
let facet = Facet::from_text("/Felidae/Pantherinae"); // This lists all of the facet counts
let facet_term = Term::from_facet(classification, &facet); let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic); assert_eq!(
let mut facet_collector = FacetCollector::for_field(classification); facets,
facet_collector.add_facet("/Felidae/Pantherinae"); vec![
let facet_counts = searcher.search(&facet_term_query, &facet_collector)?; (&Facet::from("/pools/north"), 1),
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect(); (&Facet::from("/pools/south"), 1),
assert_eq!( ]
facets, );
vec![
(&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
(&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
]
);
}
Ok(()) Ok(())
} }

View File

@@ -65,8 +65,11 @@ fn main() -> tantivy::Result<()> {
tokens: pre_tokenize_text(body_text), tokens: pre_tokenize_text(body_text),
}; };
// Now lets create a document and add our `PreTokenizedString` // Now lets create a document and add our `PreTokenizedString` using
let old_man_doc = doc!(title => title_tok, body => body_tok); // `add_pre_tokenized_text` method of `Document`
let mut old_man_doc = Document::default();
old_man_doc.add_pre_tokenized_text(title, &title_tok);
old_man_doc.add_pre_tokenized_text(body, &body_tok);
// ... now let's just add it to the IndexWriter // ... now let's just add it to the IndexWriter
index_writer.add_document(old_man_doc); index_writer.add_document(old_man_doc);
@@ -113,9 +116,6 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count, 2); assert_eq!(count, 2);
// Now let's print out the results.
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs { for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?; let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc)); println!("Document: {}", schema.to_json(&retrieved_doc));

View File

@@ -1,3 +0,0 @@
# Tantivy Query Grammar
This crate is used by tantivy to parse queries.

View File

@@ -13,29 +13,44 @@ use crate::SegmentReader;
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index}; /// use tantivy::{doc, Index, Result};
/// ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let mut index_writer = index.writer(3_000_000).unwrap(); /// let reader = index.reader()?;
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// let searcher = reader.searcher();
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let reader = index.reader().unwrap(); /// {
/// let searcher = reader.searcher(); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// // Here comes the important part /// assert_eq!(count, 2);
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// }
/// let query = query_parser.parse_query("diary").unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// assert_eq!(count, 2); /// Ok(())
/// }
/// ``` /// ```
pub struct Count; pub struct Count;

View File

@@ -86,6 +86,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// use tantivy::schema::{Facet, Schema, TEXT}; /// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result}; /// use tantivy::{doc, Index, Result};
/// ///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// ///
@@ -126,7 +127,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang"); /// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category"); /// facet_collector.add_facet("/category");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -142,7 +143,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -157,8 +158,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ]); /// ]);
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -171,7 +172,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// # assert!(example().is_ok());
/// ``` /// ```
pub struct FacetCollector { pub struct FacetCollector {
field: Field, field: Field,
@@ -452,11 +452,9 @@ impl FacetCounts {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{FacetCollector, FacetCounts}; use super::{FacetCollector, FacetCounts};
use crate::collector::Count;
use crate::core::Index; use crate::core::Index;
use crate::query::{AllQuery, QueryParser, TermQuery}; use crate::query::AllQuery;
use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema}; use crate::schema::{Document, Facet, Field, Schema};
use crate::Term;
use rand::distributions::Uniform; use rand::distributions::Uniform;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
@@ -546,56 +544,6 @@ mod tests {
assert_eq!(facets[0].1, 1); assert_eq!(facets[0].1, 1);
} }
#[test]
fn test_doc_search_by_facet() {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/A"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/B"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/C/A"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/D/C/A"),
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 4);
let count_facet = |facet_str: &str| {
let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
searcher
.search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
.unwrap()
};
assert_eq!(count_facet("/"), 4);
assert_eq!(count_facet("/A"), 3);
assert_eq!(count_facet("/A/B"), 1);
assert_eq!(count_facet("/A/C"), 1);
assert_eq!(count_facet("/A/C/A"), 1);
assert_eq!(count_facet("/C/A"), 0);
{
let query_parser = QueryParser::for_index(&index, vec![]);
{
let query = query_parser.parse_query("facet:/A/B").unwrap();
assert_eq!(1, searcher.search(&query, &Count).unwrap());
}
{
let query = query_parser.parse_query("facet:/A").unwrap();
assert_eq!(3, searcher.search(&query, &Count).unwrap());
}
}
}
#[test] #[test]
fn test_non_used_facet_collector() { fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));

View File

@@ -108,35 +108,49 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// use tantivy::collector::{Count, TopDocs, MultiCollector}; /// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index}; /// use tantivy::{doc, Index, Result};
/// ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let mut index_writer = index.writer(3_000_000).unwrap(); /// let reader = index.reader()?;
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// let searcher = reader.searcher();
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let reader = index.reader().unwrap(); /// let mut collectors = MultiCollector::new();
/// let searcher = reader.searcher(); /// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let mut multi_fruit = searcher.search(&query, &collectors)?;
/// ///
/// let mut collectors = MultiCollector::new(); /// let count = count_handle.extract(&mut multi_fruit);
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2)); /// let top_docs = top_docs_handle.extract(&mut multi_fruit);
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
/// ///
/// let count = count_handle.extract(&mut multi_fruit); /// # assert_eq!(count, 2);
/// let top_docs = top_docs_handle.extract(&mut multi_fruit); /// # assert_eq!(top_docs.len(), 2);
/// ///
/// assert_eq!(count, 2); /// Ok(())
/// assert_eq!(top_docs.len(), 2); /// }
/// ``` /// ```
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[derive(Default)] #[derive(Default)]

View File

@@ -29,29 +29,43 @@ use std::fmt;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index}; /// use tantivy::{doc, DocAddress, Index, Result};
/// ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); /// let reader = index.reader()?;
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// let searcher = reader.searcher();
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let reader = index.reader().unwrap(); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let searcher = reader.searcher(); /// let query = query_parser.parse_query("diary")?;
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
/// ///
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// let query = query_parser.parse_query("diary").unwrap(); /// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
/// ///
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1))); /// Ok(())
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3))); /// }
/// ``` /// ```
pub struct TopDocs(TopCollector<Score>); pub struct TopDocs(TopCollector<Score>);
@@ -88,12 +102,15 @@ impl TopDocs {
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64)); /// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64)); /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64)); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64)); /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # assert!(index_writer.commit().is_ok()); /// # index_writer.commit()?;
/// # let reader = index.reader().unwrap(); /// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?; /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?; /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
@@ -185,33 +202,27 @@ impl TopDocs {
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field; /// use tantivy::schema::Field;
/// ///
/// fn create_schema() -> Schema { /// # fn create_schema() -> Schema {
/// let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
/// schema_builder.add_text_field("product_name", TEXT); /// # schema_builder.add_text_field("product_name", TEXT);
/// schema_builder.add_u64_field("popularity", FAST); /// # schema_builder.add_u64_field("popularity", FAST);
/// schema_builder.build() /// # schema_builder.build()
/// } /// # }
/// /// #
/// fn create_index() -> tantivy::Result<Index> { /// # fn main() -> tantivy::Result<()> {
/// let schema = create_schema(); /// # let schema = create_schema();
/// let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// let product_name = index.schema().get_field("product_name").unwrap(); /// # let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// #
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// index_writer.commit()?;
/// Ok(index)
/// }
///
/// let index = create_index().unwrap();
/// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// /// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// let user_query_str = "diary"; /// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// let query_parser = QueryParser::for_index(&index, vec![product_name]); /// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// let query = query_parser.parse_query(user_query_str).unwrap(); /// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
/// ///
/// // This is where we build our collector with our custom score. /// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs /// let top_docs_by_custom_score = TopDocs
@@ -238,12 +249,15 @@ impl TopDocs {
/// popularity_boost_score * original_score /// popularity_boost_score * original_score
/// } /// }
/// }); /// });
/// let reader = index.reader().unwrap(); /// # let reader = index.reader()?;
/// let searcher = reader.searcher(); /// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score. /// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<(Score, DocAddress)> = /// let resulting_docs: Vec<(Score, DocAddress)> =
/// searcher.search(&query, &top_docs_by_custom_score).unwrap(); /// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ``` /// ```
/// ///
/// # See also /// # See also

View File

@@ -186,7 +186,7 @@ mod test {
use super::{CompositeFile, CompositeWrite}; use super::{CompositeFile, CompositeWrite};
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::VInt; use crate::common::VInt;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory}; use crate::directory::{Directory, RAMDirectory};
use crate::schema::Field; use crate::schema::Field;
use std::io::Write; use std::io::Write;
use std::path::Path; use std::path::Path;

View File

@@ -104,21 +104,23 @@ impl Index {
if Index::exists(&mmap_directory) { if Index::exists(&mmap_directory) {
return Err(TantivyError::IndexAlreadyExists); return Err(TantivyError::IndexAlreadyExists);
} }
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
} }
/// Opens or creates a new index in the provided directory /// Opens or creates a new index in the provided directory
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> { pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
if !Index::exists(&dir) { if Index::exists(&dir) {
return Index::create(dir, schema); let index = Index::open(dir)?;
} if index.schema() == schema {
let index = Index::open(dir)?; Ok(index)
if index.schema() == schema { } else {
Ok(index) Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
}
} else { } else {
Err(TantivyError::SchemaError( Index::create(dir, schema)
"An index exists but the schema does not match.".to_string(),
))
} }
} }
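
As a usage-level sketch of the `open_or_create` behaviour shown in the hunk above (not part of this commit): a first call on an empty directory creates the index, a second call with the same schema opens it, and a mismatching schema is reported as a `SchemaError`. The `RAMDirectory::create()` constructor is assumed here.

```rust
use tantivy::directory::RAMDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn open_or_create_example() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();
    let dir = RAMDirectory::create();
    // The directory is empty, so this call creates a fresh index.
    let _index = Index::open_or_create(dir.clone(), schema.clone())?;
    // The index now exists and the schema matches, so this call opens it.
    let _same_index = Index::open_or_create(dir, schema)?;
    // Passing a different schema at this point would yield
    // Err(TantivyError::SchemaError(..)).
    Ok(())
}
```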
@@ -338,7 +340,7 @@ impl Index {
/// Creates a new segment. /// Creates a new segment.
pub fn new_segment(&self) -> Segment { pub fn new_segment(&self) -> Segment {
let mut segment_meta = self let segment_meta = self
.inventory .inventory
.new_segment_meta(SegmentId::generate_random(), 0); .new_segment_meta(SegmentId::generate_random(), 0);
self.segment(segment_meta) self.segment(segment_meta)
@@ -386,9 +388,12 @@ mod tests {
use crate::directory::RAMDirectory; use crate::directory::RAMDirectory;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::{Schema, INDEXED, TEXT}; use crate::schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader; use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::{Directory, Index}; use std::thread;
use std::time::Duration;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -466,14 +471,14 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &index, &reader); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_specific { mod mmap_specific {
use super::*; use super::*;
use crate::Directory;
use std::path::PathBuf; use std::path::PathBuf;
use tempfile::TempDir; use tempfile::TempDir;
@@ -484,20 +489,22 @@ mod tests {
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap(); let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::OnCommit) .reload_policy(ReloadPolicy::OnCommit)
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &index, &reader); test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
#[test] #[test]
fn test_index_manual_policy_mmap() { fn test_index_manual_policy_mmap() {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let mut index = Index::create_from_tempdir(schema).unwrap(); let index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap(); writer.commit().unwrap();
let reader = index let reader = index
@@ -507,12 +514,8 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = index.directory_mut().watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap(); writer.commit().unwrap();
assert!(receiver.recv().is_ok()); thread::sleep(Duration::from_millis(500));
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
reader.reload().unwrap(); reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 1); assert_eq!(reader.searcher().num_docs(), 1);
@@ -532,26 +535,39 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &write_index, &reader); let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
} }
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) { fn test_index_on_commit_reload_policy_aux(
let mut reader_index = reader.index(); field: Field,
let (sender, receiver) = crossbeam::channel::unbounded(); writer: &mut IndexWriter,
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || { reader: &IndexReader,
let _ = sender.send(()); ) {
}));
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
writer.commit().unwrap(); writer.commit().unwrap();
assert!(receiver.recv().is_ok()); let mut count = 0;
assert_eq!(reader.searcher().num_docs(), 1); for _ in 0..100 {
count = reader.searcher().num_docs();
if count > 0 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 1);
writer.add_document(doc!(field=>2u64)); writer.add_document(doc!(field=>2u64));
writer.commit().unwrap(); writer.commit().unwrap();
assert!(receiver.recv().is_ok()); let mut count = 0;
assert_eq!(reader.searcher().num_docs(), 2); for _ in 0..10 {
count = reader.searcher().num_docs();
if count > 1 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 2);
} }
// This test will not pass on windows, because windows // This test will not pass on windows, because windows
@@ -568,13 +584,9 @@ mod tests {
for i in 0u64..8_000u64 { for i in 0u64..8_000u64 {
writer.add_document(doc!(field => i)); writer.add_document(doc!(field => i));
} }
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = directory.watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap(); writer.commit().unwrap();
let mem_right_after_commit = directory.total_mem_usage(); let mem_right_after_commit = directory.total_mem_usage();
assert!(receiver.recv().is_ok()); thread::sleep(Duration::from_millis(1_000));
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
@@ -588,11 +600,6 @@ mod tests {
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 8_000); assert_eq!(searcher.num_docs(), 8_000);
assert!( assert!(mem_right_after_merge_finished < mem_right_after_commit);
mem_right_after_merge_finished < mem_right_after_commit,
"(mem after merge){} is expected < (mem before merge){}",
mem_right_after_merge_finished,
mem_right_after_commit
);
} }
} }

View File

@@ -35,7 +35,6 @@ impl SegmentMetaInventory {
segment_id, segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: false,
}; };
SegmentMeta::from(self.inventory.track(inner)) SegmentMeta::from(self.inventory.track(inner))
} }
@@ -82,19 +81,6 @@ impl SegmentMeta {
self.tracked.segment_id self.tracked.segment_id
} }
pub fn with_bundled(self) -> SegmentMeta {
SegmentMeta::from(self.tracked.map(|inner| InnerSegmentMeta {
segment_id: inner.segment_id,
max_doc: inner.max_doc,
deletes: inner.deletes.clone(),
bundled: true,
}))
}
pub fn is_bundled(&self) -> bool {
self.tracked.bundled
}
/// Returns the number of deleted documents. /// Returns the number of deleted documents.
pub fn num_deleted_docs(&self) -> u32 { pub fn num_deleted_docs(&self) -> u32 {
self.tracked self.tracked
@@ -121,12 +107,8 @@ impl SegmentMeta {
/// It just joins the segment id with the extension /// It just joins the segment id with the extension
/// associated to a segment component. /// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf { pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let suffix = self.suffix(component); let mut path = self.id().uuid_string();
self.relative_path_from_suffix(&suffix) path.push_str(&*match component {
}
fn suffix(&self, component: SegmentComponent) -> String {
match component {
SegmentComponent::POSTINGS => ".idx".to_string(), SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::POSITIONS => ".pos".to_string(), SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(), SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
@@ -135,17 +117,7 @@ impl SegmentMeta {
SegmentComponent::FASTFIELDS => ".fast".to_string(), SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(), SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)), SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
} });
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path_from_suffix(&self, suffix: &str) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(".");
path.push_str(&suffix);
PathBuf::from(path) PathBuf::from(path)
} }
@@ -189,7 +161,6 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -204,7 +175,6 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc: inner_meta.max_doc, max_doc: inner_meta.max_doc,
deletes: Some(delete_meta), deletes: Some(delete_meta),
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -215,7 +185,6 @@ struct InnerSegmentMeta {
segment_id: SegmentId, segment_id: SegmentId,
max_doc: u32, max_doc: u32,
deletes: Option<DeleteMeta>, deletes: Option<DeleteMeta>,
bundled: bool,
} }
impl InnerSegmentMeta { impl InnerSegmentMeta {
@@ -331,9 +300,6 @@ mod tests {
payload: None, payload: None,
}; };
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed"); let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!( assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
json,
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
);
} }
} }

View File

@@ -4,12 +4,14 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::{ReadOnlyDirectory, ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::result;
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
@@ -81,30 +83,23 @@ impl Segment {
} }
/// Open one of the component file for a *regular* read. /// Open one of the component file for a *regular* read.
pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> { pub fn open_read(
&self,
component: SegmentComponent,
) -> result::Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let source = self.index.directory().open_read(&path)?; let source = self.index.directory().open_read(&path)?;
Ok(source) Ok(source)
} }
/// Open one of the component file for *regular* write. /// Open one of the component file for *regular* write.
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> { pub fn open_write(
let path = self.relative_path(component);
self.index.directory_mut().open_write(&path)
}
pub fn open_bundle_writer(&mut self) -> Result<WritePtr, OpenWriteError> {
let path = self.meta.relative_path_from_suffix("bundle");
self.index.directory_mut().open_write(&path)
}
pub(crate) fn open_write_in_directory(
&mut self, &mut self,
component: SegmentComponent, component: SegmentComponent,
directory: &mut dyn Directory, ) -> result::Result<WritePtr, OpenWriteError> {
) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component); let path = self.relative_path(component);
directory.open_write(&path) let write = self.index.directory_mut().open_write(&path)?;
Ok(write)
} }
} }
@@ -114,5 +109,5 @@ pub trait SerializableSegment {
/// ///
/// # Returns /// # Returns
/// The number of documents in the segment. /// The number of documents in the segment.
fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32>; fn write(&self, serializer: SegmentSerializer) -> Result<u32>;
} }

View File

@@ -1,97 +0,0 @@
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::OpenReadError;
use crate::directory::ReadOnlySource;
use crate::error::DataCorruption;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
#[derive(Clone)]
struct BundleDirectory {
source_map: Arc<HashMap<PathBuf, ReadOnlySource>>,
}
impl BundleDirectory {
pub fn from_source(source: ReadOnlySource) -> Result<BundleDirectory, DataCorruption> {
let mut index_offset_buf = [0u8; 8];
let (body_idx, footer_offset) = source.split_from_end(8);
index_offset_buf.copy_from_slice(footer_offset.as_slice());
let offset = u64::from_le_bytes(index_offset_buf);
let (body_source, idx_source) = body_idx.split(offset as usize);
let idx: HashMap<PathBuf, (u64, u64)> = serde_json::from_slice(idx_source.as_slice())
.map_err(|err| {
let msg = format!("Failed to read index from bundle. {:?}", err);
DataCorruption::comment_only(msg)
})?;
let source_map: HashMap<PathBuf, ReadOnlySource> = idx
.into_iter()
.map(|(path, (start, stop))| {
let source = body_source.slice(start as usize, stop as usize);
(path, source)
})
.collect();
Ok(BundleDirectory {
source_map: Arc::new(source_map),
})
}
}
impl ReadOnlyDirectory for BundleDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.source_map
.get(path)
.cloned()
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))
}
fn exists(&self, path: &Path) -> bool {
self.source_map.contains_key(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let source = self
.source_map
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))?;
Ok(source.as_slice().to_vec())
}
}
#[cfg(test)]
mod tests {
use super::BundleDirectory;
use crate::directory::{RAMDirectory, ReadOnlyDirectory, TerminatingWrite};
use crate::Directory;
use std::io::Write;
use std::path::Path;
#[test]
fn test_bundle_directory() {
let mut ram_directory = RAMDirectory::default();
let test_path_atomic = Path::new("testpath_atomic");
let test_path_wrt = Path::new("testpath_wrt");
assert!(ram_directory
.atomic_write(test_path_atomic, b"titi")
.is_ok());
{
let mut test_wrt = ram_directory.open_write(test_path_wrt).unwrap();
assert!(test_wrt.write_all(b"toto").is_ok());
assert!(test_wrt.terminate().is_ok());
}
let mut dest_directory = RAMDirectory::default();
let bundle_path = Path::new("bundle");
let mut wrt = dest_directory.open_write(bundle_path).unwrap();
assert!(ram_directory.serialize_bundle(&mut wrt).is_ok());
assert!(wrt.terminate().is_ok());
let source = dest_directory.open_read(bundle_path).unwrap();
let bundle_directory = BundleDirectory::from_source(source).unwrap();
assert_eq!(
&bundle_directory.atomic_read(test_path_atomic).unwrap()[..],
b"titi"
);
assert_eq!(
&bundle_directory.open_read(test_path_wrt).unwrap()[..],
b"toto"
);
}
}

View File

@@ -100,30 +100,6 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
     }
 }
-pub trait ReadOnlyDirectory {
-    /// Opens a virtual file for read.
-    ///
-    /// Once a virtual file is open, its data may not
-    /// change.
-    ///
-    /// Specifically, subsequent writes or flushes should
-    /// have no effect on the returned `ReadOnlySource` object.
-    ///
-    /// You should only use this to read files created with [Directory::open_write].
-    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
-
-    /// Returns true iff the file exists
-    fn exists(&self, path: &Path) -> bool;
-
-    /// Reads the full content file that has been written using
-    /// atomic_write.
-    ///
-    /// This should only be used for small files.
-    ///
-    /// You should only use this to read files created with [Directory::atomic_write].
-    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
-}
 /// Write-once read many (WORM) abstraction for where
 /// tantivy's data should be stored.
 ///
@@ -134,9 +110,18 @@ pub trait ReadOnlyDirectory
 /// - The [`RAMDirectory`](struct.RAMDirectory.html), which
 /// should be used mostly for tests.
 ///
-pub trait Directory:
-    DirectoryClone + ReadOnlyDirectory + fmt::Debug + Send + Sync + 'static
-{
+pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
+    /// Opens a virtual file for read.
+    ///
+    /// Once a virtual file is open, its data may not
+    /// change.
+    ///
+    /// Specifically, subsequent writes or flushes should
+    /// have no effect on the returned `ReadOnlySource` object.
+    ///
+    /// You should only use this to read files created with [`open_write`]
+    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
     /// Removes a file
     ///
     /// Removing a file will not affect an eventual
@@ -146,6 +131,9 @@ pub trait Directory:
     /// `DeleteError::DoesNotExist`.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
+    /// Returns true iff the file exists
+    fn exists(&self, path: &Path) -> bool;
     /// Opens a writer for the *virtual file* associated with
     /// a Path.
     ///
@@ -167,6 +155,14 @@ pub trait Directory:
     /// The file may not previously exist.
     fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
+    /// Reads the full content file that has been written using
+    /// atomic_write.
+    ///
+    /// This should only be used for small files.
+    ///
+    /// You should only use this to read files created with [`atomic_write`]
+    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
     /// Atomically replace the content of a file with data.
     ///
     /// This call ensures that reads can never *observe*
@@ -201,7 +197,7 @@ pub trait Directory:
     /// Registers a callback that will be called whenever a change on the `meta.json`
     /// using the `atomic_write` API is detected.
     ///
-    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
+    /// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other
     /// hand, undefined.
     ///
     /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
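
A small sketch (not from this commit) of the `atomic_write`/`atomic_read` pairing documented above, using the in-memory `RAMDirectory`; the `RAMDirectory::create()` constructor is assumed.

```rust
use std::path::Path;
use tantivy::directory::RAMDirectory;
use tantivy::Directory;

fn atomic_roundtrip() -> tantivy::Result<()> {
    let mut dir = RAMDirectory::create();
    let path = Path::new("meta.json");
    // atomic_write replaces the whole file content in a single step.
    dir.atomic_write(path, br#"{"segments":[]}"#)?;
    // atomic_read returns that content; it is intended for small files
    // such as meta.json.
    let bytes = dir.atomic_read(path).expect("file was just written");
    assert_eq!(&bytes[..], &br#"{"segments":[]}"#[..]);
    Ok(())
}
```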

View File

@@ -1,4 +1,3 @@
use crate::Version;
use std::error::Error as StdError; use std::error::Error as StdError;
use std::fmt; use std::fmt;
use std::io; use std::io;
@@ -157,65 +156,6 @@ impl StdError for OpenWriteError {
} }
} }
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility {
/// This library cannot decompress the index found on disk
CompressionMismatch {
/// Compression algorithm used by the current version of tantivy
library_compression_format: String,
/// Compression algorithm that was used to serialise the index
index_compression_format: String,
},
/// The index format found on disk isn't supported by this version of the library
IndexMismatch {
/// Version used by the library
library_version: Version,
/// Version the index was built with
index_version: Version,
},
}
impl fmt::Debug for Incompatibility {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
Incompatibility::CompressionMismatch {
library_compression_format,
index_compression_format,
} => {
let err = format!(
"Library was compiled with {:?} compression, index was compressed with {:?}",
library_compression_format, index_compression_format
);
let advice = format!(
"Change the feature flag to {:?} and rebuild the library",
index_compression_format
);
write!(f, "{}. {}", err, advice)?;
}
Incompatibility::IndexMismatch {
library_version,
index_version,
} => {
let err = format!(
"Library version: {}, index version: {}",
library_version.index_format_version, index_version.index_format_version
);
// TODO make a more useful error message
// include the version range that supports this index_format_version
let advice = format!(
"Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
and rebuild your project.",
index_version.index_format_version, index_version.major, index_version.minor
);
write!(f, "{}. {}", err, advice)?;
}
}
Ok(())
}
}
/// Error that may occur when accessing a file read /// Error that may occur when accessing a file read
#[derive(Debug)] #[derive(Debug)]
pub enum OpenReadError { pub enum OpenReadError {
@@ -224,8 +164,6 @@ pub enum OpenReadError {
/// Any kind of IO error that happens when /// Any kind of IO error that happens when
/// interacting with the underlying IO device. /// interacting with the underlying IO device.
IOError(IOError), IOError(IOError),
/// This library doesn't support the index version found on disk
IncompatibleIndex(Incompatibility),
} }
impl From<IOError> for OpenReadError { impl From<IOError> for OpenReadError {
@@ -245,9 +183,19 @@ impl fmt::Display for OpenReadError {
"an io error occurred while opening a file for reading: '{}'", "an io error occurred while opening a file for reading: '{}'",
err err
), ),
OpenReadError::IncompatibleIndex(ref footer) => { }
write!(f, "Incompatible index format: {:?}", footer) }
} }
impl StdError for OpenReadError {
fn description(&self) -> &str {
"error occurred while opening a file for reading"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err),
} }
} }
} }
@@ -268,12 +216,6 @@ impl From<IOError> for DeleteError {
} }
} }
impl From<Incompatibility> for OpenReadError {
fn from(incompatibility: Incompatibility) -> Self {
OpenReadError::IncompatibleIndex(incompatibility)
}
}
impl fmt::Display for DeleteError { impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {

View File

@@ -1,175 +1,159 @@
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility;
use crate::directory::read_only_source::ReadOnlySource; use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite}; use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version; use byteorder::{ByteOrder, LittleEndian};
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher; use crc32fast::Hasher;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
type CrcHashU32 = u32; const COMMON_FOOTER_SIZE: usize = 4 * 5;
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct Footer { pub struct Footer {
pub version: Version, pub tantivy_version: (u32, u32, u32),
pub meta: String, pub meta: String,
pub versioned_footer: VersionedFooter, pub versioned_footer: VersionedFooter,
} }
/// Serialises the footer to a byte-array
/// - versioned_footer_len : 4 bytes
/// - versioned_footer: variable bytes
/// - meta_len: 4 bytes
/// - meta: variable bytes
/// - version_len: 4 bytes
/// - version json: variable bytes
impl BinarySerializable for Footer {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
BinarySerializable::serialize(&self.versioned_footer, writer)?;
BinarySerializable::serialize(&self.meta, writer)?;
let version_string =
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
BinarySerializable::serialize(&version_string, writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let versioned_footer = VersionedFooter::deserialize(reader)?;
let meta = String::deserialize(reader)?;
let version_json = String::deserialize(reader)?;
let version = serde_json::from_str(&version_json)?;
Ok(Footer {
version,
meta,
versioned_footer,
})
}
}
impl Footer { impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self { pub fn new(versioned_footer: VersionedFooter) -> Self {
let version = crate::VERSION.clone(); let tantivy_version = (
let meta = version.to_string(); env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer { Footer {
version, tantivy_version,
meta, meta: format!(
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer, versioned_footer,
} }
} }
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> { pub fn to_bytes(&self) -> Vec<u8> {
let mut counting_write = CountingWriter::wrap(&mut write); let mut res = self.versioned_footer.to_bytes();
self.serialize(&mut counting_write)?; res.extend_from_slice(self.meta.as_bytes());
let written_len = counting_write.written_bytes(); let len = res.len();
write.write_u32::<LittleEndian>(written_len as u32)?; res.resize(len + COMMON_FOOTER_SIZE, 0);
Ok(()) let mut common_footer = &mut res[len..];
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
} }
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> { pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
if source.len() < 4 { let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::UnexpectedEof, io::ErrorKind::UnexpectedEof,
format!( format!(
"File corrupted. The file is smaller than 4 bytes (len={}).", "File corrupted. The footer len is {}, while the entire file len is {}",
source.len() size, len
), ),
)); ));
} }
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES); let footer = &data[len - size as usize..];
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize; let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let body_len = body_footer.len() - footer_len; let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let (body, footer_data) = body_footer.split(body_len); let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let mut cursor = footer_data.as_slice(); let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
let footer = Footer::deserialize(&mut cursor)?; Ok(Footer {
Ok((footer, body)) tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
} }
/// Confirms that the index will be read correctly by this version of tantivy pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory let footer = Footer::from_bytes(source.as_slice())?;
pub fn is_compatible(&self) -> Result<(), Incompatibility> { let reader = source.slice_to(source.as_slice().len() - footer.size());
let library_version = crate::version(); Ok((footer, reader))
match &self.versioned_footer { }
VersionedFooter::V1 {
crc32: _crc, pub fn size(&self) -> usize {
store_compression: compression, self.versioned_footer.size() as usize + self.meta.len() + 20
} => {
if &library_version.store_compression != compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: compression.to_string(),
});
}
Ok(())
}
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
library_version: library_version.clone(),
index_version: self.version.clone(),
}),
}
} }
} }
/// Footer that includes a crc32 hash that enables us to checksum files in the index
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter { pub enum VersionedFooter {
UnknownVersion, UnknownVersion { version: u32, size: u32 },
V1 { V0(u32), // crc
crc32: CrcHashU32,
store_compression: String,
},
}
impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new();
match self {
VersionedFooter::V1 {
crc32,
store_compression: compression,
} => {
// Serializes a valid `VersionedFooter`, or returns an error if the version is unknown
// [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?;
}
VersionedFooter::UnknownVersion => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ",
));
}
}
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
writer.write_all(&buf[..])?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let len = VInt::deserialize(reader)?.0 as usize;
let mut buf = vec![0u8; len];
reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?;
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression: compression,
})
} else {
Ok(VersionedFooter::UnknownVersion)
}
}
} }
impl VersionedFooter { impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> { pub fn to_bytes(&self) -> Vec<u8> {
match self { match self {
VersionedFooter::V1 { crc32, .. } => Some(*crc32), VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
}
}
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
assert!(footer.len() >= 4);
let version = LittleEndian::read_u32(footer);
match version {
0 => {
if footer.len() == 8 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
} else {
Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The versioned footer len is {}, while it should be 8",
footer.len()
),
))
}
}
version => Ok(VersionedFooter::UnknownVersion {
version,
size: footer.len() as u32,
}),
}
}
pub fn size(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 8,
VersionedFooter::UnknownVersion { size, .. } => *size,
}
}
pub fn version(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 0,
VersionedFooter::UnknownVersion { version, .. } => *version,
}
}
pub fn crc(&self) -> Option<u32> {
match self {
VersionedFooter::V0(crc) => Some(*crc),
VersionedFooter::UnknownVersion { .. } => None, VersionedFooter::UnknownVersion { .. } => None,
} }
} }
@@ -205,135 +189,25 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> { impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> { fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize(); let crc = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V1 {
crc32, let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
store_compression: crate::store::COMPRESSION.to_string(),
});
let mut writer = self.writer.take().unwrap(); let mut writer = self.writer.take().unwrap();
footer.append_footer(&mut writer)?; writer.write_all(&footer)?;
writer.terminate() writer.terminate()
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::CrcHashU32;
use super::FooterProxy;
use crate::common::BinarySerializable;
use crate::directory::footer::{Footer, VersionedFooter}; use crate::directory::footer::{Footer, VersionedFooter};
use crate::directory::TerminatingWrite;
use byteorder::{ByteOrder, LittleEndian};
use regex::Regex;
#[test]
fn test_versioned_footer() {
let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok());
assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
if let VersionedFooter::V1 {
crc32: _,
store_compression,
} = footer.versioned_footer
{
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version());
}
#[test] #[test]
fn test_serialize_deserialize_footer() { fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new(); let crc = 123456;
let crc32 = 123456u32; let footer = Footer::new(VersionedFooter::V0(crc));
let footer: Footer = Footer::new(VersionedFooter::V1 { let footer_bytes = footer.to_bytes();
crc32,
store_compression: "lz4".to_string(),
});
footer.serialize(&mut buffer).unwrap();
let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
assert_eq!(footer_deser, footer);
}
#[test] assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
fn footer_length() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let mut buf = Vec::new();
versioned_footer.serialize(&mut buf).unwrap();
assert_eq!(buf.len(), 13);
let footer = Footer::new(versioned_footer);
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
assert!(regex_ptn.is_match(&footer.meta));
}
#[test]
fn versioned_footer_from_bytes() {
let v_footer_bytes = vec![
// versionned footer length
12 | 128,
// index format version
1,
0,
0,
0,
// crc 32
12,
35,
89,
18,
// compression format
3 | 128,
b'l',
b'z',
b'4',
];
let mut cursor = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc,
store_compression: "lz4".to_string(),
};
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buffer = Vec::new();
assert!(versioned_footer.serialize(&mut buffer).is_ok());
assert_eq!(&v_footer_bytes[..], &buffer[..]);
}
#[test]
fn versioned_footer_panic() {
let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
let mut b = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
assert!(b.is_empty());
let expected_versioned_footer = VersionedFooter::UnknownVersion;
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buf = Vec::new();
assert!(versioned_footer.serialize(&mut buf).is_err());
}
#[test]
#[cfg(not(feature = "lz4"))]
fn compression_mismatch() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let footer = Footer::new(versioned_footer);
let res = footer.is_compatible();
assert!(res.is_err());
} }
} }
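
The footer handling above comes down to a length-suffix convention: `append_footer` serializes the footer after the body and then appends the footer length as a trailing little-endian u32, and `extract_footer` reads that trailing u32 to split the body from the footer. A minimal standalone sketch of that convention (illustrative names, standard library only, not the tantivy API):

    use std::convert::TryInto;

    // Append `footer` to `body`, then the footer length as a little-endian u32,
    // mirroring the layout written by `Footer::append_footer`.
    fn append_footer(body: &mut Vec<u8>, footer: &[u8]) {
        body.extend_from_slice(footer);
        body.extend_from_slice(&(footer.len() as u32).to_le_bytes());
    }

    // Read the trailing u32 and split the data into (body, footer),
    // mirroring `Footer::extract_footer`.
    fn split_footer(data: &[u8]) -> Option<(&[u8], &[u8])> {
        if data.len() < 4 {
            return None;
        }
        let (rest, len_bytes) = data.split_at(data.len() - 4);
        let footer_len = u32::from_le_bytes(len_bytes.try_into().ok()?) as usize;
        if rest.len() < footer_len {
            return None;
        }
        Some(rest.split_at(rest.len() - footer_len))
    }

    fn main() {
        let mut file = b"segment body".to_vec();
        append_footer(&mut file, b"footer bytes");
        let (body, footer) = split_footer(&file).unwrap();
        assert_eq!(body, &b"segment body"[..]);
        assert_eq!(footer, &b"footer bytes"[..]);
    }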


@@ -2,15 +2,13 @@ use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
use crate::directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle}; use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
use crate::Result;
use crate::directory::directory::ReadOnlyDirectory;
use crc32fast::Hasher; use crc32fast::Hasher;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
@@ -66,7 +64,7 @@ fn save_managed_paths(
impl ManagedDirectory { impl ManagedDirectory {
/// Wraps a directory as managed directory. /// Wraps a directory as managed directory.
pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> { pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
match directory.atomic_read(&MANAGED_FILEPATH) { match directory.atomic_read(&MANAGED_FILEPATH) {
Ok(data) => { Ok(data) => {
let managed_files_json = String::from_utf8_lossy(&data); let managed_files_json = String::from_utf8_lossy(&data);
@@ -89,11 +87,6 @@ impl ManagedDirectory {
meta_informations: Arc::default(), meta_informations: Arc::default(),
}), }),
Err(OpenReadError::IOError(e)) => Err(From::from(e)), Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json`
// does not have a footer, so incompatibility cannot be detected.
Err(crate::TantivyError::IncompatibleIndex(incompatibility))
}
} }
} }
@@ -111,10 +104,7 @@ impl ManagedDirectory {
/// If a file cannot be deleted (for permission reasons for instance) /// If a file cannot be deleted (for permission reasons for instance)
/// an error is simply logged, and the file remains in the list of managed /// an error is simply logged, and the file remains in the list of managed
/// files. /// files.
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>( pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
&mut self,
get_living_files: L,
) -> crate::Result<GarbageCollectionResult> {
info!("Garbage collect"); info!("Garbage collect");
let mut files_to_delete = vec![]; let mut files_to_delete = vec![];
@@ -140,25 +130,19 @@ impl ManagedDirectory {
// 2) writer change meta.json (for instance after a merge or a commit) // 2) writer change meta.json (for instance after a merge or a commit)
// 3) gc kicks in. // 3) gc kicks in.
// 4) gc removes a file that was useful for process B, before process B opened it. // 4) gc removes a file that was useful for process B, before process B opened it.
match self.acquire_lock(&META_LOCK) { if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
Ok(_meta_lock) => { let living_files = get_living_files();
let living_files = get_living_files(); for managed_path in &meta_informations_rlock.managed_paths {
for managed_path in &meta_informations_rlock.managed_paths { if !living_files.contains(managed_path) {
if !living_files.contains(managed_path) { files_to_delete.push(managed_path.clone());
files_to_delete.push(managed_path.clone());
}
} }
} }
Err(err) => { } else {
error!("Failed to acquire lock for GC"); error!("Failed to acquire lock for GC");
return Err(crate::Error::from(err));
}
} }
} }
let mut failed_to_delete_files = vec![];
let mut deleted_files = vec![]; let mut deleted_files = vec![];
for file_to_delete in files_to_delete { for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) { match self.delete(&file_to_delete) {
Ok(_) => { Ok(_) => {
@@ -168,10 +152,9 @@ impl ManagedDirectory {
Err(file_error) => { Err(file_error) => {
match file_error { match file_error {
DeleteError::FileDoesNotExist(_) => { DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete.clone()); deleted_files.push(file_to_delete);
} }
DeleteError::IOError(_) => { DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") { if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file // On windows, delete is expected to fail if the file
// is mmapped. // is mmapped.
@@ -194,13 +177,10 @@ impl ManagedDirectory {
for delete_file in &deleted_files { for delete_file in &deleted_files {
managed_paths_write.remove(delete_file); managed_paths_write.remove(delete_file);
} }
save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?; if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
error!("Failed to save the list of managed files.");
}
} }
Ok(GarbageCollectionResult {
deleted_files,
failed_to_delete_files,
})
} }
/// Registers a file as managed /// Registers a file as managed
@@ -265,6 +245,13 @@ impl ManagedDirectory {
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
@@ -282,10 +269,18 @@ impl Directory for ManagedDirectory {
self.directory.atomic_write(path, data) self.directory.atomic_write(path, data)
} }
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
self.directory.delete(path) self.directory.delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> { fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock) self.directory.acquire_lock(lock)
} }
@@ -295,24 +290,6 @@ impl Directory for ManagedDirectory {
} }
} }
impl ReadOnlyDirectory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
}
impl Clone for ManagedDirectory { impl Clone for ManagedDirectory {
fn clone(&self) -> ManagedDirectory { fn clone(&self) -> ManagedDirectory {
ManagedDirectory { ManagedDirectory {
@@ -326,9 +303,7 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests_mmap_specific {
use crate::directory::{ use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
Directory, ManagedDirectory, MmapDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;
@@ -353,7 +328,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(managed_directory.exists(test_path2)); assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect(); let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); managed_directory.garbage_collect(|| living_files);
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
} }
@@ -363,7 +338,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = HashSet::new(); let living_files: HashSet<PathBuf> = HashSet::new();
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
} }
@@ -385,9 +360,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap(); let _mmap_read = managed_directory.open_read(test_path1).unwrap();
assert!(managed_directory managed_directory.garbage_collect(|| living_files.clone());
.garbage_collect(|| living_files.clone())
.is_ok());
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
// On Windows, gc should try and fail the file as it is mmapped. // On Windows, gc should try and fail the file as it is mmapped.
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
@@ -395,7 +368,7 @@ mod tests_mmap_specific {
drop(_mmap_read); drop(_mmap_read);
// The file should still be in the list of managed file and // The file should still be in the list of managed file and
// eventually be deleted once mmap is released. // eventually be deleted once mmap is released.
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
} else { } else {
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
@@ -420,8 +393,6 @@ mod tests_mmap_specific {
write.write_all(&[3u8, 4u8, 5u8]).unwrap(); write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap(); write.terminate().unwrap();
let read_source = managed_directory.open_read(test_path2).unwrap();
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty()); assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone(); let mut corrupted_path = tempdir_path.clone();
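
The new `garbage_collect` above returns a `GarbageCollectionResult` instead of only logging failures. The core of the pass is a simple partition of managed paths: everything not in the set of living files is a deletion candidate, and each candidate ends up in either the deleted or the failed-to-delete list. A rough self-contained sketch of that partitioning (hypothetical names, not the tantivy API):

    use std::collections::HashSet;
    use std::path::PathBuf;

    struct GcOutcome {
        deleted: Vec<PathBuf>,
        failed: Vec<PathBuf>,
    }

    // Partition managed files into deleted / failed-to-delete, keeping living files.
    fn collect(
        managed: &HashSet<PathBuf>,
        living: &HashSet<PathBuf>,
        delete: impl Fn(&PathBuf) -> bool,
    ) -> GcOutcome {
        let mut outcome = GcOutcome { deleted: vec![], failed: vec![] };
        for path in managed {
            if living.contains(path) {
                continue;
            }
            if delete(path) {
                outcome.deleted.push(path.clone());
            } else {
                outcome.failed.push(path.clone());
            }
        }
        outcome
    }

    fn main() {
        let managed: HashSet<PathBuf> = ["a.idx", "b.idx"].iter().map(|s| PathBuf::from(*s)).collect();
        let living: HashSet<PathBuf> = ["a.idx"].iter().map(|s| PathBuf::from(*s)).collect();
        let outcome = collect(&managed, &living, |_| true);
        assert_eq!(outcome.deleted, vec![PathBuf::from("b.idx")]);
        assert!(outcome.failed.is_empty());
    }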


@@ -6,7 +6,6 @@ use self::notify::RawEvent;
use self::notify::RecursiveMode; use self::notify::RecursiveMode;
use self::notify::Watcher; use self::notify::Watcher;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError, DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
@@ -132,13 +131,14 @@ impl MmapCache {
} }
self.cache.remove(full_path); self.cache.remove(full_path);
self.counters.miss += 1; self.counters.miss += 1;
let mmap_opt = open_mmap(full_path)?; Ok(if let Some(mmap) = open_mmap(full_path)? {
Ok(mmap_opt.map(|mmap| {
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap)); let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
let mmap_weak = Arc::downgrade(&mmap_arc); let mmap_weak = Arc::downgrade(&mmap_arc);
self.cache.insert(full_path.to_owned(), mmap_weak); self.cache.insert(full_path.to_owned(), mmap_weak);
mmap_arc Some(mmap_arc)
})) } else {
None
})
} }
} }
@@ -174,7 +174,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point. // We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() { if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH { if filename == *META_FILEPATH {
let _ = watcher_router_clone.broadcast(); watcher_router_clone.broadcast();
} }
} }
} }
@@ -408,6 +408,24 @@ impl TerminatingWrite for SafeFileWriter {
} }
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
/// removed before the file is deleted. /// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
@@ -426,6 +444,11 @@ impl Directory for MmapDirectory {
} }
} }
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path); debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -456,6 +479,25 @@ impl Directory for MmapDirectory {
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path); debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -489,50 +531,6 @@ impl Directory for MmapDirectory {
} }
} }
impl ReadOnlyDirectory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -540,15 +538,16 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use std::fs; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
#[test] #[test]
fn test_open_non_existent_path() { fn test_open_non_existant_path() {
assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err()); assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
} }
@@ -641,18 +640,13 @@ mod tests {
let tmp_dir = tempfile::TempDir::new().unwrap(); let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dirpath = tmp_dir.path().to_owned(); let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap(); let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join(*META_FILEPATH); let tmp_file = tmp_dirpath.join("coucou");
let _handle = watch_wrapper.watch(Box::new(move || { let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
})); }));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle2 = watch_wrapper.watch(Box::new(move || {
let _ = sender.send(());
}));
assert_eq!(counter.load(Ordering::SeqCst), 0); assert_eq!(counter.load(Ordering::SeqCst), 0);
fs::write(&tmp_file, b"whateverwilldo").unwrap(); fs::write(&tmp_file, b"whateverwilldo").unwrap();
assert!(receiver.recv().is_ok()); thread::sleep(Duration::new(0, 1_000u32));
assert!(counter.load(Ordering::SeqCst) >= 1);
} }
#[test] #[test]
@@ -661,42 +655,34 @@ mod tests {
let mut schema_builder: SchemaBuilder = Schema::builder(); let mut schema_builder: SchemaBuilder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
{ {
let index = Index::create(mmap_directory.clone(), schema).unwrap(); let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut log_merge_policy = LogMergePolicy::default(); for _num_commits in 0..16 {
log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy));
for _num_commits in 0..10 {
for _ in 0..10 { for _ in 0..10 {
index_writer.add_document(doc!(text_field=>"abc")); index_writer.add_document(doc!(text_field=>"abc"));
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into() .try_into()
.unwrap(); .unwrap();
for _ in 0..30 {
for _ in 0..4 {
index_writer.add_document(doc!(text_field=>"abc")); index_writer.add_document(doc!(text_field=>"abc"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
} }
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
let num_segments = reader.searcher().segment_readers().len(); let num_segments = reader.searcher().segment_readers().len();
assert!(num_segments <= 4); assert_eq!(num_segments, 4);
assert_eq!( assert_eq!(
num_segments * 7, num_segments * 7,
mmap_directory.get_cache_info().mmapped.len() mmap_directory.get_cache_info().mmapped.len()
); );
} }
assert!(mmap_directory.get_cache_info().mmapped.is_empty()); assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
} }
} }
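
`MmapCache::get_mmap` above relies on the cache holding only weak references: an mmap stays cached as long as some reader still holds the `Arc`, and once every strong reference is gone the next lookup is a miss that re-opens the file. A small self-contained sketch of that weak-value cache pattern (assumed simplification, not the real MmapCache):

    use std::collections::HashMap;
    use std::sync::{Arc, Weak};

    struct WeakCache<K, V> {
        entries: HashMap<K, Weak<V>>,
        hits: usize,
        misses: usize,
    }

    impl<K: std::hash::Hash + Eq, V> WeakCache<K, V> {
        // Return the cached value if its Weak pointer is still alive,
        // otherwise "open" a new one and cache a Weak reference to it.
        fn get_or_open(&mut self, key: K, open: impl FnOnce() -> V) -> Arc<V> {
            if let Some(weak) = self.entries.get(&key) {
                if let Some(value) = weak.upgrade() {
                    self.hits += 1;
                    return value;
                }
            }
            self.misses += 1;
            let value = Arc::new(open());
            self.entries.insert(key, Arc::downgrade(&value));
            value
        }
    }

    fn main() {
        let mut cache: WeakCache<&'static str, Vec<u8>> =
            WeakCache { entries: HashMap::new(), hits: 0, misses: 0 };
        let first = cache.get_or_open("seg.idx", || vec![1, 2, 3]);
        let second = cache.get_or_open("seg.idx", || unreachable!());
        assert!(Arc::ptr_eq(&first, &second));
        drop((first, second));
        // All strong references are gone: the next lookup re-opens the data.
        let _third = cache.get_or_open("seg.idx", || vec![1, 2, 3]);
        assert_eq!((cache.hits, cache.misses), (1, 2));
    }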


@@ -7,7 +7,6 @@ WORM directory abstraction.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_directory; mod mmap_directory;
mod bundle_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod footer; mod footer;
@@ -20,26 +19,13 @@ mod watch_event_router;
pub mod error; pub mod error;
pub use self::directory::DirectoryLock; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone, ReadOnlyDirectory}; pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle}; pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{self, BufWriter, Write};
use std::path::PathBuf;
/// Outcome of the Garbage collection
pub struct GarbageCollectionResult {
/// List of files that were deleted in this cycle
pub deleted_files: Vec<PathBuf>,
/// List of files that were scheduled for deletion in this cycle,
/// but for which deletion did not work. This typically happens on Windows,
/// as deleting a memory-mapped file is forbidden.
///
/// If a searcher is still held, a file cannot be deleted.
/// This is not considered a bug, the file will simply be deleted
/// in the next GC.
pub failed_to_delete_files: Vec<PathBuf>,
}
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
@@ -47,9 +33,6 @@ pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory; pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly /// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(()); pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer /// Trait used to indicate when no more write need to be done on a writer
@@ -80,13 +63,6 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
} }
} }
#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write


@@ -1,6 +1,4 @@
use crate::common::CountingWriter;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
@@ -117,22 +115,6 @@ impl InnerDirectory {
fn total_mem_usage(&self) -> usize { fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum() self.fs.values().map(|f| f.len()).sum()
} }
fn serialize_bundle(&self, wrt: &mut WritePtr) -> io::Result<()> {
let mut counting_writer = CountingWriter::wrap(wrt);
let mut file_index: HashMap<PathBuf, (u64, u64)> = HashMap::default();
for (path, source) in &self.fs {
let start = counting_writer.written_bytes();
counting_writer.write_all(source.as_slice())?;
let stop = counting_writer.written_bytes();
file_index.insert(path.to_path_buf(), (start, stop));
}
let index_offset = counting_writer.written_bytes();
serde_json::to_writer(&mut counting_writer, &file_index)?;
let index_offset_buffer = index_offset.to_le_bytes();
counting_writer.write_all(&index_offset_buffer[..])?;
Ok(())
}
} }
impl fmt::Debug for RAMDirectory { impl fmt::Debug for RAMDirectory {
@@ -162,18 +144,13 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize { pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage() self.fs.read().unwrap().total_mem_usage()
} }
/// Serialize the RAMDirectory into a bundle.
///
/// This method will fail, write nothing, and return an error if a
/// clone of this repository exists.
pub fn serialize_bundle(self, wrt: &mut WritePtr) -> io::Result<()> {
let inner_directory_rlock = self.fs.read().unwrap();
inner_directory_rlock.serialize_bundle(wrt)
}
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| { fail_point!("RAMDirectory::delete", |_| {
use crate::directory::error::IOError; use crate::directory::error::IOError;
@@ -183,6 +160,10 @@ impl Directory for RAMDirectory {
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap(); let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
@@ -196,6 +177,10 @@ impl Directory for RAMDirectory {
} }
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
@@ -206,11 +191,11 @@ impl Directory for RAMDirectory {
// Reserve the path to prevent calls to .write() to succeed. // Reserve the path to prevent calls to .write() to succeed.
self.fs.write().unwrap().write(path_buf.clone(), &[]); self.fs.write().unwrap().write(path_buf.clone(), &[]);
let mut vec_writer = VecWriter::new(path_buf, self.clone()); let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
vec_writer.write_all(data)?; vec_writer.write_all(data)?;
vec_writer.flush()?; vec_writer.flush()?;
if path == Path::new(&*META_FILEPATH) { if path == Path::new(&*META_FILEPATH) {
let _ = self.fs.write().unwrap().watch_router.broadcast(); self.fs.write().unwrap().watch_router.broadcast();
} }
Ok(()) Ok(())
} }
@@ -219,17 +204,3 @@ impl Directory for RAMDirectory {
Ok(self.fs.write().unwrap().watch(watch_callback)) Ok(self.fs.write().unwrap().watch(watch_callback))
} }
} }
impl ReadOnlyDirectory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
}
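
`serialize_bundle` above fixes a simple bundle layout: the file bodies are written back to back, followed by a JSON index mapping every path to its (start, stop) byte range, followed by the byte offset of that index as a trailing little-endian u64 so a reader can locate the index from the end. A rough dependency-free sketch of that layout (the real code serializes the index with serde_json; the debug-formatted index here is only a stand-in):

    use std::collections::HashMap;
    use std::convert::TryInto;

    fn bundle(files: &[(&str, &[u8])]) -> Vec<u8> {
        let mut out = Vec::new();
        let mut index: HashMap<String, (u64, u64)> = HashMap::new();
        // 1. Concatenate the file bodies, recording each (start, stop) range.
        for (path, bytes) in files {
            let start = out.len() as u64;
            out.extend_from_slice(bytes);
            index.insert((*path).to_string(), (start, out.len() as u64));
        }
        // 2. Write the index (stand-in for `serde_json::to_writer(&mut out, &index)`).
        let index_offset = out.len() as u64;
        out.extend_from_slice(format!("{:?}", index).as_bytes());
        // 3. Append the offset of the index as 8 little-endian bytes.
        out.extend_from_slice(&index_offset.to_le_bytes());
        out
    }

    fn main() {
        let data = bundle(&[("a.idx", &b"aaaa"[..]), ("b.idx", &b"bb"[..])]);
        let index_offset = u64::from_le_bytes(data[data.len() - 8..].try_into().unwrap());
        assert_eq!(index_offset, 6); // 4 bytes of "a.idx" + 2 bytes of "b.idx"
        assert_eq!(&data[..6], &b"aaaabb"[..]);
    }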


@@ -70,12 +70,6 @@ impl ReadOnlySource {
(left, right) (left, right)
} }
/// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Creates a ReadOnlySource that is just a /// Creates a ReadOnlySource that is just a
/// view over a slice of the data. /// view over a slice of the data.
/// ///


@@ -1,117 +1,25 @@
use super::*; use super::*;
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write; use std::io::Write;
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::sync::atomic::Ordering;
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time;
use std::time::Duration; use std::time::Duration;
#[cfg(feature = "mmap")] #[test]
mod mmap_directory_tests { fn test_ram_directory() {
use crate::directory::MmapDirectory; let mut ram_directory = RAMDirectory::create();
test_directory(&mut ram_directory);
type DirectoryImpl = MmapDirectory;
fn make_directory() -> DirectoryImpl {
MmapDirectory::create_from_tempdir().unwrap()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
} }
mod ram_directory_tests { #[test]
use crate::directory::RAMDirectory; #[cfg(feature = "mmap")]
fn test_mmap_directory() {
type DirectoryImpl = RAMDirectory; let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
test_directory(&mut mmap_directory);
fn make_directory() -> DirectoryImpl {
RAMDirectory::default()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
} }
#[test] #[test]
@@ -191,39 +99,48 @@ fn test_directory_delete(directory: &mut dyn Directory) {
assert!(directory.delete(&test_path).is_err()); assert!(directory.delete(&test_path).is_err());
} }
fn test_directory(directory: &mut dyn Directory) {
test_simple(directory);
test_rewrite_forbidden(directory);
test_write_create_the_file(directory);
test_directory_delete(directory);
test_lock_non_blocking(directory);
test_lock_blocking(directory);
test_watch(directory);
}
fn test_watch(directory: &mut dyn Directory) { fn test_watch(directory: &mut dyn Directory) {
let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let (sender, receiver) = crossbeam::channel::unbounded();
let watch_callback = Box::new(move || { let watch_callback = Box::new(move || {
counter_clone.fetch_add(1, SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
// This callback is used to synchronize watching in our unit test. assert!(directory
// We bind it to a variable because the callback is removed when that .atomic_write(Path::new("meta.json"), b"random_test_data")
// handle is dropped. .is_ok());
let watch_handle = directory.watch(watch_callback).unwrap(); thread::sleep(Duration::new(0, 10_000));
let _progress_listener = directory assert_eq!(0, counter.load(Ordering::SeqCst));
.watch(Box::new(move || {
let val = num_progress.fetch_add(1, SeqCst);
let _ = sender.send(val);
}))
.unwrap();
let watch_handle = directory.watch(watch_callback).unwrap();
for i in 0..10 { for i in 0..10 {
assert_eq!(i, counter.load(SeqCst)); assert_eq!(i, counter.load(Ordering::SeqCst));
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2") .atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok()); .is_ok());
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i)); for _ in 0..1_000 {
assert_eq!(i + 1, counter.load(SeqCst)); if counter.load(Ordering::SeqCst) > i {
break;
}
thread::sleep(Duration::from_millis(10));
}
assert_eq!(i + 1, counter.load(Ordering::SeqCst));
} }
mem::drop(watch_handle); mem::drop(watch_handle);
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data") .atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok()); .is_ok());
assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok()); thread::sleep(Duration::from_millis(200));
assert_eq!(10, counter.load(SeqCst)); assert_eq!(10, counter.load(Ordering::SeqCst));
} }
fn test_lock_non_blocking(directory: &mut dyn Directory) { fn test_lock_non_blocking(directory: &mut dyn Directory) {
@@ -257,13 +174,9 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
is_blocking: true, is_blocking: true,
}); });
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
let in_thread = Arc::new(AtomicBool::default());
let in_thread_clone = in_thread.clone();
let (sender, receiver) = oneshot::channel();
std::thread::spawn(move || { std::thread::spawn(move || {
//< lock_a_res is sent to the thread. //< lock_a_res is sent to the thread.
in_thread_clone.store(true, SeqCst); std::thread::sleep(time::Duration::from_millis(10));
let _just_sync = block_on(receiver);
// explicitely droping lock_a_res. It would have been sufficient to just force it // explicitely droping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way. // to be part of the move, but the intent seems clearer that way.
drop(lock_a_res); drop(lock_a_res);
@@ -276,18 +189,14 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
}); });
assert!(lock_a_res.is_err()); assert!(lock_a_res.is_err());
} }
let directory_clone = directory.box_clone(); {
let (sender2, receiver2) = oneshot::channel(); // the blocking call should wait for at least 10ms.
let join_handle = std::thread::spawn(move || { let start = time::Instant::now();
assert!(sender2.send(()).is_ok()); let lock_a_res = directory.acquire_lock(&Lock {
let lock_a_res = directory_clone.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,
}); });
assert!(in_thread.load(SeqCst));
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
}); assert!(start.elapsed().subsec_millis() >= 10);
assert!(block_on(receiver2).is_ok()); }
assert!(sender.send(()).is_ok());
assert!(join_handle.join().is_ok());
} }


@@ -1,5 +1,3 @@
use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
@@ -24,20 +22,13 @@ pub struct WatchCallbackList {
#[derive(Clone)] #[derive(Clone)]
pub struct WatchHandle(Arc<WatchCallback>); pub struct WatchHandle(Arc<WatchCallback>);
impl WatchHandle {
/// Create a WatchHandle handle.
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
WatchHandle(watch_callback)
}
}
impl WatchCallbackList { impl WatchCallbackList {
/// Suscribes a new callback and returns a handle that controls the lifetime of the callback. /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle { pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback); let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc); let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
self.router.write().unwrap().push(watch_callback_weak); self.router.write().unwrap().push(watch_callback_weak);
WatchHandle::new(watch_callback_arc) WatchHandle(watch_callback_arc)
} }
fn list_callback(&self) -> Vec<Arc<WatchCallback>> { fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
@@ -56,21 +47,14 @@ impl WatchCallbackList {
} }
/// Triggers all callbacks /// Triggers all callbacks
pub fn broadcast(&self) -> impl Future<Output = ()> { pub fn broadcast(&self) {
let callbacks = self.list_callback(); let callbacks = self.list_callback();
let (sender, receiver) = oneshot::channel();
let result = receiver.unwrap_or_else(|_| ());
if callbacks.is_empty() {
let _ = sender.send(());
return result;
}
let spawn_res = std::thread::Builder::new() let spawn_res = std::thread::Builder::new()
.name("watch-callbacks".to_string()) .name("watch-callbacks".to_string())
.spawn(move || { .spawn(move || {
for callback in callbacks { for callback in callbacks {
callback(); callback();
} }
let _ = sender.send(());
}); });
if let Err(err) = spawn_res { if let Err(err) = spawn_res {
error!( error!(
@@ -78,17 +62,19 @@ impl WatchCallbackList {
err err
); );
} }
result
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use futures::executor::block_on;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time::Duration;
const WAIT_TIME: u64 = 20;
#[test] #[test]
fn test_watch_event_router_simple() { fn test_watch_event_router_simple() {
@@ -98,22 +84,22 @@ mod tests {
let inc_callback = Box::new(move || { let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(1, counter.load(Ordering::SeqCst)); assert_eq!(1, counter.load(Ordering::SeqCst));
block_on(async { watch_event_router.broadcast();
( watch_event_router.broadcast();
watch_event_router.broadcast().await, watch_event_router.broadcast();
watch_event_router.broadcast().await, thread::sleep(Duration::from_millis(WAIT_TIME));
watch_event_router.broadcast().await,
)
});
assert_eq!(4, counter.load(Ordering::SeqCst)); assert_eq!(4, counter.load(Ordering::SeqCst));
mem::drop(handle_a); mem::drop(handle_a);
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst)); assert_eq!(4, counter.load(Ordering::SeqCst));
} }
@@ -129,20 +115,20 @@ mod tests {
}; };
let handle_a = watch_event_router.subscribe(inc_callback(1)); let handle_a = watch_event_router.subscribe(inc_callback(1));
let handle_a2 = watch_event_router.subscribe(inc_callback(10)); let handle_a2 = watch_event_router.subscribe(inc_callback(10));
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async { watch_event_router.broadcast();
futures::join!( watch_event_router.broadcast();
watch_event_router.broadcast(), thread::sleep(Duration::from_millis(WAIT_TIME));
watch_event_router.broadcast()
)
});
assert_eq!(22, counter.load(Ordering::SeqCst)); assert_eq!(22, counter.load(Ordering::SeqCst));
mem::drop(handle_a); mem::drop(handle_a);
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst)); assert_eq!(32, counter.load(Ordering::SeqCst));
mem::drop(handle_a2); mem::drop(handle_a2);
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst)); assert_eq!(32, counter.load(Ordering::SeqCst));
} }
@@ -156,15 +142,14 @@ mod tests {
}); });
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async { watch_event_router.broadcast();
let future1 = watch_event_router.broadcast(); watch_event_router.broadcast();
let future2 = watch_event_router.broadcast(); thread::sleep(Duration::from_millis(WAIT_TIME));
futures::join!(future1, future2)
});
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
thread::sleep(Duration::from_millis(WAIT_TIME));
mem::drop(handle_a); mem::drop(handle_a);
let _ = watch_event_router.broadcast(); watch_event_router.broadcast();
block_on(watch_event_router.broadcast()); thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
} }
} }
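
The `WatchCallbackList` changes above keep the same ownership model: `subscribe` hands the caller an owning handle and the list only stores a `Weak` pointer, so dropping the handle unsubscribes the callback; `broadcast` calls whatever callbacks can still be upgraded (the new version additionally returns a future that resolves once they have run). A minimal sketch of the Weak-based subscription, with illustrative names and without the async part:

    use std::sync::{Arc, Weak};

    type Callback = Box<dyn Fn() + Send + Sync>;

    // Owning handle: dropping it drops the Arc, which invalidates the Weak below.
    struct Handle {
        _callback: Arc<Callback>,
    }

    struct CallbackList {
        subscribers: Vec<Weak<Callback>>,
    }

    impl CallbackList {
        fn subscribe(&mut self, cb: Callback) -> Handle {
            let arc = Arc::new(cb);
            self.subscribers.push(Arc::downgrade(&arc));
            Handle { _callback: arc }
        }

        fn broadcast(&self) {
            for weak in &self.subscribers {
                if let Some(cb) = weak.upgrade() {
                    (*cb)();
                }
            }
        }
    }

    fn main() {
        let mut list = CallbackList { subscribers: Vec::new() };
        let handle = list.subscribe(Box::new(|| println!("meta.json changed")));
        list.broadcast(); // runs the callback once
        drop(handle);
        list.broadcast(); // upgrade fails, nothing runs
    }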


@@ -2,8 +2,8 @@
use std::io; use std::io;
use crate::directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError; use crate::fastfield::FastFieldNotAvailableError;
use crate::query; use crate::query;
use crate::schema; use crate::schema;
@@ -25,10 +25,10 @@ impl DataCorruption {
} }
} }
pub fn comment_only<TS: ToString>(comment: TS) -> DataCorruption { pub fn comment_only(comment: String) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: None, filepath: None,
comment: comment.to_string(), comment,
} }
} }
} }
@@ -80,9 +80,6 @@ pub enum TantivyError {
/// System error. (e.g.: We failed spawning a new thread) /// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
/// Index incompatible with current version of tantivy
#[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility),
} }
impl From<DataCorruption> for TantivyError { impl From<DataCorruption> for TantivyError {
@@ -132,9 +129,6 @@ impl From<OpenReadError> for TantivyError {
match error { match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath), OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error), OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
} }
} }
} }


@@ -1,8 +1,9 @@
use crate::common::{BitSet, HasLen}; use crate::common::HasLen;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::space_usage::ByteCount; use crate::space_usage::ByteCount;
use crate::DocId; use crate::DocId;
use bit_set::BitSet;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -16,7 +17,7 @@ pub fn write_delete_bitset(
) -> io::Result<()> { ) -> io::Result<()> {
let mut byte = 0u8; let mut byte = 0u8;
let mut shift = 0u8; let mut shift = 0u8;
for doc in 0..max_doc { for doc in 0..(max_doc as usize) {
if delete_bitset.contains(doc) { if delete_bitset.contains(doc) {
byte |= 1 << shift; byte |= 1 << shift;
} }
@@ -31,7 +32,7 @@ pub fn write_delete_bitset(
if max_doc % 8 > 0 { if max_doc % 8 > 0 {
writer.write_all(&[byte])?; writer.write_all(&[byte])?;
} }
Ok(()) writer.flush()
} }
/// Set of deleted `DocId`s. /// Set of deleted `DocId`s.
@@ -85,6 +86,7 @@ impl HasLen for DeleteBitSet {
mod tests { mod tests {
use super::*; use super::*;
use crate::directory::*; use crate::directory::*;
use bit_set::BitSet;
use std::path::PathBuf; use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) { fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
@@ -93,26 +95,27 @@ mod tests {
{ {
let mut writer = directory.open_write(&*test_path).unwrap(); let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap(); write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
writer.terminate().unwrap();
} }
let source = directory.open_read(&test_path).unwrap(); {
let delete_bitset = DeleteBitSet::open(source); let source = directory.open_read(&test_path).unwrap();
for doc in 0..max_doc { let delete_bitset = DeleteBitSet::open(source);
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId)); for doc in 0..max_doc as usize {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
} }
assert_eq!(delete_bitset.len(), bitset.len());
} }
#[test] #[test]
fn test_delete_bitset() { fn test_delete_bitset() {
{ {
let mut bitset = BitSet::with_max_value(10); let mut bitset = BitSet::with_capacity(10);
bitset.insert(1); bitset.insert(1);
bitset.insert(9); bitset.insert(9);
test_delete_bitset_helper(&bitset, 10); test_delete_bitset_helper(&bitset, 10);
} }
{ {
let mut bitset = BitSet::with_max_value(8); let mut bitset = BitSet::with_capacity(8);
bitset.insert(1); bitset.insert(1);
bitset.insert(2); bitset.insert(2);
bitset.insert(3); bitset.insert(3);

View File

@@ -33,7 +33,6 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common; use crate::common;
use crate::schema::Cardinality; use crate::schema::Cardinality;
use crate::schema::FieldType; use crate::schema::FieldType;
@@ -50,7 +49,7 @@ mod serializer;
mod writer; mod writer;
/// Trait for types that are allowed for fast fields: (u64, i64 and f64). /// Trait for types that are allowed for fast fields: (u64, i64 and f64).
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd { pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
/// Converts a value from u64 /// Converts a value from u64
/// ///
/// Internally all fast field values are encoded as u64. /// Internally all fast field values are encoded as u64.
@@ -70,12 +69,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
/// Cast value to `u64`. /// Cast value to `u64`.
/// The value is just reinterpreted in memory. /// The value is just reinterpreted in memory.
fn as_u64(&self) -> u64; fn as_u64(&self) -> u64;
/// Build a default value. This default value is never used, so the value does not
/// really matter.
fn make_zero() -> Self {
Self::from_u64(0i64.to_u64())
}
} }
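
All fast-field values are ultimately stored as `u64`, so signed types need an order-preserving encoding for range semantics to survive. A sketch of the standard sign-bit-flip mapping (the helper names here are illustrative; tantivy keeps its own conversions in `common`):

```rust
/// Order-preserving i64 -> u64 mapping:
/// i64::MIN -> 0, -1 -> 0x7FFF..., 0 -> 0x8000..., i64::MAX -> u64::MAX.
fn i64_to_u64_monotonic(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

/// Inverse of `i64_to_u64_monotonic`.
fn u64_to_i64_monotonic(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

fn main() {
    // Ordering is preserved across the encoding...
    assert!(i64_to_u64_monotonic(-5) < i64_to_u64_monotonic(3));
    // ...and the round trip is lossless.
    assert_eq!(u64_to_i64_monotonic(i64_to_u64_monotonic(-42)), -42);
}
```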
impl FastValue for u64 { impl FastValue for u64 {
@@ -142,34 +135,11 @@ impl FastValue for f64 {
} }
} }
impl FastValue for crate::DateTime {
fn from_u64(timestamp_u64: u64) -> Self {
let timestamp_i64 = i64::from_u64(timestamp_u64);
crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
}
fn to_u64(&self) -> u64 {
self.timestamp().to_u64()
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.timestamp().as_u64()
}
}
fn value_to_u64(value: &Value) -> u64 { fn value_to_u64(value: &Value) -> u64 {
match *value { match *value {
Value::U64(ref val) => *val, Value::U64(ref val) => *val,
Value::I64(ref val) => common::i64_to_u64(*val), Value::I64(ref val) => common::i64_to_u64(*val),
Value::F64(ref val) => common::f64_to_u64(*val), Value::F64(ref val) => common::f64_to_u64(*val),
Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
_ => panic!("Expected a u64/i64/f64 field, got {:?} ", value), _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
} }
} }
@@ -179,14 +149,12 @@ mod tests {
use super::*; use super::*;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::merge_policy::NoMergePolicy; use crate::schema::Document;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;
use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -210,12 +178,6 @@ mod tests {
assert_eq!(test_fastfield.get(2), 300); assert_eq!(test_fastfield.get(2), 300);
} }
#[test]
pub fn test_fastfield_i64_u64() {
let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
}
#[test] #[test]
fn test_intfastfield_small() { fn test_intfastfield_small() {
let path = Path::new("test"); let path = Path::new("test");
@@ -467,93 +429,6 @@ mod tests {
} }
} }
} }
#[test]
fn test_merge_missing_date_fast_field() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
index_writer.commit().unwrap();
index_writer.add_document(doc!());
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let segment_ids: Vec<SegmentId> = reader
.searcher()
.segment_readers()
.iter()
.map(SegmentReader::segment_id)
.collect();
assert_eq!(segment_ids.len(), 2);
let merge_future = index_writer.merge(&segment_ids[..]);
let merge_res = futures::executor::block_on(merge_future);
assert!(merge_res.is_ok());
assert!(reader.reload().is_ok());
assert_eq!(reader.searcher().segment_readers().len(), 1);
}
#[test]
fn test_default_datetime() {
assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
}
#[test]
fn test_datefastfield() {
use crate::fastfield::FastValue;
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", FAST);
let multi_date_field = schema_builder.add_date_field(
"multi_date",
IntOptions::default().set_fast(Cardinality::MultiValues),
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(1i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(4i64.to_u64())
));
index_writer.add_document(doc!(
multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let date_fast_field = fast_fields.date(date_field).unwrap();
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
let mut dates = vec![];
{
assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
dates_fast_field.get_vals(0u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].timestamp(), 2i64);
assert_eq!(dates[1].timestamp(), 3i64);
}
{
assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
dates_fast_field.get_vals(1u32, &mut dates);
assert!(dates.is_empty());
}
{
assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
dates_fast_field.get_vals(2u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].timestamp(), 5i64);
assert_eq!(dates[1].timestamp(), 6i64);
}
}
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]

View File

@@ -45,7 +45,7 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) { pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
let (start, stop) = self.range(doc); let (start, stop) = self.range(doc);
let len = (stop - start) as usize; let len = (stop - start) as usize;
vals.resize(len, Item::make_zero()); vals.resize(len, Item::default());
self.vals_reader.get_range_u64(start, &mut vals[..]); self.vals_reader.get_range_u64(start, &mut vals[..]);
} }
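
`get_vals` maps a document to a `(start, stop)` range in one column and then copies that slice out of a flat values column. A minimal sketch of this offsets-plus-values layout for multi-valued fields, with plain `Vec`s and illustrative names instead of tantivy's readers:

```rust
/// Multi-valued column: an offsets array plus a flat values array.
struct MultiValueColumn {
    /// offsets.len() == num_docs + 1; doc i owns values[offsets[i]..offsets[i + 1]].
    offsets: Vec<usize>,
    values: Vec<u64>,
}

impl MultiValueColumn {
    fn get_vals(&self, doc: u32, out: &mut Vec<u64>) {
        let start = self.offsets[doc as usize];
        let stop = self.offsets[doc as usize + 1];
        out.clear();
        out.extend_from_slice(&self.values[start..stop]);
    }
}

fn main() {
    let column = MultiValueColumn {
        offsets: vec![0, 2, 2, 4], // doc 0 has two values, doc 1 none, doc 2 two
        values: vec![10, 11, 20, 21],
    };
    let mut vals = Vec::new();
    column.get_vals(2, &mut vals);
    assert_eq!(vals, vec![20, 21]);
}
```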

View File

@@ -4,7 +4,7 @@ use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;

View File

@@ -15,11 +15,9 @@ pub struct FastFieldReaders {
fast_field_i64: HashMap<Field, FastFieldReader<i64>>, fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
fast_field_u64: HashMap<Field, FastFieldReader<u64>>, fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
fast_field_f64: HashMap<Field, FastFieldReader<f64>>, fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>, fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>, fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>, fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
fast_bytes: HashMap<Field, BytesFastFieldReader>, fast_bytes: HashMap<Field, BytesFastFieldReader>,
fast_fields_composite: CompositeFile, fast_fields_composite: CompositeFile,
} }
@@ -28,7 +26,6 @@ enum FastType {
I64, I64,
U64, U64,
F64, F64,
Date,
} }
fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> { fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
@@ -42,9 +39,6 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
FieldType::F64(options) => options FieldType::F64(options) => options
.get_fastfield_cardinality() .get_fastfield_cardinality()
.map(|cardinality| (FastType::F64, cardinality)), .map(|cardinality| (FastType::F64, cardinality)),
FieldType::Date(options) => options
.get_fastfield_cardinality()
.map(|cardinality| (FastType::Date, cardinality)),
FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)), FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
_ => None, _ => None,
} }
@@ -59,11 +53,9 @@ impl FastFieldReaders {
fast_field_i64: Default::default(), fast_field_i64: Default::default(),
fast_field_u64: Default::default(), fast_field_u64: Default::default(),
fast_field_f64: Default::default(), fast_field_f64: Default::default(),
fast_field_date: Default::default(),
fast_field_i64s: Default::default(), fast_field_i64s: Default::default(),
fast_field_u64s: Default::default(), fast_field_u64s: Default::default(),
fast_field_f64s: Default::default(), fast_field_f64s: Default::default(),
fast_field_dates: Default::default(),
fast_bytes: Default::default(), fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(), fast_fields_composite: fast_fields_composite.clone(),
}; };
@@ -103,12 +95,6 @@ impl FastFieldReaders {
FastFieldReader::open(fast_field_data.clone()), FastFieldReader::open(fast_field_data.clone()),
); );
} }
FastType::Date => {
fast_field_readers.fast_field_date.insert(
field,
FastFieldReader::open(fast_field_data.clone()),
);
}
} }
} else { } else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry))); return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -144,14 +130,6 @@ impl FastFieldReaders {
.fast_field_f64s .fast_field_f64s
.insert(field, multivalued_int_fast_field); .insert(field, multivalued_int_fast_field);
} }
FastType::Date => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_dates
.insert(field, multivalued_int_fast_field);
}
} }
} else { } else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry))); return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -178,6 +156,8 @@ impl FastFieldReaders {
/// If the field is an i64 fast field, return the associated u64 reader. Values are /// If the field is an i64 fast field, return the associated u64 reader. Values are
/// mapped from i64 to u64 using the (unique) monotonic mapping. /// mapped from i64 to u64 using the (unique) monotonic mapping.
/// ///
/// TODO: should it also be lenient with f64?
///
/// This method is useful when merging segment readers. /// This method is useful when merging segment readers.
pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> { pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
if let Some(u64_ff_reader) = self.u64(field) { if let Some(u64_ff_reader) = self.u64(field) {
@@ -186,12 +166,6 @@ impl FastFieldReaders {
if let Some(i64_ff_reader) = self.i64(field) { if let Some(i64_ff_reader) = self.i64(field) {
return Some(i64_ff_reader.into_u64_reader()); return Some(i64_ff_reader.into_u64_reader());
} }
if let Some(f64_ff_reader) = self.f64(field) {
return Some(f64_ff_reader.into_u64_reader());
}
if let Some(date_ff_reader) = self.date(field) {
return Some(date_ff_reader.into_u64_reader());
}
None None
} }
@@ -202,13 +176,6 @@ impl FastFieldReaders {
self.fast_field_i64.get(&field).cloned() self.fast_field_i64.get(&field).cloned()
} }
/// Returns the `crate::DateTime` fast field reader associated to `field`.
///
/// If `field` is not a date fast field, this method returns `None`.
pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
self.fast_field_date.get(&field).cloned()
}
/// Returns the `f64` fast field reader associated to `field`. /// Returns the `f64` fast field reader associated to `field`.
/// ///
/// If `field` is not an f64 fast field, this method returns `None`. /// If `field` is not an f64 fast field, this method returns `None`.
@@ -235,9 +202,6 @@ impl FastFieldReaders {
if let Some(i64s_ff_reader) = self.i64s(field) { if let Some(i64s_ff_reader) = self.i64s(field) {
return Some(i64s_ff_reader.into_u64s_reader()); return Some(i64s_ff_reader.into_u64s_reader());
} }
if let Some(f64s_ff_reader) = self.f64s(field) {
return Some(f64s_ff_reader.into_u64s_reader());
}
None None
} }
@@ -255,13 +219,6 @@ impl FastFieldReaders {
self.fast_field_f64s.get(&field).cloned() self.fast_field_f64s.get(&field).cloned()
} }
/// Returns a `crate::DateTime` multi-valued fast field reader associated to `field`.
///
/// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
self.fast_field_dates.get(&field).cloned()
}
/// Returns the `bytes` fast field reader associated to `field`. /// Returns the `bytes` fast field reader associated to `field`.
/// ///
/// If `field` is not a bytes fast field, returns `None`. /// If `field` is not a bytes fast field, returns `None`.

View File

@@ -4,7 +4,7 @@ use crate::common::BinarySerializable;
use crate::common::VInt; use crate::common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer}; use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema}; use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
@@ -17,14 +17,6 @@ pub struct FastFieldsWriter {
bytes_value_writers: Vec<BytesFastFieldWriter>, bytes_value_writers: Vec<BytesFastFieldWriter>,
} }
fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
match *field_entry.field_type() {
FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
}
}
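
Because of the order-preserving encoding of i64/f64 fast fields, the filler value for documents missing such a field cannot be the raw word `0u64`: the word that actually represents zero is the encoded zero (`1 << 63` for i64), which is why `fast_field_default_value` routes through `common::i64_to_u64`/`common::f64_to_u64`. A tiny check, assuming the sign-bit-flip encoding sketched earlier:

```rust
fn i64_to_u64(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn main() {
    // The u64 word that represents the i64 value 0.
    assert_eq!(i64_to_u64(0), 1u64 << 63);
    // A raw 0u64 would decode back to i64::MIN, not 0.
    assert_eq!((0u64 ^ (1u64 << 63)) as i64, i64::MIN);
}
```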
impl FastFieldsWriter { impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema. /// Create all `FastFieldWriter` required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter { pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
@@ -33,15 +25,18 @@ impl FastFieldsWriter {
let mut bytes_value_writers = Vec::new(); let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() { for (field, field_entry) in schema.fields() {
let default_value = match *field_entry.field_type() {
FieldType::I64(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
};
match *field_entry.field_type() { match *field_entry.field_type() {
FieldType::I64(ref int_options) FieldType::I64(ref int_options)
| FieldType::U64(ref int_options) | FieldType::U64(ref int_options)
| FieldType::F64(ref int_options) | FieldType::F64(ref int_options) => {
| FieldType::Date(ref int_options) => {
match int_options.get_fastfield_cardinality() { match int_options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => { Some(Cardinality::SingleValue) => {
let mut fast_field_writer = IntFastFieldWriter::new(field); let mut fast_field_writer = IntFastFieldWriter::new(field);
let default_value = fast_field_default_value(field_entry);
fast_field_writer.set_val_if_missing(default_value); fast_field_writer.set_val_if_missing(default_value);
single_value_writers.push(fast_field_writer); single_value_writers.push(fast_field_writer);
} }

View File

@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
use crate::Opstamp; use crate::Opstamp;
use std::mem; use std::mem;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::sync::{Arc, RwLock, Weak}; use std::sync::{Arc, RwLock};
// The DeleteQueue is conceptually similar to a multiple // The DeleteQueue is conceptually similar to a multiple
// consumer single producer broadcast channel. // consumer single producer broadcast channel.
@@ -14,15 +14,14 @@ use std::sync::{Arc, RwLock, Weak};
// //
// New consumers can be created in two ways: // New consumers can be created in two ways:
// - calling `delete_queue.cursor()` returns a cursor that // - calling `delete_queue.cursor()` returns a cursor that
// will include all future delete operations (and some or none // will include all future delete operations (and no past operations).
// of the past operations; the client is in charge of checking the opstamps).
// - cloning an existing cursor returns a new cursor, that // - cloning an existing cursor returns a new cursor, that
// is at the exact same position, and can now advance independently // is at the exact same position, and can now advance independently
// from the original cursor. // from the original cursor.
#[derive(Default)] #[derive(Default)]
struct InnerDeleteQueue { struct InnerDeleteQueue {
writer: Vec<DeleteOperation>, writer: Vec<DeleteOperation>,
last_block: Weak<Block>, last_block: Option<Arc<Block>>,
} }
#[derive(Clone)] #[derive(Clone)]
@@ -33,31 +32,21 @@ pub struct DeleteQueue {
impl DeleteQueue { impl DeleteQueue {
// Creates a new delete queue. // Creates a new delete queue.
pub fn new() -> DeleteQueue { pub fn new() -> DeleteQueue {
DeleteQueue { let delete_queue = DeleteQueue {
inner: Arc::default(), inner: Arc::default(),
} };
}
let next_block = NextBlock::from(delete_queue.clone());
fn get_last_block(&self) -> Arc<Block> {
{ {
// try get the last block with simply acquiring the read lock. let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
let rlock = self.inner.read().unwrap(); delete_queue_wlock.last_block = Some(Arc::new(Block {
if let Some(block) = rlock.last_block.upgrade() { operations: Arc::default(),
return block; next: next_block,
} }));
} }
// It failed. Let's double check after acquiring the write, as someone could have called
// `get_last_block` right after we released the rlock. delete_queue
let mut wlock = self.inner.write().unwrap();
if let Some(block) = wlock.last_block.upgrade() {
return block;
}
let block = Arc::new(Block {
operations: Arc::default(),
next: NextBlock::from(self.clone()),
});
wlock.last_block = Arc::downgrade(&block);
block
} }
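
On the left-hand side of this hunk, the queue holds only a `Weak` reference to the tail block (presumably so that consumers, not the queue, keep blocks alive), and `get_last_block` re-creates the block on demand, re-checking under the write lock so that concurrent callers end up sharing a single block. A standalone sketch of that read-then-write double check (simplified block type, illustrative names):

```rust
use std::sync::{Arc, RwLock, Weak};

#[derive(Default)]
struct Tail {
    // Weak so that this struct does not keep old blocks alive by itself.
    last: Weak<Vec<u64>>,
}

fn get_or_create_last(tail: &RwLock<Tail>) -> Arc<Vec<u64>> {
    // Fast path: the read lock suffices while the block is still alive.
    if let Some(block) = tail.read().unwrap().last.upgrade() {
        return block;
    }
    // Slow path: re-check under the write lock, since another thread may have
    // created the block between our read unlock and our write lock.
    let mut wlock = tail.write().unwrap();
    if let Some(block) = wlock.last.upgrade() {
        return block;
    }
    let block = Arc::new(Vec::new());
    wlock.last = Arc::downgrade(&block);
    block
}

fn main() {
    let tail = RwLock::new(Tail::default());
    let a = get_or_create_last(&tail);
    let b = get_or_create_last(&tail);
    assert!(Arc::ptr_eq(&a, &b)); // same block returned while `a` keeps it alive
}
```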
// Creates a new cursor that makes it possible to // Creates a new cursor that makes it possible to
@@ -65,7 +54,17 @@ impl DeleteQueue {
// //
// Past delete operations are not accessible. // Past delete operations are not accessible.
pub fn cursor(&self) -> DeleteCursor { pub fn cursor(&self) -> DeleteCursor {
let last_block = self.get_last_block(); let last_block = self
.inner
.read()
.expect("Read lock poisoned when opening delete queue cursor")
.last_block
.clone()
.expect(
"Failed to unwrap last_block. This should never happen
as the Option<> is only here to make
initialization possible",
);
let operations_len = last_block.operations.len(); let operations_len = last_block.operations.len();
DeleteCursor { DeleteCursor {
block: last_block, block: last_block,
@@ -101,19 +100,23 @@ impl DeleteQueue {
.write() .write()
.expect("Failed to acquire write lock on delete queue writer"); .expect("Failed to acquire write lock on delete queue writer");
if self_wlock.writer.is_empty() { let delete_operations;
return None; {
let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
if writer.is_empty() {
return None;
}
delete_operations = mem::replace(writer, vec![]);
} }
let delete_operations = mem::replace(&mut self_wlock.writer, vec![]); let next_block = NextBlock::from(self.clone());
{
let new_block = Arc::new(Block { self_wlock.last_block = Some(Arc::new(Block {
operations: Arc::new(delete_operations.into_boxed_slice()), operations: Arc::new(delete_operations),
next: NextBlock::from(self.clone()), next: next_block,
}); }));
}
self_wlock.last_block = Arc::downgrade(&new_block); self_wlock.last_block.clone()
Some(new_block)
} }
} }
@@ -167,7 +170,7 @@ impl NextBlock {
} }
struct Block { struct Block {
operations: Arc<Box<[DeleteOperation]>>, operations: Arc<Vec<DeleteOperation>>,
next: NextBlock, next: NextBlock,
} }

View File

@@ -1,15 +1,14 @@
use super::operation::{AddOperation, UserOperation}; use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater; use super::segment_updater::SegmentUpdater;
use super::PreparedCommit; use super::PreparedCommit;
use crate::common::BitSet;
use crate::core::Index; use crate::core::Index;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite; use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset; use crate::fastfield::write_delete_bitset;
@@ -24,9 +23,10 @@ use crate::schema::Document;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use bit_set::BitSet;
use crossbeam::channel; use crossbeam::channel;
use futures::executor::block_on; use futures::{Canceled, Future};
use futures::future::Future;
use smallvec::smallvec; use smallvec::smallvec;
use smallvec::SmallVec; use smallvec::SmallVec;
use std::mem; use std::mem;
@@ -72,7 +72,7 @@ pub struct IndexWriter {
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>, workers_join_handle: Vec<JoinHandle<Result<()>>>,
operation_receiver: OperationReceiver, operation_receiver: OperationReceiver,
operation_sender: OperationSender, operation_sender: OperationSender,
@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
delete_cursor: &mut DeleteCursor, delete_cursor: &mut DeleteCursor,
doc_opstamps: &DocToOpstampMapping, doc_opstamps: &DocToOpstampMapping,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> crate::Result<bool> { ) -> Result<bool> {
let mut might_have_changed = false; let mut might_have_changed = false;
while let Some(delete_op) = delete_cursor.get() { while let Some(delete_op) = delete_cursor.get() {
if delete_op.opstamp > target_opstamp { if delete_op.opstamp > target_opstamp {
@@ -115,7 +115,7 @@ fn compute_deleted_bitset(
while docset.advance() { while docset.advance() {
let deleted_doc = docset.doc(); let deleted_doc = docset.doc();
if deleted_doc < limit_doc { if deleted_doc < limit_doc {
delete_bitset.insert(deleted_doc); delete_bitset.insert(deleted_doc as usize);
might_have_changed = true; might_have_changed = true;
} }
} }
@@ -126,60 +126,51 @@ fn compute_deleted_bitset(
Ok(might_have_changed) Ok(might_have_changed)
} }
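
In the loop above, a delete operation is only honoured when its opstamp does not exceed `target_opstamp`, and within a segment it only flags documents below a limit derived from the doc-to-opstamp mapping, i.e. documents added before the delete. A toy sketch of that gating with plain vectors and illustrative names (not tantivy's `DeleteCursor`/`DocToOpstampMapping`):

```rust
/// Flags docs matched by each delete op, honouring opstamp ordering:
/// ops are assumed sorted by opstamp, an op past `target_opstamp` stops the
/// scan, and a doc is only deleted if it was added before the delete op.
fn apply_deletes_up_to(
    doc_opstamps: &[u64],           // opstamp at which each doc was added
    delete_ops: &[(u64, Vec<u32>)], // (delete opstamp, matching doc ids)
    target_opstamp: u64,
    deleted: &mut [bool],
) {
    for (delete_opstamp, matches) in delete_ops {
        if *delete_opstamp > target_opstamp {
            break; // this delete belongs to a later commit
        }
        for &doc in matches {
            if doc_opstamps[doc as usize] < *delete_opstamp {
                deleted[doc as usize] = true;
            }
        }
    }
}

fn main() {
    let doc_opstamps: [u64; 3] = [1, 3, 5];
    let delete_ops: Vec<(u64, Vec<u32>)> = vec![(4, vec![0, 1, 2])]; // one op matching every doc
    let mut deleted = [false; 3];
    apply_deletes_up_to(&doc_opstamps, &delete_ops, 4, &mut deleted);
    // Doc 2 was stamped after the delete op, so it survives.
    assert_eq!(deleted, [true, true, false]);
}
```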
/// Advance delete for the given segment up to the target opstamp. /// Advance delete for the given segment up
/// /// to the target opstamp.
/// Note that there is no guarantee that the resulting `segment_entry` delete_opstamp
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, `segment_entry` is not updated.
pub(crate) fn advance_deletes( pub(crate) fn advance_deletes(
mut segment: Segment, mut segment: Segment,
segment_entry: &mut SegmentEntry, segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> crate::Result<()> { ) -> Result<()> {
if segment_entry.meta().delete_opstamp() == Some(target_opstamp) { {
// We are already up-to-date here. if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
return Ok(()); // We are already up-to-date here.
} return Ok(());
}
if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() { let segment_reader = SegmentReader::open(&segment)?;
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
return Ok(());
}
let segment_reader = SegmentReader::open(&segment)?; let max_doc = segment_reader.max_doc();
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_capacity(max_doc as usize),
};
let max_doc = segment_reader.max_doc(); let delete_cursor = segment_entry.delete_cursor();
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() { compute_deleted_bitset(
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(), &mut delete_bitset,
None => BitSet::with_max_value(max_doc), &segment_reader,
}; delete_cursor,
&DocToOpstampMapping::None,
target_opstamp,
)?;
compute_deleted_bitset( // TODO optimize
&mut delete_bitset,
&segment_reader,
segment_entry.delete_cursor(),
&DocToOpstampMapping::None,
target_opstamp,
)?;
// TODO optimize
if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
for doc in 0u32..max_doc { for doc in 0u32..max_doc {
if seg_delete_bitset.is_deleted(doc) { if segment_reader.is_deleted(doc) {
delete_bitset.insert(doc); delete_bitset.insert(doc as usize);
} }
} }
}
let num_deleted_docs = delete_bitset.len(); let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 { if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp); segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?; let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?; write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
delete_file.terminate()?; delete_file.terminate()?;
}
} }
segment_entry.set_meta(segment.meta().clone()); segment_entry.set_meta(segment.meta().clone());
Ok(()) Ok(())
} }
@@ -190,7 +181,7 @@ fn index_documents(
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>, grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater, segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor, mut delete_cursor: DeleteCursor,
) -> crate::Result<bool> { ) -> Result<bool> {
let schema = segment.schema(); let schema = segment.schema();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?; let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
@@ -236,7 +227,7 @@ fn index_documents(
delete_cursor, delete_cursor,
delete_bitset_opt, delete_bitset_opt,
); );
block_on(segment_updater.schedule_add_segment(segment_entry))?; segment_updater.add_segment(segment_entry);
Ok(true) Ok(true)
} }
@@ -245,7 +236,7 @@ fn apply_deletes(
mut delete_cursor: &mut DeleteCursor, mut delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp], doc_opstamps: &[Opstamp],
last_docstamp: Opstamp, last_docstamp: Opstamp,
) -> crate::Result<Option<BitSet>> { ) -> Result<Option<BitSet<u32>>> {
if delete_cursor.get().is_none() { if delete_cursor.get().is_none() {
// if there are no delete operation in the queue, no need // if there are no delete operation in the queue, no need
// to even open the segment. // to even open the segment.
@@ -255,7 +246,7 @@ fn apply_deletes(
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps); let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let max_doc = segment.meta().max_doc(); let max_doc = segment.meta().max_doc();
let mut deleted_bitset = BitSet::with_max_value(max_doc); let mut deleted_bitset = BitSet::with_capacity(max_doc as usize);
let may_have_deletes = compute_deleted_bitset( let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset, &mut deleted_bitset,
&segment_reader, &segment_reader,
@@ -290,7 +281,7 @@ impl IndexWriter {
num_threads: usize, num_threads: usize,
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
directory_lock: DirectoryLock, directory_lock: DirectoryLock,
) -> crate::Result<IndexWriter> { ) -> Result<IndexWriter> {
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN { if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
let err_msg = format!( let err_msg = format!(
"The heap size per thread needs to be at least {}.", "The heap size per thread needs to be at least {}.",
@@ -339,17 +330,12 @@ impl IndexWriter {
Ok(index_writer) Ok(index_writer)
} }
fn drop_sender(&mut self) {
let (sender, _receiver) = channel::bounded(1);
mem::replace(&mut self.operation_sender, sender);
}
/// If there are some merging threads, blocks until they all finish their work and /// If there are some merging threads, blocks until they all finish their work and
/// then drop the `IndexWriter`. /// then drop the `IndexWriter`.
pub fn wait_merging_threads(mut self) -> crate::Result<()> { pub fn wait_merging_threads(mut self) -> Result<()> {
// this will stop the indexing thread, // this will stop the indexing thread,
// dropping the last reference to the segment_updater. // dropping the last reference to the segment_updater.
self.drop_sender(); drop(self.operation_sender);
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]); let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
for join_handle in former_workers_handles { for join_handle in former_workers_handles {
@@ -360,6 +346,7 @@ impl IndexWriter {
TantivyError::ErrorInThread("Error in indexing worker thread.".into()) TantivyError::ErrorInThread("Error in indexing worker thread.".into())
})?; })?;
} }
drop(self.workers_join_handle);
let result = self let result = self
.segment_updater .segment_updater
@@ -374,10 +361,10 @@ impl IndexWriter {
} }
#[doc(hidden)] #[doc(hidden)]
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> { pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
let delete_cursor = self.delete_queue.cursor(); let delete_cursor = self.delete_queue.cursor();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None); let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
block_on(self.segment_updater.schedule_add_segment(segment_entry)) self.segment_updater.add_segment(segment_entry);
} }
/// Creates a new segment. /// Creates a new segment.
@@ -394,7 +381,7 @@ impl IndexWriter {
/// Spawns a new worker thread for indexing. /// Spawns a new worker thread for indexing.
/// The thread consumes documents from the pipeline. /// The thread consumes documents from the pipeline.
fn add_indexing_worker(&mut self) -> crate::Result<()> { fn add_indexing_worker(&mut self) -> Result<()> {
let document_receiver_clone = self.operation_receiver.clone(); let document_receiver_clone = self.operation_receiver.clone();
let mut segment_updater = self.segment_updater.clone(); let mut segment_updater = self.segment_updater.clone();
@@ -402,7 +389,7 @@ impl IndexWriter {
let mem_budget = self.heap_size_in_bytes_per_thread; let mem_budget = self.heap_size_in_bytes_per_thread;
let index = self.index.clone(); let index = self.index.clone();
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new() let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!("thrd-tantivy-index{}", self.worker_id)) .name(format!("thrd-tantivy-index{}", self.worker_id))
.spawn(move || { .spawn(move || {
loop { loop {
@@ -448,23 +435,22 @@ impl IndexWriter {
self.segment_updater.get_merge_policy() self.segment_updater.get_merge_policy()
} }
/// Setter for the merge policy. /// Set the merge policy.
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy); self.segment_updater.set_merge_policy(merge_policy);
} }
fn start_workers(&mut self) -> crate::Result<()> { fn start_workers(&mut self) -> Result<()> {
for _ in 0..self.num_threads { for _ in 0..self.num_threads {
self.add_indexing_worker()?; self.add_indexing_worker()?;
} }
Ok(()) Ok(())
} }
/// Detects and removes the files that are not used by the index anymore. /// Detects and removes the files that
pub fn garbage_collect_files( /// are not used by the index anymore.
&self, pub fn garbage_collect_files(&mut self) -> Result<()> {
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> { self.segment_updater.garbage_collect_files().wait()
self.segment_updater.schedule_garbage_collect()
} }
/// Deletes all documents from the index /// Deletes all documents from the index
@@ -503,7 +489,7 @@ impl IndexWriter {
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
pub fn delete_all_documents(&self) -> crate::Result<Opstamp> { pub fn delete_all_documents(&mut self) -> Result<Opstamp> {
// Delete segments // Delete segments
self.segment_updater.remove_all_segments(); self.segment_updater.remove_all_segments();
// Return new stamp - reverted stamp // Return new stamp - reverted stamp
@@ -517,10 +503,8 @@ impl IndexWriter {
pub fn merge( pub fn merge(
&mut self, &mut self,
segment_ids: &[SegmentId], segment_ids: &[SegmentId],
) -> impl Future<Output = crate::Result<SegmentMeta>> { ) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> {
let merge_operation = self.segment_updater.make_merge_operation(segment_ids); self.segment_updater.start_merge(segment_ids)
let segment_updater = self.segment_updater.clone();
async move { segment_updater.start_merge(merge_operation)?.await }
} }
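
On the futures-0.3 side of this hunk, `merge` returns a plain future, so synchronous callers drive it with `futures::executor::block_on`, as the `block_on` calls in the test hunks further down show. A self-contained usage sketch under that assumption (in-RAM index, two commits to produce two segments):

```rust
use futures::executor::block_on;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Document, Index, SegmentId};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

    // Two commits, so the index holds two segments worth merging.
    for text in &["hello", "world"] {
        let mut doc = Document::default();
        doc.add_text(body, text);
        writer.add_document(doc);
        writer.commit()?;
    }

    let segment_ids: Vec<SegmentId> = index.searchable_segment_ids()?;
    // `merge` hands back a future; a synchronous caller simply blocks on it.
    let merged_meta = block_on(writer.merge(&segment_ids))?;
    println!("merged into segment {:?}", merged_meta.id());
    writer.wait_merging_threads()?;
    Ok(())
}
```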
/// Closes the current document channel send. /// Closes the current document channel send.
@@ -546,8 +530,13 @@ impl IndexWriter {
/// state as it was after the last commit. /// state as it was after the last commit.
/// ///
/// The opstamp at the last commit is returned. /// The opstamp at the last commit is returned.
pub fn rollback(&mut self) -> crate::Result<Opstamp> { pub fn rollback(&mut self) -> Result<Opstamp> {
info!("Rolling back to opstamp {}", self.committed_opstamp); info!("Rolling back to opstamp {}", self.committed_opstamp);
self.rollback_impl()
}
/// Private, implementation of rollback
fn rollback_impl(&mut self) -> Result<Opstamp> {
// marks the segment updater as killed. From now on, all // marks the segment updater as killed. From now on, all
// segment updates will be ignored. // segment updates will be ignored.
self.segment_updater.kill(); self.segment_updater.kill();
@@ -603,7 +592,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit` /// It is also possible to add a payload to the `commit`
/// using this API. /// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html) /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> { pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
// Here, because we join all of the worker threads, // Here, because we join all of the worker threads,
// all of the segment update for this commit have been // all of the segment update for this commit have been
// sent. // sent.
@@ -650,7 +639,7 @@ impl IndexWriter {
/// Commit returns the `opstamp` of the last document /// Commit returns the `opstamp` of the last document
/// that made it in the commit. /// that made it in the commit.
/// ///
pub fn commit(&mut self) -> crate::Result<Opstamp> { pub fn commit(&mut self) -> Result<Opstamp> {
self.prepare_commit()?.commit() self.prepare_commit()?.commit()
} }
@@ -691,6 +680,9 @@ impl IndexWriter {
/// The opstamp is an increasing `u64` that can /// The opstamp is an increasing `u64` that can
/// be used by the client to align commits with its own /// be used by the client to align commits with its own
/// document queue. /// document queue.
///
/// Currently it represents the number of documents that
/// have been added since the creation of the index.
pub fn add_document(&self, document: Document) -> Opstamp { pub fn add_document(&self, document: Document) -> Opstamp {
let opstamp = self.stamper.stamp(); let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document }; let add_operation = AddOperation { opstamp, document };
@@ -764,16 +756,6 @@ impl IndexWriter {
} }
} }
impl Drop for IndexWriter {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();
for work in self.workers_join_handle.drain(..) {
let _ = work.join();
}
}
}
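
The left-hand `Drop` impl shuts the writer down by swapping the operation sender for a dummy one, which closes the channel, and then joining the worker threads. A standalone sketch of that close-channel-then-join shutdown, using `crossbeam::channel` as the writer does (the struct and names are illustrative):

```rust
use crossbeam::channel;
use std::thread;

struct Workers {
    sender: channel::Sender<u64>,
    handles: Vec<thread::JoinHandle<()>>,
}

impl Workers {
    fn shutdown(&mut self) {
        // Swapping in a sender from a throwaway channel drops the real one;
        // once every sender is gone, the workers' `recv()` returns Err and
        // their loops terminate.
        let (dummy_sender, _dummy_receiver) = channel::bounded(1);
        self.sender = dummy_sender;
        for handle in self.handles.drain(..) {
            let _ = handle.join();
        }
    }
}

fn main() {
    let (sender, receiver) = channel::unbounded::<u64>();
    let handles: Vec<_> = (0..2)
        .map(|_| {
            let receiver = receiver.clone();
            thread::spawn(move || while receiver.recv().is_ok() {})
        })
        .collect();
    let mut workers = Workers { sender, handles };
    workers.sender.send(42).unwrap();
    workers.shutdown(); // returns once both workers have exited
}
```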
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -783,7 +765,7 @@ mod tests {
use crate::error::*; use crate::error::*;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption, STRING}; use crate::schema::{self, IndexRecordOption};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::Term; use crate::Term;
@@ -1208,16 +1190,4 @@ mod tests {
assert!(clear_again.is_ok()); assert!(clear_again.is_ok());
assert!(commit_again.is_ok()); assert!(commit_again.is_ok());
} }
#[test]
fn test_index_doc_missing_field() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
let commit = index_writer.commit();
assert!(commit.is_ok());
}
} }

View File

@@ -2,23 +2,14 @@ use crate::Opstamp;
use crate::SegmentId; use crate::SegmentId;
use census::{Inventory, TrackedObject}; use census::{Inventory, TrackedObject};
use std::collections::HashSet; use std::collections::HashSet;
use std::ops::Deref;
#[derive(Default)] #[derive(Default)]
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>); pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl Deref for MergeOperationInventory {
type Target = Inventory<InnerMergeOperation>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl MergeOperationInventory { impl MergeOperationInventory {
pub fn segment_in_merge(&self) -> HashSet<SegmentId> { pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default(); let mut segment_in_merge = HashSet::default();
for merge_op in self.list() { for merge_op in self.0.list() {
for &segment_id in &merge_op.segment_ids { for &segment_id in &merge_op.segment_ids {
segment_in_merge.insert(segment_id); segment_in_merge.insert(segment_id);
} }
@@ -44,13 +35,13 @@ pub struct MergeOperation {
inner: TrackedObject<InnerMergeOperation>, inner: TrackedObject<InnerMergeOperation>,
} }
pub(crate) struct InnerMergeOperation { struct InnerMergeOperation {
target_opstamp: Opstamp, target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
} }
impl MergeOperation { impl MergeOperation {
pub(crate) fn new( pub fn new(
inventory: &MergeOperationInventory, inventory: &MergeOperationInventory,
target_opstamp: Opstamp, target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
@@ -60,7 +51,7 @@ impl MergeOperation {
segment_ids, segment_ids,
}; };
MergeOperation { MergeOperation {
inner: inventory.track(inner_merge_operation), inner: inventory.0.track(inner_merge_operation),
} }
} }
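
The inventory above is a newtype around `census::Inventory`, and the left-hand side adds a `Deref` impl so call sites can use the inner methods (e.g. `self.list()`) directly instead of going through `.0`. A minimal sketch of that newtype-plus-`Deref` pattern on a plain `Vec` (illustrative types):

```rust
use std::ops::Deref;

struct Names(Vec<String>);

impl Deref for Names {
    type Target = Vec<String>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let names = Names(vec!["merge-1".to_string(), "merge-2".to_string()]);
    // The inner Vec's methods are reachable through the wrapper via auto-deref.
    assert_eq!(names.len(), 2);
    assert!(names.iter().any(|n| n.as_str() == "merge-2"));
}
```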

View File

@@ -709,7 +709,7 @@ mod tests {
use crate::IndexWriter; use crate::IndexWriter;
use crate::Searcher; use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on; use futures::Future;
use std::io::Cursor; use std::io::Cursor;
#[test] #[test]
@@ -792,7 +792,11 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
} }
{ {
@@ -1036,7 +1040,11 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
@@ -1131,7 +1139,11 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -1265,7 +1277,11 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
test_searcher( test_searcher(
@@ -1320,7 +1336,11 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
// commit has not been called yet. The document should still be // commit has not been called yet. The document should still be
// there. // there.
@@ -1341,18 +1361,22 @@ mod tests {
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_u64(int_field, 1); doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone()); index_writer.add_document(doc.clone());
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("commit failed");
index_writer.add_document(doc); index_writer.add_document(doc);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("commit failed");
index_writer.delete_term(Term::from_field_u64(int_field, 1)); index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
assert!(block_on(index_writer.merge(&segment_ids)).is_ok()); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
// assert delete has not been committed // assert delete has not been committed
assert!(reader.reload().is_ok()); reader.reload().expect("failed to load searcher 1");
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
@@ -1391,12 +1415,12 @@ mod tests {
index_doc(&mut index_writer, &[1, 5]); index_doc(&mut index_writer, &[1, 5]);
index_doc(&mut index_writer, &[3]); index_doc(&mut index_writer, &[3]);
index_doc(&mut index_writer, &[17]); index_doc(&mut index_writer, &[17]);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("committed");
index_doc(&mut index_writer, &[20]); index_doc(&mut index_writer, &[20]);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("committed");
index_doc(&mut index_writer, &[28, 27]); index_doc(&mut index_writer, &[28, 27]);
index_doc(&mut index_writer, &[1_000]); index_doc(&mut index_writer, &[1_000]);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("committed");
} }
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -1428,6 +1452,15 @@ mod tests {
assert_eq!(&vals, &[17]); assert_eq!(&vals, &[17]);
} }
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
{ {
let segment = searcher.segment_reader(1u32); let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1451,13 +1484,27 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert!(block_on(index_writer.merge(&segment_ids)).is_ok()); index_writer
assert!(index_writer.wait_merging_threads().is_ok()); .merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer
.wait_merging_threads()
.expect("Wait for merging threads");
} }
assert!(reader.reload().is_ok()); reader.reload().expect("Load searcher");
{ {
let searcher = reader.searcher(); let searcher = reader.searcher();
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1492,46 +1539,4 @@ mod tests {
assert_eq!(&vals, &[20]); assert_eq!(&vals, &[20]);
} }
} }
#[test]
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);
let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
policy.set_min_merge_size(2);
writer.set_merge_policy(Box::new(policy));
for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);
writer.add_document(doc);
if i % 5 == 0 {
writer.commit()?;
}
}
writer.commit()?;
writer.wait_merging_threads()?;
// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
} }

View File

@@ -18,7 +18,7 @@ mod stamper;
pub use self::index_writer::IndexWriter; pub use self::index_writer::IndexWriter;
pub use self::log_merge_policy::LogMergePolicy; pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_operation::MergeOperation; pub use self::merge_operation::{MergeOperation, MergeOperationInventory};
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy}; pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
pub use self::prepared_commit::PreparedCommit; pub use self::prepared_commit::PreparedCommit;
pub use self::segment_entry::SegmentEntry; pub use self::segment_entry::SegmentEntry;
@@ -33,7 +33,6 @@ pub type DefaultMergePolicy = LogMergePolicy;
mod tests { mod tests {
use crate::schema::{self, Schema}; use crate::schema::{self, Schema};
use crate::{Index, Term}; use crate::{Index, Term};
#[test] #[test]
fn test_advance_delete_bug() { fn test_advance_delete_bug() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();

View File

@@ -1,7 +1,6 @@
use super::IndexWriter; use super::IndexWriter;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
use futures::executor::block_on;
/// A prepared commit /// A prepared commit
pub struct PreparedCommit<'a> { pub struct PreparedCommit<'a> {
@@ -33,11 +32,9 @@ impl<'a> PreparedCommit<'a> {
pub fn commit(self) -> Result<Opstamp> { pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
let _ = block_on( self.index_writer
self.index_writer .segment_updater()
.segment_updater() .commit(self.opstamp, self.payload)?;
.schedule_commit(self.opstamp, self.payload),
);
Ok(self.opstamp) Ok(self.opstamp)
} }
} }

View File

@@ -1,7 +1,7 @@
use crate::common::BitSet;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet;
use std::fmt; use std::fmt;
/// A segment entry describes the state of /// A segment entry describes the state of

View File

@@ -16,28 +16,6 @@ struct SegmentRegisters {
committed: SegmentRegister, committed: SegmentRegister,
} }
#[derive(PartialEq, Eq)]
pub(crate) enum SegmentsStatus {
Committed,
Uncommitted,
}
impl SegmentRegisters {
/// Check if all the segments are committed or uncommitted.
///
/// If some segment is missing or segments are in a different state (this should not happen
/// if tantivy is used correctly), returns `None`.
fn segments_status(&self, segment_ids: &[SegmentId]) -> Option<SegmentsStatus> {
if self.uncommitted.contains_all(segment_ids) {
Some(SegmentsStatus::Uncommitted)
} else if self.committed.contains_all(segment_ids) {
Some(SegmentsStatus::Committed)
} else {
None
}
}
}
/// The segment manager stores the list of segments /// The segment manager stores the list of segments
/// as well as their state. /// as well as their state.
/// ///
@@ -175,35 +153,33 @@ impl SegmentManager {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.uncommitted.add_segment_entry(segment_entry); registers_lock.uncommitted.add_segment_entry(segment_entry);
} }
// Replace a list of segments with their equivalent merged segment.
// pub fn end_merge(
// Returns whether the merged segments were committed or uncommitted.
pub(crate) fn end_merge(
&self, &self,
before_merge_segment_ids: &[SegmentId], before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry, after_merge_segment_entry: SegmentEntry,
) -> crate::Result<SegmentsStatus> { ) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
let segments_status = registers_lock let target_register: &mut SegmentRegister = {
.segments_status(before_merge_segment_ids) if registers_lock
.ok_or_else(|| { .uncommitted
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.uncommitted
} else if registers_lock
.committed
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager"); warn!("couldn't find segment in SegmentManager");
crate::Error::InvalidArgument( return;
"The segments that were merged could not be found in the SegmentManager. \ }
This is not necessarily a bug, and can happen after a rollback for instance."
.to_string(),
)
})?;
let target_register: &mut SegmentRegister = match segments_status {
SegmentsStatus::Uncommitted => &mut registers_lock.uncommitted,
SegmentsStatus::Committed => &mut registers_lock.committed,
}; };
for segment_id in before_merge_segment_ids { for segment_id in before_merge_segment_ids {
target_register.remove_segment(segment_id); target_register.remove_segment(segment_id);
} }
target_register.add_segment_entry(after_merge_segment_entry); target_register.add_segment_entry(after_merge_segment_entry);
Ok(segments_status)
} }
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> { pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {

View File

@@ -1,13 +1,10 @@
use crate::Directory; use crate::Result;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::directory::error::OpenWriteError;
use crate::directory::{DirectoryClone, RAMDirectory, TerminatingWrite, WritePtr};
use crate::fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer; use crate::postings::InvertedIndexSerializer;
use crate::schema::Schema;
use crate::store::StoreWriter; use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
@@ -17,50 +14,25 @@ pub struct SegmentSerializer {
fast_field_serializer: FastFieldSerializer, fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer, fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer, postings_serializer: InvertedIndexSerializer,
bundle_writer: Option<(RAMDirectory, WritePtr)>,
}
pub(crate) struct SegmentSerializerWriters {
postings_wrt: WritePtr,
positions_skip_wrt: WritePtr,
positions_wrt: WritePtr,
terms_wrt: WritePtr,
fast_field_wrt: WritePtr,
fieldnorms_wrt: WritePtr,
store_wrt: WritePtr,
}
impl SegmentSerializerWriters {
pub(crate) fn for_segment(segment: &mut Segment) -> Result<Self, OpenWriteError> {
Ok(SegmentSerializerWriters {
postings_wrt: segment.open_write(SegmentComponent::POSTINGS)?,
positions_skip_wrt: segment.open_write(SegmentComponent::POSITIONS)?,
positions_wrt: segment.open_write(SegmentComponent::POSITIONSSKIP)?,
terms_wrt: segment.open_write(SegmentComponent::TERMS)?,
fast_field_wrt: segment.open_write(SegmentComponent::FASTFIELDS)?,
fieldnorms_wrt: segment.open_write(SegmentComponent::FIELDNORMS)?,
store_wrt: segment.open_write(SegmentComponent::STORE)?,
})
}
} }
impl SegmentSerializer { impl SegmentSerializer {
pub(crate) fn new(schema: Schema, writers: SegmentSerializerWriters) -> crate::Result<Self> { /// Creates a new `SegmentSerializer`.
let fast_field_serializer = FastFieldSerializer::from_write(writers.fast_field_wrt)?; pub fn for_segment(segment: &mut Segment) -> Result<SegmentSerializer> {
let fieldnorms_serializer = FieldNormsSerializer::from_write(writers.fieldnorms_wrt)?; let store_write = segment.open_write(SegmentComponent::STORE)?;
let postings_serializer = InvertedIndexSerializer::open(
schema, let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
writers.terms_wrt, let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;
writers.postings_wrt,
writers.positions_wrt, let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
writers.positions_skip_wrt, let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
);
let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer { Ok(SegmentSerializer {
store_writer: StoreWriter::new(writers.store_wrt), store_writer: StoreWriter::new(store_write),
fast_field_serializer, fast_field_serializer,
fieldnorms_serializer, fieldnorms_serializer,
postings_serializer, postings_serializer,
bundle_writer: None,
}) })
} }
@@ -85,15 +57,11 @@ impl SegmentSerializer {
} }
/// Finalize the segment serialization. /// Finalize the segment serialization.
pub fn close(mut self) -> crate::Result<()> { pub fn close(self) -> Result<()> {
self.fast_field_serializer.close()?; self.fast_field_serializer.close()?;
self.postings_serializer.close()?; self.postings_serializer.close()?;
self.store_writer.close()?; self.store_writer.close()?;
self.fieldnorms_serializer.close()?; self.fieldnorms_serializer.close()?;
if let Some((ram_directory, mut bundle_wrt)) = self.bundle_writer.take() {
ram_directory.serialize_bundle(&mut bundle_wrt)?;
bundle_wrt.terminate()?;
}
Ok(()) Ok(())
} }
} }
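One side of this hunk splits writer acquisition (`SegmentSerializerWriters::for_segment`) from serializer construction (`SegmentSerializer::new`), so a serializer can be built over any set of writers rather than only over a segment opened from the directory. A rough sketch of that split, using `Cursor<Vec<u8>>` as a throw-away stand-in for the real write pointers:

```rust
use std::io::{Cursor, Write};

// Hypothetical bundle of per-component writers, opened up front in one place.
struct Writers {
    store: Cursor<Vec<u8>>,
    fast_fields: Cursor<Vec<u8>>,
}

impl Writers {
    fn for_segment() -> std::io::Result<Writers> {
        // The real code opens one WritePtr per SegmentComponent here.
        Ok(Writers {
            store: Cursor::new(Vec::new()),
            fast_fields: Cursor::new(Vec::new()),
        })
    }
}

// The serializer is then built from the bundle and never touches the directory itself.
struct Serializer {
    writers: Writers,
}

impl Serializer {
    fn new(writers: Writers) -> Serializer {
        Serializer { writers }
    }

    fn close(mut self) -> std::io::Result<()> {
        self.writers.store.write_all(b"store bytes")?;
        self.writers.fast_fields.write_all(b"fast field bytes")?;
        Ok(())
    }
}

fn main() -> std::io::Result<()> {
    let serializer = Serializer::new(Writers::for_segment()?);
    serializer.close()
}
```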
@@ -6,35 +6,39 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult}; use crate::directory::{Directory, DirectoryClone};
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes; use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory; use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger; use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::segment_serializer::SegmentSerializerWriters;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy}; use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::indexer::{MergeCandidate, MergeOperation};
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use futures::channel::oneshot; use crate::Result;
use futures::executor::{ThreadPool, ThreadPoolBuilder}; use futures::oneshot;
use futures::future::Future; use futures::sync::oneshot::Receiver;
use futures::future::TryFutureExt; use futures::Future;
use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::io::Write; use std::io::Write;
use std::ops::Deref; use std::mem;
use std::ops::DerefMut;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::thread;
const NUM_MERGE_THREADS: usize = 4; use std::thread::JoinHandle;
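The import changes above summarise the executor migration: one side relies on `futures` 0.1 plus `futures-cpupool` and explicit merge threads, the other on `futures` 0.3 with its built-in `ThreadPool` and `futures::channel::oneshot`. A minimal sketch of the 0.3 pattern, assuming `futures = { version = "0.3", features = ["thread-pool"] }` in Cargo.toml (an illustration, not the actual `SegmentUpdater` code):

```rust
// Assumed dependency: futures = { version = "0.3", features = ["thread-pool"] }
use futures::channel::oneshot;
use futures::executor::{block_on, ThreadPoolBuilder};

fn main() -> std::io::Result<()> {
    // A single-threaded pool serialises all updater operations, much like the
    // previous futures-cpupool based queue did.
    let pool = ThreadPoolBuilder::new()
        .name_prefix("segment_updater")
        .pool_size(1)
        .create()?;

    let (sender, receiver) = oneshot::channel::<u32>();
    pool.spawn_ok(async move {
        // spawn_ok only accepts futures with output (), so the result is
        // reported back through the oneshot channel.
        let _ = sender.send(42);
    });

    // If the task is dropped before sending, the receiver resolves to Err(Canceled).
    let answer = block_on(receiver).unwrap_or(0);
    println!("answer = {}", answer);
    Ok(())
}
```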
/// Save the index meta file. /// Save the index meta file.
/// This operation is atomic : /// This operation is atomic :
@@ -45,7 +49,7 @@ const NUM_MERGE_THREADS: usize = 4;
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -66,7 +70,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
info!("save metas"); info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
@@ -85,38 +89,21 @@ fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result
// We voluntarily pass a merge_operation ref to guarantee that // We voluntarily pass a merge_operation ref to guarantee that
// the merge_operation is alive during the process // the merge_operation is alive during the process
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>); pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
impl Deref for SegmentUpdater { fn perform_merge(
type Target = InnerSegmentUpdater; merge_operation: &MergeOperation,
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
async fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
index
.directory_mut()
.garbage_collect(move || segment_updater.list_files())
}
/// Merges the list of segments given in `segment_entries`.
/// This function happens in the calling thread and is computationally expensive.
fn merge(
index: &Index, index: &Index,
mut segment_entries: Vec<SegmentEntry>, mut segment_entries: Vec<SegmentEntry>,
target_opstamp: Opstamp, ) -> Result<SegmentEntry> {
) -> crate::Result<SegmentEntry> { let target_opstamp = merge_operation.target_opstamp();
// first we need to apply deletes to our segment. // first we need to apply deletes to our segment.
let mut merged_segment = index.new_segment(); let mut merged_segment = index.new_segment();
// First we apply all of the deletes to the merged segment, up to the target opstamp. // TODO add logging
let schema = index.schema();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = index.segment(segment_entry.meta().clone()); let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?; advance_deletes(segment, segment_entry, target_opstamp)?;
@@ -130,21 +117,22 @@ fn merge(
.collect(); .collect();
// An IndexMerger is like a "view" of our merged segments. // An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?; let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?;
// ... we just serialize this index merger in our new segment to merge the two segments. // ... we just serialize this index merger in our new segment
let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut merged_segment)?; // to merge the two segments.
let segment_serializer =
SegmentSerializer::new(merged_segment.schema(), segment_serializer_wrts)?; let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
let num_docs = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs); let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None)) let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
Ok(after_merge_segment_entry)
} }
pub(crate) struct InnerSegmentUpdater { struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to // we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the // avoid loading the file every time we need it in the
// `SegmentUpdater`. // `SegmentUpdater`.
@@ -152,12 +140,12 @@ pub(crate) struct InnerSegmentUpdater {
// This should be up to date as all update happen through // This should be up to date as all update happen through
// the unique active `SegmentUpdater`. // the unique active `SegmentUpdater`.
active_metas: RwLock<Arc<IndexMeta>>, active_metas: RwLock<Arc<IndexMeta>>,
pool: ThreadPool, pool: CpuPool,
merge_thread_pool: ThreadPool,
index: Index, index: Index,
segment_manager: SegmentManager, segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>, merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
killed: AtomicBool, killed: AtomicBool,
stamper: Stamper, stamper: Stamper,
merge_operations: MergeOperationInventory, merge_operations: MergeOperationInventory,
@@ -168,31 +156,22 @@ impl SegmentUpdater {
index: Index, index: Index,
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
) -> crate::Result<SegmentUpdater> { ) -> Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?; let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor); let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new() let pool = CpuPoolBuilder::new()
.name_prefix("segment_updater") .name_prefix("segment_updater")
.pool_size(1) .pool_size(1)
.create() .create();
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment updater thread".to_string())
})?;
let merge_thread_pool = ThreadPoolBuilder::new()
.name_prefix("merge_thread")
.pool_size(NUM_MERGE_THREADS)
.create()
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment merging thread".to_string())
})?;
let index_meta = index.load_metas()?; let index_meta = index.load_metas()?;
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater { Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
active_metas: RwLock::new(Arc::new(index_meta)), active_metas: RwLock::new(Arc::new(index_meta)),
pool, pool,
merge_thread_pool,
index, index,
segment_manager, segment_manager,
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))), merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
merging_thread_id: AtomicUsize::default(),
merging_threads: RwLock::new(HashMap::new()),
killed: AtomicBool::new(false), killed: AtomicBool::new(false),
stamper, stamper,
merge_operations: Default::default(), merge_operations: Default::default(),
@@ -200,82 +179,65 @@ impl SegmentUpdater {
} }
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> { pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.merge_policy.read().unwrap().clone() self.0.merge_policy.read().unwrap().clone()
} }
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy); let arc_merge_policy = Arc::new(merge_policy);
*self.merge_policy.write().unwrap() = arc_merge_policy; *self.0.merge_policy.write().unwrap() = arc_merge_policy;
} }
fn schedule_future<T: 'static + Send, F: Future<Output = crate::Result<T>> + 'static + Send>( fn get_merging_thread_id(&self) -> usize {
self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
}
fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>(
&self, &self,
f: F, f: F,
) -> impl Future<Output = crate::Result<T>> { ) -> CpuFuture<T, TantivyError> {
let (sender, receiver) = oneshot::channel(); let me_clone = self.clone();
if self.is_alive() { self.0.pool.spawn_fn(move || Ok(f(me_clone)))
self.pool.spawn_ok(async move {
let _ = sender.send(f.await);
});
} else {
let _ = sender.send(Err(crate::TantivyError::SystemError(
"Segment updater killed".to_string(),
)));
}
receiver.unwrap_or_else(|_| {
let err_msg =
"A segment_updater future did not success. This should never happen.".to_string();
Err(crate::Error::SystemError(err_msg))
})
} }
pub fn schedule_add_segment( pub fn add_segment(&self, segment_entry: SegmentEntry) {
&self, self.run_async(|segment_updater| {
segment_entry: SegmentEntry, segment_updater.0.segment_manager.add_segment(segment_entry);
) -> impl Future<Output = crate::Result<()>> { segment_updater.consider_merge_options();
let segment_updater = self.clone();
self.schedule_future(async move {
segment_updater.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options().await;
Ok(())
}) })
.forget();
} }
/// Orders `SegmentManager` to remove all segments /// Orders `SegmentManager` to remove all segments
pub(crate) fn remove_all_segments(&self) { pub(crate) fn remove_all_segments(&self) {
self.segment_manager.remove_all_segments(); self.0.segment_manager.remove_all_segments();
} }
pub fn kill(&mut self) { pub fn kill(&mut self) {
self.killed.store(true, Ordering::Release); self.0.killed.store(true, Ordering::Release);
} }
pub fn is_alive(&self) -> bool { pub fn is_alive(&self) -> bool {
!self.killed.load(Ordering::Acquire) !self.0.killed.load(Ordering::Acquire)
} }
/// Apply deletes up to the target opstamp to all segments. /// Apply deletes up to the target opstamp to all segments.
/// ///
/// The method returns copies of the segment entries, /// The method returns copies of the segment entries,
/// updated with the delete information. /// updated with the delete information.
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> { fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
let mut segment_entries = self.segment_manager.segment_entries(); let mut segment_entries = self.0.segment_manager.segment_entries();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = self.index.segment(segment_entry.meta().clone()); let segment = self.0.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?; advance_deletes(segment, segment_entry, target_opstamp)?;
} }
Ok(segment_entries) Ok(segment_entries)
} }
pub fn save_metas( pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) {
&self,
opstamp: Opstamp,
commit_message: Option<String>,
) -> crate::Result<()> {
if self.is_alive() { if self.is_alive() {
let index = &self.index; let index = &self.0.index;
let directory = index.directory(); let directory = index.directory();
let mut commited_segment_metas = self.segment_manager.committed_segment_metas(); let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();
// We sort segment_readers by number of documents. // We sort segment_readers by number of documents.
// This is a heuristic to make multithreading more efficient. // This is a heuristic to make multithreading more efficient.
@@ -297,18 +259,16 @@ impl SegmentUpdater {
opstamp, opstamp,
payload: commit_message, payload: commit_message,
}; };
// TODO add context to the error. save_metas(&index_meta, directory.box_clone().borrow_mut())
save_metas(&index_meta, directory.box_clone().borrow_mut())?; .expect("Could not save metas.");
self.store_meta(&index_meta); self.store_meta(&index_meta);
} }
Ok(())
} }
pub fn schedule_garbage_collect( pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> {
&self, self.run_async(move |segment_updater| {
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> { segment_updater.garbage_collect_files_exec();
let garbage_collect_future = garbage_collect_files(self.clone()); })
self.schedule_future(garbage_collect_future)
} }
/// List the files that are useful to the index. /// List the files that are useful to the index.
@@ -316,130 +276,148 @@ impl SegmentUpdater {
/// This does not include lock files, or files that are obsolete /// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector. /// but have not yet been deleted by the garbage collector.
fn list_files(&self) -> HashSet<PathBuf> { fn list_files(&self) -> HashSet<PathBuf> {
let mut files: HashSet<PathBuf> = self let mut files = HashSet::new();
.index
.list_all_segment_metas()
.into_iter()
.flat_map(|segment_meta| segment_meta.list_files())
.collect();
files.insert(META_FILEPATH.to_path_buf()); files.insert(META_FILEPATH.to_path_buf());
for segment_meta in self.0.index.list_all_segment_metas() {
files.extend(segment_meta.list_files());
}
files files
} }
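The single-expression form of `list_files` flattens every segment's file list into one `HashSet` with `flat_map`/`collect` and then adds the meta file. A tiny standalone illustration of the same shape, with made-up file names standing in for `SegmentMeta::list_files`:

```rust
use std::collections::HashSet;
use std::path::PathBuf;

// Stand-in for SegmentMeta::list_files(); the file names are made up.
fn segment_files(segment: &str) -> Vec<PathBuf> {
    vec![
        PathBuf::from(format!("{}.idx", segment)),
        PathBuf::from(format!("{}.store", segment)),
    ]
}

fn main() {
    let segments = ["seg-a", "seg-b"];
    let mut files: HashSet<PathBuf> = segments
        .iter()
        .flat_map(|segment| segment_files(segment))
        .collect();
    files.insert(PathBuf::from("meta.json"));
    assert_eq!(files.len(), 5);
}
```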
pub fn schedule_commit( fn garbage_collect_files_exec(&self) {
&self, info!("Running garbage collection");
opstamp: Opstamp, let mut index = self.0.index.clone();
payload: Option<String>, index.directory_mut().garbage_collect(|| self.list_files());
) -> impl Future<Output = crate::Result<()>> { }
let segment_updater: SegmentUpdater = self.clone();
self.schedule_future(async move { pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> {
let segment_entries = segment_updater.purge_deletes(opstamp)?; self.run_async(move |segment_updater| {
segment_updater.segment_manager.commit(segment_entries); if segment_updater.is_alive() {
segment_updater.save_metas(opstamp, payload)?; let segment_entries = segment_updater
let _ = garbage_collect_files(segment_updater.clone()).await; .purge_deletes(opstamp)
segment_updater.consider_merge_options().await; .expect("Failed purge deletes");
Ok(()) segment_updater.0.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp, payload);
segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options();
}
}) })
.wait()
}
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
let commit_opstamp = self.load_metas().opstamp;
let merge_operation = MergeOperation::new(
&self.0.merge_operations,
commit_opstamp,
segment_ids.to_vec(),
);
self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
.wait()?
} }
fn store_meta(&self, index_meta: &IndexMeta) { fn store_meta(&self, index_meta: &IndexMeta) {
*self.active_metas.write().unwrap() = Arc::new(index_meta.clone()); *self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone());
} }
fn load_metas(&self) -> Arc<IndexMeta> { fn load_metas(&self) -> Arc<IndexMeta> {
self.active_metas.read().unwrap().clone() self.0.active_metas.read().unwrap().clone()
} }
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
let commit_opstamp = self.load_metas().opstamp;
MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
}
// Starts a merge operation. This function will block until the merge operation is effectively
// started. Note that it does not wait for the merge to terminate.
// The calling thread should not be blocked for a long time, as this only involves waiting for the
// `SegmentUpdater` queue, which in turn only contains lightweight operations.
//
// The merge itself happens on a different thread.
//
// When successful, this function returns a `Future` for a `Result<SegmentMeta>` that represents
// the actual outcome of the merge operation.
//
// It returns an error if for some reason the merge operation could not be started.
//
// At this point an error is not necessarily the sign of a malfunction.
// (e.g. a rollback could have happened between the instant when the merge operation was
// suggested and the moment when it ended up being executed.)
//
// `segment_ids` is required to be non-empty. // `segment_ids` is required to be non-empty.
pub fn start_merge( fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> {
&self,
merge_operation: MergeOperation,
) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
assert!( assert!(
!merge_operation.segment_ids().is_empty(), !merge_operation.segment_ids().is_empty(),
"Segment_ids cannot be empty." "Segment_ids cannot be empty."
); );
let segment_updater = self.clone(); let segment_updater_clone = self.clone();
let segment_entries: Vec<SegmentEntry> = self let segment_entries: Vec<SegmentEntry> = self
.0
.segment_manager .segment_manager
.start_merge(merge_operation.segment_ids())?; .start_merge(merge_operation.segment_ids())?;
info!("Starting merge - {:?}", merge_operation.segment_ids()); // let segment_ids_vec = merge_operation.segment_ids.to_vec();
let (merging_future_send, merging_future_recv) = let merging_thread_id = self.get_merging_thread_id();
oneshot::channel::<crate::Result<SegmentMeta>>(); info!(
"Starting merge thread #{} - {:?}",
merging_thread_id,
merge_operation.segment_ids()
);
let (merging_future_send, merging_future_recv) = oneshot();
self.merge_thread_pool.spawn_ok(async move { // first we need to apply deletes to our segment.
// The fact that `merge_operation` is moved here is important. let merging_join_handle = thread::Builder::new()
// Its lifetime is used to track how many merging thread are currently running, .name(format!("mergingthread-{}", merging_thread_id))
// as well as which segment is currently in merge and therefore should not be .spawn(move || {
// candidate for another merge. // first we need to apply deletes to our segment.
match merge( let merge_result = perform_merge(
&segment_updater.index, &merge_operation,
segment_entries, &segment_updater_clone.0.index,
merge_operation.target_opstamp(), segment_entries,
) { );
Ok(after_merge_segment_entry) => {
let segment_meta = segment_updater match merge_result {
.end_merge(merge_operation, after_merge_segment_entry) Ok(after_merge_segment_entry) => {
.await; let merged_segment_meta = after_merge_segment_entry.meta().clone();
let _send_result = merging_future_send.send(segment_meta); segment_updater_clone
} .end_merge(merge_operation, after_merge_segment_entry)
Err(e) => { .expect("Segment updater thread is corrupted.");
warn!(
"Merge of {:?} was cancelled: {:?}", // the future may fail if the listener of the oneshot future
merge_operation.segment_ids().to_vec(), // has been destroyed.
e //
); // This is not a problem here, so we just ignore any
// ... cancel merge // possible error.
if cfg!(test) { let _merging_future_res = merging_future_send.send(merged_segment_meta);
panic!("Merge failed."); }
Err(e) => {
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids(),
e
);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
}
// As `merge_operation` will be dropped, the segment in merge state will
// be available for merge again.
// `merging_future_send` will be dropped, sending an error to the future.
} }
} }
} segment_updater_clone
}); .0
.merging_threads
Ok(merging_future_recv .write()
.unwrap_or_else(|_| Err(crate::Error::SystemError("Merge failed".to_string())))) .unwrap()
.remove(&merging_thread_id);
Ok(())
})
.expect("Failed to spawn a thread.");
self.0
.merging_threads
.write()
.unwrap()
.insert(merging_thread_id, merging_join_handle);
Ok(merging_future_recv)
} }
async fn consider_merge_options(&self) { fn consider_merge_options(&self) {
let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge(); let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge();
let (committed_segments, uncommitted_segments) = let (committed_segments, uncommitted_segments) =
get_mergeable_segments(&merge_segment_ids, &self.segment_manager); get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager);
// Committed segments cannot be merged with uncommitted_segments. // Committed segments cannot be merged with uncommitted_segments.
// We therefore consider merges using these two sets of segments independently. // We therefore consider merges using these two sets of segments independently.
let merge_policy = self.get_merge_policy(); let merge_policy = self.get_merge_policy();
let current_opstamp = self.stamper.stamp(); let current_opstamp = self.0.stamper.stamp();
let mut merge_candidates: Vec<MergeOperation> = merge_policy let mut merge_candidates: Vec<MergeOperation> = merge_policy
.compute_merge_candidates(&uncommitted_segments) .compute_merge_candidates(&uncommitted_segments)
.into_iter() .into_iter()
.map(|merge_candidate| { .map(|merge_candidate| {
MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0) MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0)
}) })
.collect(); .collect();
@@ -447,18 +425,25 @@ impl SegmentUpdater {
let committed_merge_candidates = merge_policy let committed_merge_candidates = merge_policy
.compute_merge_candidates(&committed_segments) .compute_merge_candidates(&committed_segments)
.into_iter() .into_iter()
.map(|merge_candidate: MergeCandidate| { .map(|merge_candidate| {
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0) MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0)
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter()); merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates { for merge_operation in merge_candidates {
if let Err(err) = self.start_merge(merge_operation) { match self.start_merge_impl(merge_operation) {
warn!( Ok(merge_future) => {
"Starting the merge failed for the following reason. This is not fatal. {}", if let Err(e) = merge_future.fuse().poll() {
err error!("The merge task failed quickly after starting: {:?}", e);
); }
}
Err(err) => {
warn!(
"Starting the merge failed for the following reason. This is not fatal. {}",
err
);
}
} }
} }
} }
@@ -467,17 +452,15 @@ impl SegmentUpdater {
&self, &self,
merge_operation: MergeOperation, merge_operation: MergeOperation,
mut after_merge_segment_entry: SegmentEntry, mut after_merge_segment_entry: SegmentEntry,
) -> impl Future<Output = crate::Result<SegmentMeta>> { ) -> Result<()> {
let segment_updater = self.clone(); self.run_async(move |segment_updater| {
let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
let end_merge_future = self.schedule_future(async move {
info!("End merge {:?}", after_merge_segment_entry.meta()); info!("End merge {:?}", after_merge_segment_entry.meta());
{ {
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone(); let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
if let Some(delete_operation) = delete_cursor.get() { if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.load_metas().opstamp; let committed_opstamp = segment_updater.load_metas().opstamp;
if delete_operation.opstamp < committed_opstamp { if delete_operation.opstamp < committed_opstamp {
let index = &segment_updater.index; let index = &segment_updater.0.index;
let segment = index.segment(after_merge_segment_entry.meta().clone()); let segment = index.segment(after_merge_segment_entry.meta().clone());
if let Err(e) = advance_deletes( if let Err(e) = advance_deletes(
segment, segment,
@@ -495,26 +478,21 @@ impl SegmentUpdater {
// ... cancel merge // ... cancel merge
// `merge_operations` are tracked. As it is dropped, the // `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge. // the segment_ids will be available again for merge.
return Err(e); return;
} }
} }
} }
let previous_metas = segment_updater.load_metas(); let previous_metas = segment_updater.load_metas();
let segments_status = segment_updater segment_updater
.0
.segment_manager .segment_manager
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry)?; .end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
segment_updater.consider_merge_options();
if segments_status == SegmentsStatus::Committed { segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
segment_updater
.save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
}
segment_updater.consider_merge_options().await;
} // we drop all possible handle to a now useless `SegmentMeta`. } // we drop all possible handle to a now useless `SegmentMeta`.
let _ = garbage_collect_files(segment_updater).await; segment_updater.garbage_collect_files_exec();
Ok(()) })
}); .wait()
end_merge_future.map_ok(|_| after_merge_segment_meta)
} }
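Here the scheduled end-of-merge future is mapped through `TryFutureExt::map_ok`, so the awaiting caller receives the merged `SegmentMeta` instead of `()`. A compact sketch of the combinator, with a plain string standing in for the segment metadata:

```rust
use futures::executor::block_on;
use futures::future::{self, TryFutureExt};

fn main() {
    // A future resolving to Result<(), String>, like the scheduled end-of-merge task.
    let end_merge = future::ready(Ok::<(), String>(()));

    // map_ok only touches the success path; errors pass through unchanged.
    let merged_meta = "merged-segment-meta";
    let outcome = block_on(end_merge.map_ok(|_| merged_meta));
    assert_eq!(outcome, Ok("merged-segment-meta"));
}
```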
/// Wait for current merging threads. /// Wait for current merging threads.
@@ -532,9 +510,26 @@ impl SegmentUpdater {
/// ///
/// Obsolete files will eventually be cleaned up /// Obsolete files will eventually be cleaned up
/// by the directory garbage collector. /// by the directory garbage collector.
pub fn wait_merging_thread(&self) -> crate::Result<()> { pub fn wait_merging_thread(&self) -> Result<()> {
self.merge_operations.wait_until_empty(); loop {
Ok(()) let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = {
let mut merging_threads = self.0.merging_threads.write().unwrap();
mem::replace(merging_threads.deref_mut(), HashMap::new())
};
if merging_threads.is_empty() {
return Ok(());
}
debug!("wait merging thread {}", merging_threads.len());
for (_, merging_thread_handle) in merging_threads {
merging_thread_handle
.join()
.map(|_| ())
.map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
}
// Our merging threads may have queued their completed merged segments.
// Let's wait for that too.
self.run_async(move |_| {}).wait()?;
}
} }
} }
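In the single-line variant, `wait_merging_thread` just drains the `MergeOperationInventory`: every running merge holds a `MergeOperation`, and dropping it removes the entry, so waiting until the inventory is empty is enough. A sketch of how such a counted inventory could be built with a `Mutex` and a `Condvar` (hypothetical; not tantivy's actual implementation):

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

// Hypothetical inventory: counts live merge operations and lets a caller
// block until all of them have been dropped.
#[derive(Default)]
struct Inventory {
    count: Mutex<usize>,
    empty: Condvar,
}

impl Inventory {
    fn start(&self) {
        *self.count.lock().unwrap() += 1;
    }
    fn finish(&self) {
        let mut count = self.count.lock().unwrap();
        *count -= 1;
        if *count == 0 {
            self.empty.notify_all();
        }
    }
    fn wait_until_empty(&self) {
        let mut count = self.count.lock().unwrap();
        while *count > 0 {
            count = self.empty.wait(count).unwrap();
        }
    }
}

fn main() {
    let inventory = Arc::new(Inventory::default());
    inventory.start();
    let worker = {
        let inventory = Arc::clone(&inventory);
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(10)); // pretend to merge
            inventory.finish();
        })
    };
    inventory.wait_until_empty(); // same idea as the new wait_merging_thread
    worker.join().unwrap();
}
```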
@@ -690,6 +685,7 @@ mod tests {
index_writer.segment_updater().remove_all_segments(); index_writer.segment_updater().remove_all_segments();
let seg_vec = index_writer let seg_vec = index_writer
.segment_updater() .segment_updater()
.0
.segment_manager .segment_manager
.segment_entries(); .segment_entries();
assert!(seg_vec.is_empty()); assert!(seg_vec.is_empty());
@@ -3,7 +3,7 @@ use crate::core::Segment;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::{SegmentSerializer, SegmentSerializerWriters}; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldType; use crate::schema::FieldType;
@@ -69,8 +69,7 @@ impl SegmentWriter {
schema: &Schema, schema: &Schema,
) -> Result<SegmentWriter> { ) -> Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut segment)?; let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let segment_serializer = SegmentSerializer::new(segment.schema(), segment_serializer_wrts)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers = schema
.fields() .fields()
@@ -250,7 +249,6 @@ impl SegmentWriter {
} }
} }
doc.filter_fields(|field| schema.get_field_entry(field).is_stored()); doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
doc.prepare_for_store();
let doc_writer = self.segment_serializer.get_store_writer(); let doc_writer = self.segment_serializer.get_store_writer();
doc_writer.store(&doc)?; doc_writer.store(&doc)?;
self.max_doc += 1; self.max_doc += 1;
src/lib.rs Normal file → Executable file
@@ -160,6 +160,7 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent; pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -169,58 +170,11 @@ pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings; pub use crate::postings::Postings;
pub use crate::reader::LeasedItem; pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term}; pub use crate::schema::{Document, Term};
use std::fmt;
use once_cell::sync::Lazy; /// Expose the current version of tantivy, as well
/// whether it was compiled with the simd compression.
/// Index format version. pub fn version() -> &'static str {
const INDEX_FORMAT_VERSION: u32 = 1; env!("CARGO_PKG_VERSION")
/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
major: u32,
minor: u32,
patch: u32,
index_format_version: u32,
store_compression: String,
}
impl fmt::Debug for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
static VERSION: Lazy<Version> = Lazy::new(|| Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
index_format_version: INDEX_FORMAT_VERSION,
store_compression: crate::store::COMPRESSION.to_string(),
});
impl ToString for Version {
fn to_string(&self) -> String {
format!(
"tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
self.major, self.minor, self.patch, self.index_format_version, self.store_compression
)
}
}
static VERSION_STRING: Lazy<String> = Lazy::new(|| VERSION.to_string());
/// Expose the current version of tantivy as found in Cargo.toml during compilation.
/// eg. "0.11.0" as well as the compression scheme used in the docstore.
pub fn version() -> &'static Version {
&VERSION
}
/// Exposes the complete version of tantivy as found in Cargo.toml during compilation as a string.
/// eg. "tantivy v0.11.0, index_format v1, store_compression: lz4".
pub fn version_string() -> &'static str {
VERSION_STRING.as_str()
} }
/// Defines tantivy's merging strategy /// Defines tantivy's merging strategy
@@ -333,18 +287,6 @@ mod tests {
sample_with_seed(n, ratio, 4) sample_with_seed(n, ratio, 4)
} }
#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
let version = super::version().to_string();
assert!(regex_ptn.find(&version).is_some());
}
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() { fn test_indexing() {
@@ -940,73 +882,4 @@ mod tests {
assert_eq!(fast_field_reader.get(0), 4f64) assert_eq!(fast_field_reader.get(0), 4f64)
} }
} }
// motivated by #729
#[test]
fn test_update_via_delete_insert() {
use crate::collector::Count;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
use crate::SegmentId;
use futures::executor::block_on;
const DOC_COUNT: u64 = 2u64;
let mut schema_builder = SchemaBuilder::default();
let id = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader().unwrap();
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id));
}
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
// update the 10 elements by deleting and re-adding
for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let doc = doc!(id => doc_id);
index_writer.add_document(doc);
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
// The number of document should be stable.
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.into_iter()
.map(|reader| reader.segment_id())
.collect();
block_on(index_writer.merge(&segment_ids)).unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
} }
@@ -35,9 +35,9 @@
/// let likes = schema_builder.add_u64_field("num_u64", FAST); /// let likes = schema_builder.add_u64_field("num_u64", FAST);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let doc = doc!( /// let doc = doc!(
/// title => "Life Aquatic", /// title => "Life Aquatic",
/// author => "Wes Anderson", /// author => "Wes Anderson",
/// likes => 4u64 /// likes => 4u64
/// ); /// );
/// # } /// # }
/// ``` /// ```
@@ -36,10 +36,11 @@ struct Positions {
impl Positions { impl Positions {
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions { pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES); let skip_len = skip_source.len();
let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted"); let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let (skip_source, long_skip_source) = let body_split = body.len() - u64::SIZE_IN_BYTES * (num_long_skips as usize);
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize)); let (skip_source, long_skip_source) = body.split(body_split);
Positions { Positions {
bit_packer: BitPacker4x::new(), bit_packer: BitPacker4x::new(),
skip_source, skip_source,
@@ -75,7 +75,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut segment = index.new_segment(); let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::for_segment(&mut segment).unwrap(); let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{ {
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap(); let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap(); field_serializer.new_term("abc".as_bytes()).unwrap();
@@ -10,8 +10,8 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use crate::Result; use crate::Result;
use crate::{Directory, DocId};
use std::io::{self, Write}; use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -54,36 +54,33 @@ pub struct InvertedIndexSerializer {
} }
impl InvertedIndexSerializer { impl InvertedIndexSerializer {
pub(crate) fn for_segment(segment: &mut Segment) -> crate::Result<Self> { /// Open a new `InvertedIndexSerializer` for the given segment
let schema = segment.schema(); fn create(
use crate::core::SegmentComponent; terms_write: CompositeWrite<WritePtr>,
let terms_wrt = segment.open_write(SegmentComponent::TERMS)?; postings_write: CompositeWrite<WritePtr>,
let postings_wrt = segment.open_write(SegmentComponent::POSTINGS)?; positions_write: CompositeWrite<WritePtr>,
let positions_wrt = segment.open_write(SegmentComponent::POSITIONS)?; positionsidx_write: CompositeWrite<WritePtr>,
let positions_idx_wrt = segment.open_write(SegmentComponent::POSITIONSSKIP)?;
Ok(Self::open(
schema,
terms_wrt,
postings_wrt,
positions_wrt,
positions_idx_wrt,
))
}
/// Open a new `PostingsSerializer` for the given segment
pub(crate) fn open(
schema: Schema, schema: Schema,
terms_wrt: WritePtr, ) -> Result<InvertedIndexSerializer> {
postings_wrt: WritePtr, Ok(InvertedIndexSerializer {
positions_wrt: WritePtr, terms_write,
positions_idx_wrt: WritePtr, postings_write,
) -> InvertedIndexSerializer { positions_write,
InvertedIndexSerializer { positionsidx_write,
terms_write: CompositeWrite::wrap(terms_wrt),
postings_write: CompositeWrite::wrap(postings_wrt),
positions_write: CompositeWrite::wrap(positions_wrt),
positionsidx_write: CompositeWrite::wrap(positions_idx_wrt),
schema, schema,
} })
}
/// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
InvertedIndexSerializer::create(
CompositeWrite::wrap(segment.open_write(TERMS)?),
CompositeWrite::wrap(segment.open_write(POSTINGS)?),
CompositeWrite::wrap(segment.open_write(POSITIONS)?),
CompositeWrite::wrap(segment.open_write(POSITIONSSKIP)?),
segment.schema(),
)
} }
/// Must be called before starting pushing terms of /// Must be called before starting pushing terms of
@@ -54,21 +54,21 @@ where
match self.excluding_state { match self.excluding_state {
State::ExcludeOne(excluded_doc) => { State::ExcludeOne(excluded_doc) => {
if doc == excluded_doc { if doc == excluded_doc {
return false; false
} } else if excluded_doc > doc {
if excluded_doc > doc { true
return true; } else {
} match self.excluding_docset.skip_next(doc) {
match self.excluding_docset.skip_next(doc) { SkipResult::OverStep => {
SkipResult::OverStep => { self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc()); true
true }
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
} }
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
} }
} }
State::Finished => true, State::Finished => true,
@@ -33,6 +33,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term}; /// use tantivy::{doc, Index, Result, Term};
/// ///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
@@ -58,6 +59,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
///
/// let term = Term::from_field_text(title, "Diary"); /// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true); /// let query = FuzzyTermQuery::new(term, 1, true);
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap(); /// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
@@ -67,7 +69,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// # assert!(example().is_ok());
/// ``` /// ```
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct FuzzyTermQuery { pub struct FuzzyTermQuery {
@@ -4,7 +4,6 @@ use crate::postings::Postings;
use crate::query::bm25::BM25Weight; use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer}; use crate::query::{Intersection, Scorer};
use crate::DocId; use crate::DocId;
use std::cmp::Ordering;
struct PostingsWithOffset<TPostings> { struct PostingsWithOffset<TPostings> {
offset: u32, offset: u32,
@@ -60,16 +59,12 @@ fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
while left_i < left.len() && right_i < right.len() { while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
match left_val.cmp(&right_val) { if left_val < right_val {
Ordering::Less => { left_i += 1;
left_i += 1; } else if right_val < left_val {
} right_i += 1;
Ordering::Equal => { } else {
return true; return true;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
false false
@@ -82,18 +77,14 @@ fn intersection_count(left: &[u32], right: &[u32]) -> usize {
while left_i < left.len() && right_i < right.len() { while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
match left_val.cmp(&right_val) { if left_val < right_val {
Ordering::Less => { left_i += 1;
left_i += 1; } else if right_val < left_val {
} right_i += 1;
Ordering::Equal => { } else {
count += 1; count += 1;
left_i += 1; left_i += 1;
right_i += 1; right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
count count
@@ -112,19 +103,15 @@ fn intersection(left: &mut [u32], right: &[u32]) -> usize {
while left_i < left_len && right_i < right_len { while left_i < left_len && right_i < right_len {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
match left_val.cmp(&right_val) { if left_val < right_val {
Ordering::Less => { left_i += 1;
left_i += 1; } else if right_val < left_val {
} right_i += 1;
Ordering::Equal => { } else {
left[count] = left_val; left[count] = left_val;
count += 1; count += 1;
left_i += 1; left_i += 1;
right_i += 1; right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
count count
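One side of this file rewrites the intersection helpers from `if`/`else if` chains into a `match` on `Ordering`, which makes the three cases of the two-pointer walk explicit. A standalone version of the in-place variant for reference (both inputs must be sorted):

```rust
use std::cmp::Ordering;

// In-place two-pointer intersection over sorted doc-id slices.
fn intersection(left: &mut [u32], right: &[u32]) -> usize {
    let (mut left_i, mut right_i, mut count) = (0, 0, 0);
    while left_i < left.len() && right_i < right.len() {
        let (left_val, right_val) = (left[left_i], right[right_i]);
        match left_val.cmp(&right_val) {
            Ordering::Less => left_i += 1,
            Ordering::Greater => right_i += 1,
            Ordering::Equal => {
                // Keep the common value at the front of `left`.
                left[count] = left_val;
                count += 1;
                left_i += 1;
                right_i += 1;
            }
        }
    }
    count
}

fn main() {
    let mut left = [1u32, 3, 5, 7, 9];
    let right = [3u32, 4, 5, 9, 10];
    let count = intersection(&mut left, &right);
    assert_eq!(&left[..count], &[3u32, 5, 9][..]);
}
```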
@@ -8,7 +8,7 @@ use crate::query::PhraseQuery;
use crate::query::Query; use crate::query::Query;
use crate::query::RangeQuery; use crate::query::RangeQuery;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{Facet, IndexRecordOption}; use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema}; use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term}; use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager; use crate::tokenizer::TokenizerManager;
@@ -319,10 +319,7 @@ impl QueryParser {
)) ))
} }
} }
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => Ok(vec![(0, Term::from_field_text(field, phrase))]),
let facet = Facet::from_text(phrase);
Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
}
FieldType::Bytes => { FieldType::Bytes => {
let field_name = self.schema.get_field_name(field).to_string(); let field_name = self.schema.get_field_name(field).to_string();
Err(QueryParserError::FieldNotIndexed(field_name)) Err(QueryParserError::FieldNotIndexed(field_name))
@@ -557,7 +554,6 @@ mod test {
schema_builder.add_text_field("with_stop_words", text_options); schema_builder.add_text_field("with_stop_words", text_options);
schema_builder.add_date_field("date", INDEXED); schema_builder.add_date_field("date", INDEXED);
schema_builder.add_f64_field("float", INDEXED); schema_builder.add_f64_field("float", INDEXED);
schema_builder.add_facet_field("facet");
let schema = schema_builder.build(); let schema = schema_builder.build();
let default_fields = vec![title, text]; let default_fields = vec![title, text];
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
@@ -592,13 +588,9 @@ mod test {
} }
#[test] #[test]
pub fn test_parse_query_facet() { pub fn test_parse_query_simple() {
let query_parser = make_query_parser(); let query_parser = make_query_parser();
let query = query_parser.parse_query("facet:/root/branch/leaf").unwrap(); assert!(query_parser.parse_query("toto").is_ok());
assert_eq!(
format!("{:?}", query),
"TermQuery(Term(field=11,bytes=[114, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102]))"
);
} }
#[test] #[test]
@@ -38,33 +38,41 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example /// # Example
/// ///
/// ```rust /// ```rust
/// use tantivy::collector::Count; /// # use tantivy::collector::Count;
/// use tantivy::query::RangeQuery; /// # use tantivy::query::RangeQuery;
/// use tantivy::schema::{Schema, INDEXED}; /// # use tantivy::schema::{Schema, INDEXED};
/// use tantivy::{doc, Index}; /// # use tantivy::{doc, Index, Result};
/// # fn test() -> tantivy::Result<()> { /// #
/// let mut schema_builder = Schema::builder(); /// # fn run() -> Result<()> {
/// let year_field = schema_builder.add_u64_field("year", INDEXED); /// # let mut schema_builder = Schema::builder();
/// let schema = schema_builder.build(); /// # let year_field = schema_builder.add_u64_field("year", INDEXED);
/// /// # let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// #
/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?; /// # let index = Index::create_in_ram(schema);
/// for year in 1950u64..2017u64 { /// # {
/// let num_docs_within_year = 10 + (year - 1950) * (year - 1950); /// # let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
/// for _ in 0..num_docs_within_year { /// # for year in 1950u64..2017u64 {
/// index_writer.add_document(doc!(year_field => year)); /// # let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// } /// # for _ in 0..num_docs_within_year {
/// } /// # index_writer.add_document(doc!(year_field => year));
/// index_writer.commit()?; /// # }
/// /// # }
/// let reader = index.reader()?; /// # index_writer.commit().unwrap();
/// # }
/// # let reader = index.reader()?;
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970); /// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?; /// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
/// assert_eq!(num_60s_books, 2285); ///
/// Ok(()) /// # assert_eq!(num_60s_books, 2285);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # run().unwrap()
/// # } /// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RangeQuery { pub struct RangeQuery {
@@ -15,40 +15,40 @@ use tantivy_fst::Regex;
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery; /// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Term}; /// use tantivy::{doc, Index, Result, Term};
/// ///
/// # fn test() -> tantivy::Result<()> { /// # fn main() { example().unwrap(); }
/// let mut schema_builder = Schema::builder(); /// fn example() -> Result<()> {
/// let title = schema_builder.add_text_field("title", TEXT); /// let mut schema_builder = Schema::builder();
/// let schema = schema_builder.build(); /// let title = schema_builder.add_text_field("title", TEXT);
/// let index = Index::create_in_ram(schema); /// let schema = schema_builder.build();
/// { /// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer(3_000_000)?; /// {
/// index_writer.add_document(doc!( /// let mut index_writer = index.writer(3_000_000)?;
/// title => "The Name of the Wind", /// index_writer.add_document(doc!(
/// )); /// title => "The Name of the Wind",
/// index_writer.add_document(doc!( /// ));
/// title => "The Diary of Muadib", /// index_writer.add_document(doc!(
/// )); /// title => "The Diary of Muadib",
/// index_writer.add_document(doc!( /// ));
/// title => "A Dairy Cow", /// index_writer.add_document(doc!(
/// )); /// title => "A Dairy Cow",
/// index_writer.add_document(doc!( /// ));
/// title => "The Diary of a Young Girl", /// index_writer.add_document(doc!(
/// )); /// title => "The Diary of a Young Girl",
/// index_writer.commit().unwrap(); /// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// } /// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RegexQuery { pub struct RegexQuery {
@@ -23,39 +23,42 @@ use std::fmt;
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
-/// use tantivy::{doc, Index, Term};
+/// use tantivy::{doc, Index, Result, Term};
-/// # fn test() -> tantivy::Result<()> {
+///
+/// # fn main() { example().unwrap(); }
+/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
///     let mut index_writer = index.writer(3_000_000)?;
///     index_writer.add_document(doc!(
///         title => "The Name of the Wind",
///     ));
///     index_writer.add_document(doc!(
///         title => "The Diary of Muadib",
///     ));
///     index_writer.add_document(doc!(
///         title => "A Dairy Cow",
///     ));
///     index_writer.add_document(doc!(
///         title => "The Diary of a Young Girl",
///     ));
///     index_writer.commit()?;
/// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
+///
/// let query = TermQuery::new(
///     Term::from_field_text(title, "diary"),
///     IndexRecordOption::Basic,
/// );
-/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
+/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// assert_eq!(count, 2);
+///
/// Ok(())
-/// # }
-/// # assert!(test().is_ok());
+/// }
/// ```
#[derive(Clone)]
pub struct TermQuery {

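The `IndexRecordOption` handed to `TermQuery::new` only determines how much postings information the scorer may read (documents only, or also term frequencies and positions); it does not change which documents match. A small sketch, assuming a `title` text field as in the example above:

```rust
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Schema, TEXT};
use tantivy::Term;

fn build_term_queries() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let _schema = schema_builder.build();

    let term = Term::from_field_text(title, "diary");

    // Enough for boolean-style matching and counting.
    let _for_counting = TermQuery::new(term.clone(), IndexRecordOption::Basic);

    // Lets the scorer read term frequencies as well.
    let _for_scoring = TermQuery::new(term, IndexRecordOption::WithFreqs);
}
```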
View File

@@ -162,11 +162,6 @@ pub struct IndexReader {
}

impl IndexReader {
    #[cfg(test)]
    pub(crate) fn index(&self) -> Index {
        self.inner.index.clone()
    }

    /// Update searchers so that they reflect the state of the last
    /// `.commit()`.
    ///

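The `reload()` documented above is what makes a fresh commit visible to searchers obtained afterwards. A minimal sketch of the commit-then-reload pattern (heap budget and field name are illustrative):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn reload_after_commit() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let reader = index.reader()?;
    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(title => "hello"));
    writer.commit()?;

    // Searchers handed out before reload() still reflect the previous generation.
    reader.reload()?;
    assert_eq!(reader.searcher().num_docs(), 1);
    Ok(())
}
```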
View File

@@ -167,7 +167,7 @@ mod tests {
    use super::Pool;
    use super::Queue;
-    use std::{iter, mem};
+    use std::iter;

    #[test]
    fn test_pool() {
@@ -197,67 +197,33 @@ mod tests {
    fn test_pool_dont_panic_on_empty_pop() {
        // When the object pool is exhausted, it shouldn't panic on pop()
        use std::sync::Arc;
-        use std::thread;
+        use std::{thread, time};
        // Wrap the pool in an Arc, same way as its used in `core/index.rs`
-        let pool1 = Arc::new(Pool::new());
+        let pool = Arc::new(Pool::new());
        // clone pools outside the move scope of each new thread
-        let pool2 = Arc::clone(&pool1);
-        let pool3 = Arc::clone(&pool1);
+        let pool1 = Arc::clone(&pool);
+        let pool2 = Arc::clone(&pool);
        let elements_for_pool = vec![1, 2];
-        pool1.publish_new_generation(elements_for_pool);
+        pool.publish_new_generation(elements_for_pool);
        let mut threads = vec![];
+        let sleep_dur = time::Duration::from_millis(10);
        // spawn one more thread than there are elements in the pool
-        let (start_1_send, start_1_recv) = crossbeam::bounded(0);
-        let (start_2_send, start_2_recv) = crossbeam::bounded(0);
-        let (start_3_send, start_3_recv) = crossbeam::bounded(0);
-        let (event_send1, event_recv) = crossbeam::unbounded();
-        let event_send2 = event_send1.clone();
-        let event_send3 = event_send1.clone();
        threads.push(thread::spawn(move || {
-            assert_eq!(start_1_recv.recv(), Ok("start"));
+            // leasing to make sure it's not dropped before sleep is called
+            let _leased_searcher = &pool.acquire();
+            thread::sleep(sleep_dur);
+        }));
+        threads.push(thread::spawn(move || {
+            // leasing to make sure it's not dropped before sleep is called
            let _leased_searcher = &pool1.acquire();
-            assert!(event_send1.send("1 acquired").is_ok());
-            assert_eq!(start_1_recv.recv(), Ok("stop"));
-            assert!(event_send1.send("1 stopped").is_ok());
-            mem::drop(_leased_searcher);
+            thread::sleep(sleep_dur);
        }));
        threads.push(thread::spawn(move || {
-            assert_eq!(start_2_recv.recv(), Ok("start"));
+            // leasing to make sure it's not dropped before sleep is called
            let _leased_searcher = &pool2.acquire();
-            assert!(event_send2.send("2 acquired").is_ok());
-            assert_eq!(start_2_recv.recv(), Ok("stop"));
-            mem::drop(_leased_searcher);
-            assert!(event_send2.send("2 stopped").is_ok());
+            thread::sleep(sleep_dur);
        }));
-        threads.push(thread::spawn(move || {
-            assert_eq!(start_3_recv.recv(), Ok("start"));
-            let _leased_searcher = &pool3.acquire();
-            assert!(event_send3.send("3 acquired").is_ok());
-            assert_eq!(start_3_recv.recv(), Ok("stop"));
-            mem::drop(_leased_searcher);
-            assert!(event_send3.send("3 stopped").is_ok());
-        }));
-        assert!(start_1_send.send("start").is_ok());
-        assert_eq!(event_recv.recv(), Ok("1 acquired"));
-        assert!(start_2_send.send("start").is_ok());
-        assert_eq!(event_recv.recv(), Ok("2 acquired"));
-        assert!(start_3_send.send("start").is_ok());
-        assert!(event_recv.try_recv().is_err());
-        assert!(start_1_send.send("stop").is_ok());
-        assert_eq!(event_recv.recv(), Ok("1 stopped"));
-        assert_eq!(event_recv.recv(), Ok("3 acquired"));
-        assert!(start_3_send.send("stop").is_ok());
-        assert_eq!(event_recv.recv(), Ok("3 stopped"));
-        assert!(start_2_send.send("stop").is_ok());
-        assert_eq!(event_recv.recv(), Ok("2 stopped"));
    }
}

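The crossbeam-based variant of the test above coordinates its threads with zero-capacity channels instead of `thread::sleep`, so the order in which searchers are acquired is driven by explicit messages rather than timing. A stripped-down sketch of that rendezvous pattern, independent of `Pool` itself (names are illustrative):

```rust
use std::thread;

fn rendezvous_demo() {
    // bounded(0) has no buffer: send() blocks until a receiver is ready,
    // which turns the channel into a synchronization point.
    let (start_send, start_recv) = crossbeam::bounded::<&str>(0);
    let (event_send, event_recv) = crossbeam::unbounded::<&str>();

    let worker = thread::spawn(move || {
        assert_eq!(start_recv.recv(), Ok("start"));
        // ... acquire a pooled object here ...
        event_send.send("acquired").unwrap();
    });

    start_send.send("start").unwrap();
    assert_eq!(event_recv.recv(), Ok("acquired"));
    worker.join().unwrap();
}
```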
View File

@@ -155,21 +155,6 @@ impl Document {
            .find(|field_value| field_value.field() == field)
            .map(FieldValue::value)
    }
/// Prepares Document for being stored in the document store
///
/// Method transforms PreTokenizedString values into String
/// values.
pub fn prepare_for_store(&mut self) {
for field_value in &mut self.field_values {
if let Value::PreTokStr(pre_tokenized_text) = field_value.value() {
*field_value = FieldValue::new(
field_value.field(),
Value::Str(pre_tokenized_text.text.clone()), //< TODO somehow remove .clone()
);
}
}
}
}

impl BinarySerializable for Document {
@@ -195,7 +180,6 @@ impl BinarySerializable for Document {
mod tests {
    use crate::schema::*;
    use crate::tokenizer::{PreTokenizedString, Token};

    #[test]
    fn test_doc() {
@@ -205,38 +189,4 @@ mod tests {
        doc.add_text(text_field, "My title");
        assert_eq!(doc.field_values().len(), 1);
    }
#[test]
fn test_prepare_for_store() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = Document::default();
let pre_tokenized_text = PreTokenizedString {
text: String::from("A"),
tokens: vec![Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
}],
};
doc.add_pre_tokenized_text(text_field, &pre_tokenized_text);
doc.add_text(text_field, "title");
doc.prepare_for_store();
assert_eq!(doc.field_values().len(), 2);
match doc.field_values()[0].value() {
Value::Str(ref text) => assert_eq!(text, "A"),
_ => panic!("Incorrect variant of Value"),
}
match doc.field_values()[1].value() {
Value::Str(ref text) => assert_eq!(text, "title"),
_ => panic!("Incorrect variant of Value"),
}
}
}

View File

@@ -6,7 +6,6 @@ use crate::schema::TextFieldIndexing;
use crate::schema::Value;
use crate::schema::{IntOptions, TextOptions};
use crate::tokenizer::PreTokenizedString;
use chrono::{FixedOffset, Utc};
use serde_json::Value as JsonValue;

/// Possible error that may occur while parsing a field value
@@ -125,20 +124,13 @@ impl FieldType {
    pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> {
        match *json {
            JsonValue::String(ref field_text) => match *self {
-                FieldType::Date(_) => {
-                    let dt_with_fixed_tz: chrono::DateTime<FixedOffset> =
-                        chrono::DateTime::parse_from_rfc3339(field_text).map_err(|err|
-                            ValueParsingError::TypeError(format!(
-                                "Failed to parse date from JSON. Expected rfc3339 format, got {}. {:?}",
-                                field_text, err
-                            ))
-                        )?;
-                    Ok(Value::Date(dt_with_fixed_tz.with_timezone(&Utc)))
-                }
                FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
-                FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) => Err(
-                    ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
-                ),
+                FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {
+                    Err(ValueParsingError::TypeError(format!(
+                        "Expected an integer, got {:?}",
+                        json
+                    )))
+                }
                FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
                FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
                    ValueParsingError::InvalidBase64(format!(
@@ -216,35 +208,7 @@ mod tests {
    use crate::schema::field_type::ValueParsingError;
    use crate::schema::TextOptions;
    use crate::schema::Value;
    use crate::schema::{Schema, INDEXED};
    use crate::tokenizer::{PreTokenizedString, Token};
    use crate::{DateTime, Document};
    use chrono::{NaiveDate, NaiveDateTime, NaiveTime, Utc};
#[test]
fn test_deserialize_json_date() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build();
let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
let doc = schema.parse_document(doc_json).unwrap();
let date = doc.get_first(date_field).unwrap();
assert_eq!(format!("{:?}", date), "Date(2019-10-12T05:20:50.520Z)");
}
#[test]
fn test_serialize_json_date() {
let mut doc = Document::new();
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build();
let naive_date = NaiveDate::from_ymd(1982, 9, 17);
let naive_time = NaiveTime::from_hms(13, 20, 00);
let date_time = DateTime::from_utc(NaiveDateTime::new(naive_date, naive_time), Utc);
doc.add_date(date_field, &date_time);
let doc_json = schema.to_json(&doc);
assert_eq!(doc_json, r#"{"date":["1982-09-17T13:20:00+00:00"]}"#);
}
    #[test]
    fn test_bytes_value_from_json() {

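The date branch removed above is what lets `Schema::parse_document` accept RFC 3339 strings for date fields on the newer side of this diff. A condensed sketch of that path (field name and timestamp are illustrative):

```rust
use tantivy::schema::{Schema, INDEXED};

fn parse_date_document() {
    let mut schema_builder = Schema::builder();
    let date_field = schema_builder.add_date_field("date", INDEXED);
    let schema = schema_builder.build();

    // The offset is normalized to UTC when the value is parsed.
    let doc = schema
        .parse_document(r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#)
        .expect("RFC 3339 dates should parse");
    assert!(doc.get_first(date_field).is_some());
}
```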
View File

@@ -53,7 +53,7 @@ where
    fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output {
        SchemaFlagList {
            head: head.head,
-            tail: self,
+            tail: self.clone(),
        }
    }
}

View File

@@ -44,7 +44,7 @@ We can split the problem of generating a search result page into two phases :
the search results page. (`doc_ids[] -> Document[]`)

In the first phase, the ability to search for documents by the given field is determined by the
- [`IndexRecordOption`](enum.IndexRecordOption.html) of our
+ [`TextIndexingOptions`](enum.TextIndexingOptions.html) of our
[`TextOptions`](struct.TextOptions.html).

The effect of each possible setting is described more in detail

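A compact sketch of those two phases against the collector API, assuming a `title` field that is both indexed and stored:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::{IndexRecordOption, Schema, STORED, TEXT};
use tantivy::{doc, Index, Term};

fn two_phase_search() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(title => "The Diary of a Young Girl"));
    writer.commit()?;

    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = TermQuery::new(
        Term::from_field_text(title, "diary"),
        IndexRecordOption::Basic,
    );

    // Phase 1: collect the addresses of the matching documents.
    let top_docs = searcher.search(&query, &TopDocs::with_limit(10))?;

    // Phase 2: fetch the stored documents for the result page.
    for (_score, doc_address) in top_docs {
        let retrieved = searcher.doc(doc_address)?;
        println!("{}", index.schema().to_json(&retrieved));
    }
    Ok(())
}
```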
View File

@@ -166,7 +166,7 @@ impl SchemaBuilder {
    }

    /// Adds a field entry to the schema in build.
-    pub fn add_field(&mut self, field_entry: FieldEntry) -> Field {
+    fn add_field(&mut self, field_entry: FieldEntry) -> Field {
        let field = Field::from_field_id(self.fields.len() as u32);
        let field_name = field_entry.name().to_string();
        self.fields.push(field_entry);
@@ -401,7 +401,6 @@ pub enum DocParsingError {
mod tests {
    use crate::schema::field_type::ValueParsingError;
    use crate::schema::int_options::Cardinality::SingleValue;
    use crate::schema::schema::DocParsingError::NotJSON;
    use crate::schema::*;
    use matches::{assert_matches, matches};
@@ -716,94 +715,4 @@ mod tests {
        assert_matches!(json_err, Err(NotJSON(_)));
    }
#[test]
pub fn test_schema_add_field() {
let mut schema_builder = SchemaBuilder::default();
let id_options = TextOptions::default().set_stored().set_indexing_options(
TextFieldIndexing::default()
.set_tokenizer("raw")
.set_index_option(IndexRecordOption::Basic),
);
let timestamp_options = IntOptions::default()
.set_stored()
.set_indexed()
.set_fast(SingleValue);
schema_builder.add_text_field("_id", id_options);
schema_builder.add_date_field("_timestamp", timestamp_options);
let schema_content = r#"[
{
"name": "text",
"type": "text",
"options": {
"indexing": {
"record": "position",
"tokenizer": "default"
},
"stored": false
}
},
{
"name": "popularity",
"type": "i64",
"options": {
"indexed": false,
"fast": "single",
"stored": true
}
}
]"#;
let tmp_schema: Schema =
serde_json::from_str(&schema_content).expect("error while reading json");
for (_field, field_entry) in tmp_schema.fields() {
schema_builder.add_field(field_entry.clone());
}
let schema = schema_builder.build();
let schema_json = serde_json::to_string_pretty(&schema).unwrap();
let expected = r#"[
{
"name": "_id",
"type": "text",
"options": {
"indexing": {
"record": "basic",
"tokenizer": "raw"
},
"stored": true
}
},
{
"name": "_timestamp",
"type": "date",
"options": {
"indexed": true,
"fast": "single",
"stored": true
}
},
{
"name": "text",
"type": "text",
"options": {
"indexing": {
"record": "position",
"tokenizer": "default"
},
"stored": false
}
},
{
"name": "popularity",
"type": "i64",
"options": {
"indexed": false,
"fast": "single",
"stored": true
}
}
]"#;
assert_eq!(schema_json, expected);
}
}

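The removed `test_schema_add_field` above relies on `add_field` being public so that `FieldEntry` values from an existing schema can be copied into a builder. A condensed sketch of that pattern (the JSON literal and field names are illustrative, and `serde_json` is assumed to be available):

```rust
use tantivy::schema::{Schema, SchemaBuilder, TEXT};

fn merge_schemas() {
    let mut schema_builder = SchemaBuilder::default();
    schema_builder.add_text_field("_id", TEXT);

    // Field entries defined elsewhere, e.g. deserialized from JSON.
    let other: Schema = serde_json::from_str(
        r#"[{
            "name": "popularity",
            "type": "i64",
            "options": { "indexed": false, "fast": "single", "stored": true }
        }]"#,
    )
    .expect("invalid schema JSON");

    for (_field, field_entry) in other.fields() {
        schema_builder.add_field(field_entry.clone());
    }
    let schema = schema_builder.build();
    assert!(schema.get_field("popularity").is_some());
}
```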
View File

@@ -75,7 +75,7 @@ impl Serialize for Value {
            Value::U64(u) => serializer.serialize_u64(u),
            Value::I64(u) => serializer.serialize_i64(u),
            Value::F64(u) => serializer.serialize_f64(u),
-            Value::Date(ref date) => serializer.serialize_str(&date.to_rfc3339()),
+            Value::Date(ref date) => serializer.serialize_i64(date.timestamp()),
            Value::Facet(ref facet) => facet.serialize(serializer),
            Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes),
        }
@@ -96,14 +96,14 @@ impl<'de> Deserialize<'de> for Value {
            formatter.write_str("a string or u32")
        }

-        fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
-            Ok(Value::I64(v))
-        }

        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
            Ok(Value::U64(v))
        }

+        fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
+            Ok(Value::I64(v))
+        }

        fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> {
            Ok(Value::F64(v))
        }
@@ -209,8 +209,8 @@ impl From<f64> for Value {
    }
}

-impl From<crate::DateTime> for Value {
-    fn from(date_time: crate::DateTime) -> Value {
+impl From<DateTime> for Value {
+    fn from(date_time: DateTime) -> Value {
        Value::Date(date_time)
    }
}
@@ -233,12 +233,6 @@ impl From<Vec<u8>> for Value {
    }
}
impl From<PreTokenizedString> for Value {
fn from(pretokenized_string: PreTokenizedString) -> Value {
Value::PreTokStr(pretokenized_string)
}
}
mod binary_serialize {
    use super::Value;
    use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
@@ -362,17 +356,3 @@ mod binary_serialize {
}
}
}
#[cfg(test)]
mod tests {
use super::Value;
use crate::DateTime;
use std::str::FromStr;
#[test]
fn test_serialize_date() {
let value = Value::Date(DateTime::from_str("1996-12-20T00:39:57+00:00").unwrap());
let serialized_value_json = serde_json::to_string_pretty(&value).unwrap();
assert_eq!(serialized_value_json, r#""1996-12-20T00:39:57+00:00""#);
}
}
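The `Serialize` change above swaps a bare i64 timestamp for an RFC 3339 string when a `Value::Date` is written out as JSON; the removed `test_serialize_date` pins down exactly that. A small sketch of the string form (assuming `serde_json` is available):

```rust
use std::str::FromStr;
use tantivy::schema::Value;
use tantivy::DateTime;

fn serialize_date_as_rfc3339() {
    let date = DateTime::from_str("1996-12-20T00:39:57+00:00").unwrap();
    let json = serde_json::to_string(&Value::Date(date)).unwrap();
    assert_eq!(json, r#""1996-12-20T00:39:57+00:00""#);
}
```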

View File

@@ -331,8 +331,9 @@ mod tests {
    use std::collections::BTreeMap;
    use std::iter::Iterator;

-    const TEST_TEXT: &'static str = r#"Rust is a systems programming language sponsored by
-Mozilla which describes it as a "safe, concurrent, practical language", supporting functional and
+    const TEST_TEXT: &'static str =
+        r#"Rust is a systems programming language sponsored by Mozilla which
+describes it as a "safe, concurrent, practical language", supporting functional and
imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?],
but its designers intend it to provide better memory safety while still maintaining
performance.
@@ -362,13 +363,13 @@ Survey in 2016, 2017, and 2018."#;
        let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
        assert_eq!(
            snippet.fragments,
-            "Rust is a systems programming language sponsored by\n\
-             Mozilla which describes it as a \"safe"
+            "Rust is a systems programming language sponsored by \
+             Mozilla which\ndescribes it as a \"safe"
        );
        assert_eq!(
            snippet.to_html(),
            "<b>Rust</b> is a systems programming <b>language</b> \
-             sponsored by\nMozilla which describes it as a &quot;safe"
+             sponsored by Mozilla which\ndescribes it as a &quot;safe"
        )
    }

View File

@@ -1,9 +1,6 @@
+extern crate lz4;
use std::io::{self, Read, Write};

-/// Name of the compression scheme used in the doc store.
-///
-/// This name is appended to the version string of tantivy.
-pub const COMPRESSION: &'static str = "lz4";

pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
    compressed.clear();

View File

@@ -2,11 +2,6 @@ use snap;
use std::io::{self, Read, Write};

/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &str = "snappy";

pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
    compressed.clear();
    let mut encoder = snap::Writer::new(compressed);

View File

@@ -42,22 +42,18 @@ pub use self::writer::StoreWriter;
#[cfg(feature = "lz4")]
mod compression_lz4;
#[cfg(feature = "lz4")]
-pub use self::compression_lz4::COMPRESSION;
-#[cfg(feature = "lz4")]
-use self::compression_lz4::{compress, decompress};
+use self::compression_lz4::*;

#[cfg(not(feature = "lz4"))]
mod compression_snap;
#[cfg(not(feature = "lz4"))]
-pub use self::compression_snap::COMPRESSION;
-#[cfg(not(feature = "lz4"))]
-use self::compression_snap::{compress, decompress};
+use self::compression_snap::*;

#[cfg(test)]
pub mod tests {
    use super::*;
-    use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
+    use crate::directory::{Directory, RAMDirectory, WritePtr};
    use crate::schema::Document;
    use crate::schema::FieldValue;
    use crate::schema::Schema;

View File

@@ -36,7 +36,7 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder};
mod tests {
    use super::{TermDictionary, TermDictionaryBuilder, TermStreamer};
    use crate::core::Index;
-    use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, ReadOnlySource};
+    use crate::directory::{Directory, RAMDirectory, ReadOnlySource};
    use crate::postings::TermInfo;
    use crate::schema::{Document, FieldType, Schema, TEXT};
    use std::path::PathBuf;

View File

@@ -2,6 +2,8 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//!
//! let tokenizer = RawTokenizer
//!     .filter(AlphaNumOnlyFilter);
//!
@@ -18,6 +20,7 @@
//! assert!(stream.next().is_some());
//! // the "emoji" is dropped because its not an alphanum
//! assert!(stream.next().is_none());
//! # }
//! ```
use super::{Token, TokenFilter, TokenStream};

View File

@@ -7,6 +7,7 @@
//! ```rust
//! use tantivy::schema::*;
//!
//! # fn main() {
//! let mut schema_builder = Schema::builder();
//!
//! let text_options = TextOptions::default()
@@ -30,6 +31,7 @@
//! schema_builder.add_text_field("uuid", id_options);
//!
//! let schema = schema_builder.build();
//! # }
//! ```
//!
//! By default, `tantivy` offers the following tokenizers:
@@ -64,10 +66,12 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//! let en_stem = SimpleTokenizer
//!     .filter(RemoveLongFilter::limit(40))
//!     .filter(LowerCaser)
//!     .filter(Stemmer::new(Language::English));
//! # }
//! ```
//!
//! Once your tokenizer is defined, you need to
@@ -77,12 +81,13 @@
//! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*;
//! # use tantivy::Index;
-//! #
-//! let custom_en_tokenizer = SimpleTokenizer;
+//! # fn main() {
+//! # let custom_en_tokenizer = SimpleTokenizer;
//! # let schema = Schema::builder().build();
//! let index = Index::create_in_ram(schema);
//! index.tokenizers()
//!     .register("custom_en", custom_en_tokenizer);
//! # }
//! ```
//!
//! If you built your schema programmatically, a complete example
@@ -97,6 +102,7 @@
//! use tantivy::tokenizer::*;
//! use tantivy::Index;
//!
//! # fn main() {
//! let mut schema_builder = Schema::builder();
//! let text_field_indexing = TextFieldIndexing::default()
//!     .set_tokenizer("custom_en")
@@ -115,6 +121,8 @@
//! index
//!     .tokenizers()
//!     .register("custom_en", custom_en_tokenizer);
//! // ...
//! # }
//! ```
//!
mod alphanum_only;

View File

@@ -31,7 +31,7 @@ use super::{Token, TokenStream, Tokenizer};
///
/// ```rust
/// use tantivy::tokenizer::*;
-///
+/// # fn main() {
/// let tokenizer = NgramTokenizer::new(2, 3, false);
/// let mut stream = tokenizer.token_stream("hello");
/// {
@@ -77,6 +77,7 @@ use super::{Token, TokenStream, Tokenizer};
/// assert_eq!(token.offset_to, 5);
/// }
/// assert!(stream.next().is_none());
/// # }
/// ```
#[derive(Clone)]
pub struct NgramTokenizer {

View File

@@ -2,6 +2,8 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//!
//! let tokenizer = SimpleTokenizer
//!     .filter(RemoveLongFilter::limit(5));
//!
@@ -10,6 +12,7 @@
//! // out of the token stream.
//! assert_eq!(stream.next().unwrap().text, "nice");
//! assert!(stream.next().is_none());
//! # }
//! ```
//!
use super::{Token, TokenFilter, TokenStream};

View File

@@ -15,7 +15,6 @@ pub enum Language {
    Greek,
    Hungarian,
    Italian,
    Norwegian,
    Portuguese,
    Romanian,
    Russian,
@@ -39,7 +38,6 @@ impl Language {
            Greek => Algorithm::Greek,
            Hungarian => Algorithm::Hungarian,
            Italian => Algorithm::Italian,
            Norwegian => Algorithm::Norwegian,
            Portuguese => Algorithm::Portuguese,
            Romanian => Algorithm::Romanian,
            Russian => Algorithm::Russian,

View File

@@ -2,6 +2,7 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//! let tokenizer = SimpleTokenizer
//!     .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
//!
@@ -9,6 +10,7 @@
//! assert_eq!(stream.next().unwrap().text, "fox");
//! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none());
//! # }
//! ```
use super::{Token, TokenFilter, TokenStream};
use fnv::FnvHasher;

@@ -44,7 +46,7 @@ impl StopWordFilter {
            "there", "these", "they", "this", "to", "was", "will", "with",
        ];
-        StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect())
+        StopWordFilter::remove(words.iter().map(|s| s.to_string()).collect())
    }
}

View File

@@ -58,10 +58,12 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// ```rust
/// use tantivy::tokenizer::*;
///
/// # fn main() {
/// let en_stem = SimpleTokenizer
///     .filter(RemoveLongFilter::limit(40))
///     .filter(LowerCaser)
///     .filter(Stemmer::default());
/// # }
/// ```
///
    fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
@@ -186,6 +188,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// ```
/// use tantivy::tokenizer::*;
///
/// # fn main() {
/// let tokenizer = SimpleTokenizer
///     .filter(RemoveLongFilter::limit(40))
///     .filter(LowerCaser);
@@ -204,6 +207,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// assert_eq!(token.offset_to, 12);
/// assert_eq!(token.position, 1);
/// }
/// # }
/// ```
///
pub trait TokenStream {
@@ -223,15 +227,17 @@ pub trait TokenStream {
/// and `.token()`.
///
/// ```
-/// use tantivy::tokenizer::*;
-///
-/// let tokenizer = SimpleTokenizer
-///     .filter(RemoveLongFilter::limit(40))
-///     .filter(LowerCaser);
+/// # use tantivy::tokenizer::*;
+/// #
+/// # fn main() {
+/// # let tokenizer = SimpleTokenizer
+/// #     .filter(RemoveLongFilter::limit(40))
+/// #     .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
/// while let Some(token) = token_stream.next() {
///     println!("Token {:?}", token.text);
/// }
+/// # }
/// ```
    fn next(&mut self) -> Option<&Token> {
        if self.advance() {

View File

@@ -1,8 +1,6 @@
use fail;
use std::path::Path;
-use tantivy::directory::{
-    Directory, ManagedDirectory, RAMDirectory, ReadOnlyDirectory, TerminatingWrite,
-};
+use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
use tantivy::doc;
use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term};
@@ -30,11 +28,11 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
    // The initial 1*off is there to allow for the removal of the
    // lock file.
    fail::cfg("RAMDirectory::delete", "1*off->1*return").unwrap();
-    assert!(managed_directory.garbage_collect(Default::default).is_ok());
+    managed_directory.garbage_collect(Default::default);
    assert!(managed_directory.exists(test_path));
    // running the gc a second time should remove the file.
-    assert!(managed_directory.garbage_collect(Default::default).is_ok());
+    managed_directory.garbage_collect(Default::default);
    assert!(
        !managed_directory.exists(test_path),
        "The file should have been deleted"