Compare commits

...

41 Commits
0.1 ... bundle

Author SHA1 Message Date
Paul Masurel
d7973892a2 extra commit 2019-12-27 22:53:04 +09:00
Paul Masurel
cd7484c035 Added ReadOnlyDirectory and implemented Bundle Directory 2019-12-27 12:05:39 +09:00
Paul Masurel
7ed6bc8718 Added serialize to bundle in the RAMDirectory. 2019-12-26 10:06:52 +09:00
Paul Masurel
d12a06b65b Tiny code simplification. 2019-12-26 09:33:17 +09:00
Minoru Osuka
749432f949 Make SchemaBuilder::add_field() public (#742)
* Make add_field() public

* cargo format
2019-12-25 20:37:34 +09:00
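
Since add_field() is now public, a schema field can be registered from a hand-built FieldEntry. A minimal sketch; the FieldEntry::new_text constructor and the TEXT.into() conversion are assumptions about this version of the schema API:

    use tantivy::schema::{FieldEntry, Schema, TEXT};

    // Build a FieldEntry directly and register it through the now-public
    // add_field(). FieldEntry::new_text is an assumption about this API version.
    let mut schema_builder = Schema::builder();
    let entry = FieldEntry::new_text("title".to_string(), TEXT.into());
    let _title = schema_builder.add_field(entry);
    let _schema = schema_builder.build();
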
Paul Masurel
c1400f25a7 Handle facet search in the QueryParser. (#741)
Closes #738
2019-12-25 17:43:33 +09:00
Paul Masurel
87120acf7c Bump version 2019-12-20 21:22:43 +09:00
Paul Masurel
401f74f7ae Implement fast field for DateTime. (#736) 2019-12-20 21:20:15 +09:00
Paul Masurel
03d31f6713 Update CHANGELOG 2019-12-19 10:07:43 +09:00
Paul Masurel
a57faf07f6 Added a constructor for WatchHandle (#734)
Closes #731
2019-12-19 10:06:02 +09:00
Paul Masurel
562ea9a839 Merge branch 'master' of github.com:tantivy-search/tantivy 2019-12-19 09:32:50 +09:00
Paul Masurel
cf92cc1ada Closes #732 (#733)
The future returned by `IndexWriter::merge` does not borrow `&mut self`
2019-12-18 23:25:22 +09:00
Paul Masurel
f6000aece7 Closes #732
The future returned by `IndexWriter::merge` does not borrow `&mut self`
2019-12-18 21:48:51 +09:00
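
In practice this means a merge can be started and awaited without locking up the writer. A rough sketch (the exact output type of the returned future is an assumption):

    use tantivy::Index;

    fn force_merge(index: &Index) -> tantivy::Result<()> {
        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
        // ... documents are assumed to have been added and committed already ...
        let segment_ids = index.searchable_segment_ids()?;
        // Before this change the returned future kept borrowing `writer`
        // mutably; now the future is independent, so the writer stays usable.
        let merge_future = writer.merge(&segment_ids);
        writer.commit()?; // still allowed while the merge is in flight
        let _merged_segment_meta = futures::executor::block_on(merge_future);
        Ok(())
    }
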
Paul Masurel
2b3fe3a2b5 Bumped version for hotfix 2019-12-17 21:10:50 +09:00
Paul Masurel
0fde90faac Closes #729 (#730)
Bug related with merge and deletes...
2019-12-17 21:09:08 +09:00
Paul Masurel
5838644b03 Added README in tantivy-query-grammar 2019-12-16 08:41:21 +09:00
Paul Masurel
c0011edd05 Added version for tantivy-grammar before publish 2019-12-16 08:35:17 +09:00
petr-tik
431c187a60 Make error handling richer in Footer::is_compatible (#724)
* WIP implemented is_compatible

hide Footer::from_bytes from public consumption - only Footer::extract was found to be
used outside the module

Add a new error type for IncompatibleIndex
add a prototypical call to footer.is_compatible() in ManagedDirectory::open_read
to make sure we error before reading it further

* Make error handling more ergonomic

Add an error subtype for OpenReadError and converters to TantivyError

* Remove an unnecessary assert

it's followed by the same check, which errors instead of panicking

* Correct the compatibility check logic

Leave a defensive check on the versioned footer to make sure we add new handling logic
when new footer versions are introduced

Restricted VersionedFooter::from_bytes to be used inside the crate only

remove a half-baked test

* WIP.

* Return an error if index incompatible - closes #662

Enrich the error type with incompatibility

Change return type to Result<bool, TantivyError>, instead of bool

Add an Incompatibility enum that enriches the IncompatibleIndex error variant
with information, which then allows us to generate a developer-friendly hint on how
to upgrade the library version or switch feature flags for a different compression
algorithm

Updated changelog

Change the signature of is_compatible

Added documentation to the Incompatibility
Added a conditional test on a Footer with lz4 erroring
2019-12-14 09:14:33 +09:00
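
A sketch of what a caller can now do with the enriched error. The variant shapes are taken from the directory/error.rs changes further down in this diff; the import paths and the Display formatting of Version are assumptions:

    use tantivy::directory::error::{Incompatibility, OpenReadError};

    // Turn the new IncompatibleIndex error into a developer-friendly hint.
    fn explain_open_failure(err: &OpenReadError) {
        if let OpenReadError::IncompatibleIndex(incompatibility) = err {
            match incompatibility {
                Incompatibility::CompressionMismatch {
                    library_compression_format,
                    index_compression_format,
                } => eprintln!(
                    "index compressed with {}, library built with {}: switch the compression feature flag and rebuild",
                    index_compression_format, library_compression_format
                ),
                Incompatibility::IndexMismatch {
                    library_version,
                    index_version,
                } => eprintln!(
                    "index version {} is not readable by library version {}: use a compatible tantivy release",
                    index_version, library_version
                ),
            }
        }
    }
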
Caio Romão
392abec420 Make u64_lenient() handle f64 fast fields too (#726)
* Make u64_lenient() handle f64 fast fields too

Without this, we get a panic during merge since the merger will
get a `None` where it expects something.

Prior to this patch, you can reproduce the panic with:

    use tantivy::{
        self,
        schema::{SchemaBuilder, FAST},
        Document, Index, Result,
    };

    #[test]
    fn pass() -> Result<()> {
        let mut builder = SchemaBuilder::new();
        let field = builder.add_f64_field("f64", FAST);
        let index = Index::create_in_ram(builder.build());

        let mut writer = index.writer_with_num_threads(1, 50_000_000)?;

        for i in 0..1000 {
            let mut doc = Document::new();
            doc.add_f64(field, 0.42);
            writer.add_document(doc);

            if i % 5 == 0 {
                writer.commit()?;
            }
        }

        writer.commit()?;

        Ok(())
    }

* Add test to verify that f64 fields are merged

* Ensure multi-valued fast fields can be merged too
2019-12-13 23:41:22 +09:00
Paul Masurel
dfbe337fe2 Optimize deletes (#723)
Closes #710
2019-12-13 09:50:00 +09:00
Paul Masurel
b9896c4962 Cleanup 2019-12-10 23:01:07 +09:00
Paul Masurel
afa5715e56 Added unit test. 2019-12-10 22:49:32 +09:00
Paul Masurel
79474288d0 Some clippy minor fixes (#722) 2019-12-09 13:40:04 +09:00
Paul Masurel
daf64487b4 Fixing JSON se/deserialization of dates. (#721)
Closes #719
2019-12-09 13:31:35 +09:00
Ximo Guanter
00816f5529 Fix outdated reference in documentation (#720) 2019-12-08 18:10:50 +09:00
Paul Masurel
f73787e6e5 Merge branch 'master' of github.com:tantivy-search/tantivy 2019-12-06 10:06:09 +09:00
Paul Masurel
5cffa71467 Using census 0.4 2019-12-06 10:04:01 +09:00
Christian Hunstad
02af28b3b7 add norwegian stemmer (#717) 2019-11-27 21:08:59 +09:00
Paul Masurel
afe0134d0f Kkoziara remove tokens from doc store (#715)
* Prevent tokens from being stored in the document store.

This commit adds a prepare_for_store method to Document, which changes all
PreTokenizedString values into String values. The method is called before
adding a document to the document store, to prevent tokens from being
saved there. The commit also makes small changes to the comments in the
pre_tokenized_text example.

* Avoid storing the pretokenized text.
2019-11-25 22:39:12 +09:00
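
For context, a sketch of the pre-tokenized values involved (the token values are made up, and the Token field names are assumptions about this version's tokenizer API):

    use tantivy::tokenizer::{PreTokenizedString, Token};

    // A hand-built token stream for "hello world". After this change the
    // tokens are used for indexing, but only the original `text` ends up in
    // the document store.
    let body_value = PreTokenizedString {
        text: "hello world".to_string(),
        tokens: vec![
            Token { text: "hello".to_string(), offset_from: 0, offset_to: 5, position: 0, ..Token::default() },
            Token { text: "world".to_string(), offset_from: 6, offset_to: 11, position: 1, ..Token::default() },
        ],
    };
    // The doc! macro accepts the value directly, e.g. doc!(body => body_value).
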
Christian Hunstad
db9e81d0f9 Updated rust-stemmers version to 1.2 (#716)
* Updated rust-stemmers version to 1.2

* 1.2.0 -> 1.2
2019-11-25 22:38:48 +09:00
Paul Masurel
3821f57ecc Closes #712 (#714)
Fixing the memory leak in the DeleteQueue.
2019-11-25 15:57:29 +09:00
Paul Masurel
d379f98b22 Waiting for indexing threads when dropping IndexWriter 2019-11-23 15:00:27 +09:00
Paul Masurel
ef3eddf3da clippy first stab (#711) 2019-11-22 13:09:35 +09:00
Paul Masurel
08a2368845 Closes #708 (#709)
Fixes a race condition in the test.
2019-11-21 11:41:59 +09:00
Paul Masurel
1868fc1e2c Text fix 2019-11-20 23:00:39 +09:00
Paul Masurel
451a0252ab thread pool merge (#704) 2019-11-20 21:18:05 +09:00
Paul Masurel
42756c7474 Removing futures-cpupool and upgrading to futures-0.3 2019-11-15 18:35:31 +09:00
Paul Masurel
598b076240 Making some of the IndexWriter's method public. 2019-11-11 12:41:45 +09:00
Paul Masurel
f1f96fc417 Updating some doc. 2019-11-11 10:04:12 +09:00
Paul Masurel
9c941603f5 Petr tik n662 errror incompatible footer version (#696)
* code tidy-up

Replace `20` magic constant with COMMON_FOOTER_SIZE

Add a docstring showing how footer is serialised
Add a test for footer length checking

* Add more tests for VersionedFooter

successful and panicking .to_bytes() calls

* Minor changes in footer.rs
2019-11-10 14:40:06 +09:00
Paul Masurel
fb3d6fa332 Adding Value::From<PretokenizedText> (#697) 2019-11-10 14:39:44 +09:00
75 changed files with 2566 additions and 1339 deletions

View File

@@ -1,3 +1,17 @@
+Tantivy 0.11.3
+=======================
+- Fixed DateTime as a fast field (#735)
+
+Tantivy 0.11.2
+=======================
+- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
+- Exposing a constructor for `WatchHandle` (#731)
+
+Tantivy 0.11.1
+=====================
+- Bug fix #729
+
 Tantivy 0.11.0
 =====================
@@ -9,6 +23,7 @@ Tantivy 0.11.0
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
 - Add footer with some metadata to index files. #605 (@fdb-hiroshima)
+- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
 - TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
 - Added handling of pre-tokenized text fields (#642), which will enable users to
   load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
@@ -16,10 +31,11 @@ Tantivy 0.11.0
 ## How to update?
+- The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
   an error and handling the `Result` is required.
+- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
 Tantivy 0.10.2
 =====================

View File

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.11.0"
+version = "0.11.3"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -33,18 +33,16 @@ fs2={version="0.4", optional=true}
 itertools = "0.8"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 notify = {version="4", optional=true}
-bit-set = "0.5"
 uuid = { version = "0.8", features = ["v4", "serde"] }
 crossbeam = "0.7"
-futures = "0.1"
-futures-cpupool = "0.1"
+futures = {version = "0.3", features=["thread-pool"] }
 owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
-rust-stemmers = "1.1"
+rust-stemmers = "1.2"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { path="./query-grammar" }
+tantivy-query-grammar = { version="0.11", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.2"
+census = "0.4"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"

View File

@@ -13,63 +13,100 @@
// --- // ---
// Importing tantivy... // Importing tantivy...
use tantivy::collector::FacetCollector; use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery; use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index}; use tantivy::{doc, Index};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the // Let's create a temporary directory for the sake of this example
// sake of this example
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("name", TEXT | STORED); let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
// this is our faceted field: its scientific classification
// this is our faceted field let classification = schema_builder.add_facet_field("classification");
schema_builder.add_facet_field("tags");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let index = Index::create_in_dir(&index_path, schema.clone())?; let mut index_writer = index.writer(30_000_000)?;
let mut index_writer = index.writer(50_000_000)?;
let name = schema.get_field("name").unwrap();
let tags = schema.get_field("tags").unwrap();
// For convenience, tantivy also comes with a macro to // For convenience, tantivy also comes with a macro to
// reduce the boilerplate above. // reduce the boilerplate above.
index_writer.add_document(doc!( index_writer.add_document(doc!(
name => "the ditch", name => "Cat",
tags => Facet::from("/pools/north") classification => Facet::from("/Felidae/Felinae/Felis")
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
name => "little stacey", name => "Canada lynx",
tags => Facet::from("/pools/south") classification => Facet::from("/Felidae/Felinae/Lynx")
));
index_writer.add_document(doc!(
name => "Cheetah",
classification => Facet::from("/Felidae/Felinae/Acinonyx")
));
index_writer.add_document(doc!(
name => "Tiger",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Lion",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Jaguar",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Sunda clouded leopard",
classification => Facet::from("/Felidae/Pantherinae/Neofelis")
));
index_writer.add_document(doc!(
name => "Fossa",
classification => Facet::from("/Eupleridae/Cryptoprocta")
)); ));
index_writer.commit()?; index_writer.commit()?;
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
{
let mut facet_collector = FacetCollector::for_field(classification);
facet_collector.add_facet("/Felidae");
let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
// This lists all of the facet counts, right below "/Felidae".
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
assert_eq!(
facets,
vec![
(&Facet::from("/Felidae/Felinae"), 3),
(&Facet::from("/Felidae/Pantherinae"), 4),
]
);
}
let mut facet_collector = FacetCollector::for_field(tags); // Facets are also searchable.
facet_collector.add_facet("/pools"); //
// For instance a common UI pattern is to allow the user someone to click on a facet link
// (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
//
// The search would then look as follows.
let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap(); // Check the reference doc for different ways to create a `Facet` object.
{
// This lists all of the facet counts let facet = Facet::from_text("/Felidae/Pantherinae");
let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect(); let facet_term = Term::from_facet(classification, &facet);
assert_eq!( let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
facets, let mut facet_collector = FacetCollector::for_field(classification);
vec![ facet_collector.add_facet("/Felidae/Pantherinae");
(&Facet::from("/pools/north"), 1), let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
(&Facet::from("/pools/south"), 1), let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
] assert_eq!(
); facets,
vec![
(&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
(&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
]
);
}
Ok(()) Ok(())
} }
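
Since the two-column rendering above is hard to follow, here is a condensed, self-contained sketch of the drill-down pattern the new example demonstrates (facet term query plus facet collector); it reuses the APIs shown in the diff but with a smaller document set:

    use tantivy::collector::FacetCollector;
    use tantivy::query::TermQuery;
    use tantivy::schema::*;
    use tantivy::{doc, Index, Term};

    fn facet_drill_down() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
        let classification = schema_builder.add_facet_field("classification");
        let index = Index::create_in_ram(schema_builder.build());

        let mut index_writer = index.writer(30_000_000)?;
        index_writer.add_document(doc!(
            name => "Tiger",
            classification => Facet::from("/Felidae/Pantherinae/Panthera")
        ));
        index_writer.add_document(doc!(
            name => "Sunda clouded leopard",
            classification => Facet::from("/Felidae/Pantherinae/Neofelis")
        ));
        index_writer.commit()?;
        let searcher = index.reader()?.searcher();

        // Restrict the result set to one facet branch with a TermQuery,
        // then count the sub-facets right below it.
        let facet = Facet::from_text("/Felidae/Pantherinae");
        let facet_term = Term::from_facet(classification, &facet);
        let facet_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
        let mut facet_collector = FacetCollector::for_field(classification);
        facet_collector.add_facet("/Felidae/Pantherinae");
        let facet_counts = searcher.search(&facet_query, &facet_collector)?;
        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
        assert_eq!(
            facets,
            vec![
                (&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
                (&Facet::from("/Felidae/Pantherinae/Panthera"), 1),
            ]
        );
        Ok(())
    }
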

View File

@@ -65,11 +65,8 @@ fn main() -> tantivy::Result<()> {
tokens: pre_tokenize_text(body_text), tokens: pre_tokenize_text(body_text),
}; };
// Now lets create a document and add our `PreTokenizedString` using // Now lets create a document and add our `PreTokenizedString`
// `add_pre_tokenized_text` method of `Document` let old_man_doc = doc!(title => title_tok, body => body_tok);
let mut old_man_doc = Document::default();
old_man_doc.add_pre_tokenized_text(title, &title_tok);
old_man_doc.add_pre_tokenized_text(body, &body_tok);
// ... now let's just add it to the IndexWriter // ... now let's just add it to the IndexWriter
index_writer.add_document(old_man_doc); index_writer.add_document(old_man_doc);
@@ -116,6 +113,9 @@ fn main() -> tantivy::Result<()> {
assert_eq!(count, 2); assert_eq!(count, 2);
// Now let's print out the results.
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs { for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?; let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc)); println!("Document: {}", schema.to_json(&retrieved_doc));

query-grammar/README.md (new file, 3 additions)
View File

@@ -0,0 +1,3 @@
# Tantivy Query Grammar
This crate is used by tantivy to parse queries.

View File

@@ -13,44 +13,29 @@ use crate::SegmentReader;
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result}; /// use tantivy::{doc, Index};
/// ///
/// # fn main() { example().unwrap(); } /// let mut schema_builder = Schema::builder();
/// fn example() -> Result<()> { /// let title = schema_builder.add_text_field("title", TEXT);
/// let mut schema_builder = Schema::builder(); /// let schema = schema_builder.build();
/// let title = schema_builder.add_text_field("title", TEXT); /// let index = Index::create_in_ram(schema);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let reader = index.reader()?; /// let mut index_writer = index.writer(3_000_000).unwrap();
/// let searcher = reader.searcher(); /// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// { /// let reader = index.reader().unwrap();
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let searcher = reader.searcher();
/// let query = query_parser.parse_query("diary")?;
/// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// assert_eq!(count, 2); /// // Here comes the important part
/// } /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// Ok(()) /// assert_eq!(count, 2);
/// }
/// ``` /// ```
pub struct Count; pub struct Count;

View File

@@ -86,7 +86,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// use tantivy::schema::{Facet, Schema, TEXT}; /// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result}; /// use tantivy::{doc, Index, Result};
/// ///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// ///
@@ -127,7 +126,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang"); /// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category"); /// facet_collector.add_facet("/category");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -143,7 +142,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -158,8 +157,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ]); /// ]);
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -172,6 +171,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// # assert!(example().is_ok());
/// ``` /// ```
pub struct FacetCollector { pub struct FacetCollector {
field: Field, field: Field,
@@ -452,9 +452,11 @@ impl FacetCounts {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{FacetCollector, FacetCounts}; use super::{FacetCollector, FacetCounts};
use crate::collector::Count;
use crate::core::Index; use crate::core::Index;
use crate::query::AllQuery; use crate::query::{AllQuery, QueryParser, TermQuery};
use crate::schema::{Document, Facet, Field, Schema}; use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema};
use crate::Term;
use rand::distributions::Uniform; use rand::distributions::Uniform;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
@@ -544,6 +546,56 @@ mod tests {
assert_eq!(facets[0].1, 1); assert_eq!(facets[0].1, 1);
} }
#[test]
fn test_doc_search_by_facet() {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/A"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/B"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/C/A"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/D/C/A"),
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 4);
let count_facet = |facet_str: &str| {
let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
searcher
.search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
.unwrap()
};
assert_eq!(count_facet("/"), 4);
assert_eq!(count_facet("/A"), 3);
assert_eq!(count_facet("/A/B"), 1);
assert_eq!(count_facet("/A/C"), 1);
assert_eq!(count_facet("/A/C/A"), 1);
assert_eq!(count_facet("/C/A"), 0);
{
let query_parser = QueryParser::for_index(&index, vec![]);
{
let query = query_parser.parse_query("facet:/A/B").unwrap();
assert_eq!(1, searcher.search(&query, &Count).unwrap());
}
{
let query = query_parser.parse_query("facet:/A").unwrap();
assert_eq!(3, searcher.search(&query, &Count).unwrap());
}
}
}
#[test] #[test]
fn test_non_used_facet_collector() { fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));

View File

@@ -108,49 +108,35 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// use tantivy::collector::{Count, TopDocs, MultiCollector}; /// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result}; /// use tantivy::{doc, Index};
/// ///
/// # fn main() { example().unwrap(); } /// let mut schema_builder = Schema::builder();
/// fn example() -> Result<()> { /// let title = schema_builder.add_text_field("title", TEXT);
/// let mut schema_builder = Schema::builder(); /// let schema = schema_builder.build();
/// let title = schema_builder.add_text_field("title", TEXT); /// let index = Index::create_in_ram(schema);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let reader = index.reader()?; /// let mut index_writer = index.writer(3_000_000).unwrap();
/// let searcher = reader.searcher(); /// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let mut collectors = MultiCollector::new(); /// let reader = index.reader().unwrap();
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2)); /// let searcher = reader.searcher();
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let mut multi_fruit = searcher.search(&query, &collectors)?;
/// ///
/// let count = count_handle.extract(&mut multi_fruit); /// let mut collectors = MultiCollector::new();
/// let top_docs = top_docs_handle.extract(&mut multi_fruit); /// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
/// ///
/// # assert_eq!(count, 2); /// let count = count_handle.extract(&mut multi_fruit);
/// # assert_eq!(top_docs.len(), 2); /// let top_docs = top_docs_handle.extract(&mut multi_fruit);
/// ///
/// Ok(()) /// assert_eq!(count, 2);
/// } /// assert_eq!(top_docs.len(), 2);
/// ``` /// ```
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[derive(Default)] #[derive(Default)]

View File

@@ -29,43 +29,29 @@ use std::fmt;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index, Result}; /// use tantivy::{doc, DocAddress, Index};
/// ///
/// # fn main() { example().unwrap(); } /// let mut schema_builder = Schema::builder();
/// fn example() -> Result<()> { /// let title = schema_builder.add_text_field("title", TEXT);
/// let mut schema_builder = Schema::builder(); /// let schema = schema_builder.build();
/// let title = schema_builder.add_text_field("title", TEXT); /// let index = Index::create_in_ram(schema);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let reader = index.reader()?; /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
/// let searcher = reader.searcher(); /// index_writer.add_document(doc!(title => "The Name of the Wind"));
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let reader = index.reader().unwrap();
/// let query = query_parser.parse_query("diary")?; /// let searcher = reader.searcher();
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
/// ///
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1))); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3))); /// let query = query_parser.parse_query("diary").unwrap();
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
/// ///
/// Ok(()) /// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// } /// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// ``` /// ```
pub struct TopDocs(TopCollector<Score>); pub struct TopDocs(TopCollector<Score>);
@@ -102,15 +88,12 @@ impl TopDocs {
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!( /// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64)); /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64)); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64)); /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit()?; /// # assert!(index_writer.commit().is_ok());
/// # let reader = index.reader()?; /// # let reader = index.reader().unwrap();
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?; /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?; /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
@@ -202,27 +185,33 @@ impl TopDocs {
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field; /// use tantivy::schema::Field;
/// ///
/// # fn create_schema() -> Schema { /// fn create_schema() -> Schema {
/// # let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// # schema_builder.add_text_field("product_name", TEXT); /// schema_builder.add_text_field("product_name", TEXT);
/// # schema_builder.add_u64_field("popularity", FAST); /// schema_builder.add_u64_field("popularity", FAST);
/// # schema_builder.build() /// schema_builder.build()
/// # } /// }
/// # ///
/// # fn main() -> tantivy::Result<()> { /// fn create_index() -> tantivy::Result<Index> {
/// # let schema = create_schema(); /// let schema = create_schema();
/// # let index = Index::create_in_ram(schema); /// let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # let product_name = index.schema().get_field("product_name").unwrap(); /// let product_name = index.schema().get_field("product_name").unwrap();
/// # /// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// index_writer.commit()?;
/// Ok(index)
/// }
///
/// let index = create_index().unwrap();
/// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64)); ///
/// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64)); /// let user_query_str = "diary";
/// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64)); /// let query_parser = QueryParser::for_index(&index, vec![product_name]);
/// # index_writer.commit()?; /// let query = query_parser.parse_query(user_query_str).unwrap();
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
/// ///
/// // This is where we build our collector with our custom score. /// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs /// let top_docs_by_custom_score = TopDocs
@@ -249,15 +238,12 @@ impl TopDocs {
/// popularity_boost_score * original_score /// popularity_boost_score * original_score
/// } /// }
/// }); /// });
/// # let reader = index.reader()?; /// let reader = index.reader().unwrap();
/// # let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score. /// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<(Score, DocAddress)> = /// let resulting_docs: Vec<(Score, DocAddress)> =
/// searcher.search(&*query, &top_docs_by_custom_score)?; /// searcher.search(&query, &top_docs_by_custom_score).unwrap();
///
/// # Ok(())
/// # }
/// ``` /// ```
/// ///
/// # See also /// # See also

View File

@@ -186,7 +186,7 @@ mod test {
 use super::{CompositeFile, CompositeWrite};
 use crate::common::BinarySerializable;
 use crate::common::VInt;
-use crate::directory::{Directory, RAMDirectory};
+use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory};
 use crate::schema::Field;
 use std::io::Write;
 use std::path::Path;

View File

@@ -104,23 +104,21 @@ impl Index {
if Index::exists(&mmap_directory) { if Index::exists(&mmap_directory) {
return Err(TantivyError::IndexAlreadyExists); return Err(TantivyError::IndexAlreadyExists);
} }
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
} }
/// Opens or creates a new index in the provided directory /// Opens or creates a new index in the provided directory
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> { pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
if Index::exists(&dir) { if !Index::exists(&dir) {
let index = Index::open(dir)?; return Index::create(dir, schema);
if index.schema() == schema { }
Ok(index) let index = Index::open(dir)?;
} else { if index.schema() == schema {
Err(TantivyError::SchemaError( Ok(index)
"An index exists but the schema does not match.".to_string(),
))
}
} else { } else {
Index::create(dir, schema) Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
} }
} }
@@ -340,7 +338,7 @@ impl Index {
/// Creates a new segment. /// Creates a new segment.
pub fn new_segment(&self) -> Segment { pub fn new_segment(&self) -> Segment {
let segment_meta = self let mut segment_meta = self
.inventory .inventory
.new_segment_meta(SegmentId::generate_random(), 0); .new_segment_meta(SegmentId::generate_random(), 0);
self.segment(segment_meta) self.segment(segment_meta)
@@ -388,12 +386,9 @@ mod tests {
use crate::directory::RAMDirectory; use crate::directory::RAMDirectory;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::{Schema, INDEXED, TEXT}; use crate::schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader; use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use std::thread; use crate::{Directory, Index};
use std::time::Duration;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -471,14 +466,14 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); test_index_on_commit_reload_policy_aux(field, &index, &reader);
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_specific { mod mmap_specific {
use super::*; use super::*;
use crate::Directory;
use std::path::PathBuf; use std::path::PathBuf;
use tempfile::TempDir; use tempfile::TempDir;
@@ -489,22 +484,20 @@ mod tests {
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap(); let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::OnCommit) .reload_policy(ReloadPolicy::OnCommit)
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader); test_index_on_commit_reload_policy_aux(field, &index, &reader);
} }
#[test] #[test]
fn test_index_manual_policy_mmap() { fn test_index_manual_policy_mmap() {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let index = Index::create_from_tempdir(schema).unwrap(); let mut index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap(); writer.commit().unwrap();
let reader = index let reader = index
@@ -514,8 +507,12 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = index.directory_mut().watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap(); writer.commit().unwrap();
thread::sleep(Duration::from_millis(500)); assert!(receiver.recv().is_ok());
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
reader.reload().unwrap(); reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 1); assert_eq!(reader.searcher().num_docs(), 1);
@@ -535,39 +532,26 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap(); test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
} }
fn test_index_on_commit_reload_policy_aux( fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
field: Field, let mut reader_index = reader.index();
writer: &mut IndexWriter, let (sender, receiver) = crossbeam::channel::unbounded();
reader: &IndexReader, let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
) { let _ = sender.send(());
}));
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
writer.commit().unwrap(); writer.commit().unwrap();
let mut count = 0; assert!(receiver.recv().is_ok());
for _ in 0..100 { assert_eq!(reader.searcher().num_docs(), 1);
count = reader.searcher().num_docs();
if count > 0 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 1);
writer.add_document(doc!(field=>2u64)); writer.add_document(doc!(field=>2u64));
writer.commit().unwrap(); writer.commit().unwrap();
let mut count = 0; assert!(receiver.recv().is_ok());
for _ in 0..10 { assert_eq!(reader.searcher().num_docs(), 2);
count = reader.searcher().num_docs();
if count > 1 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 2);
} }
// This test will not pass on windows, because windows // This test will not pass on windows, because windows
@@ -584,9 +568,13 @@ mod tests {
for i in 0u64..8_000u64 { for i in 0u64..8_000u64 {
writer.add_document(doc!(field => i)); writer.add_document(doc!(field => i));
} }
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = directory.watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap(); writer.commit().unwrap();
let mem_right_after_commit = directory.total_mem_usage(); let mem_right_after_commit = directory.total_mem_usage();
thread::sleep(Duration::from_millis(1_000)); assert!(receiver.recv().is_ok());
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
@@ -600,6 +588,11 @@ mod tests {
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 8_000); assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit); assert!(
mem_right_after_merge_finished < mem_right_after_commit,
"(mem after merge){} is expected < (mem before merge){}",
mem_right_after_merge_finished,
mem_right_after_commit
);
} }
} }

View File

@@ -35,6 +35,7 @@ impl SegmentMetaInventory {
segment_id, segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: false,
}; };
SegmentMeta::from(self.inventory.track(inner)) SegmentMeta::from(self.inventory.track(inner))
} }
@@ -81,6 +82,19 @@ impl SegmentMeta {
self.tracked.segment_id self.tracked.segment_id
} }
pub fn with_bundled(self) -> SegmentMeta {
SegmentMeta::from(self.tracked.map(|inner| InnerSegmentMeta {
segment_id: inner.segment_id,
max_doc: inner.max_doc,
deletes: inner.deletes.clone(),
bundled: true,
}))
}
pub fn is_bundled(&self) -> bool {
self.tracked.bundled
}
/// Returns the number of deleted documents. /// Returns the number of deleted documents.
pub fn num_deleted_docs(&self) -> u32 { pub fn num_deleted_docs(&self) -> u32 {
self.tracked self.tracked
@@ -107,8 +121,12 @@ impl SegmentMeta {
/// It just joins the segment id with the extension /// It just joins the segment id with the extension
/// associated to a segment component. /// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf { pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let mut path = self.id().uuid_string(); let suffix = self.suffix(component);
path.push_str(&*match component { self.relative_path_from_suffix(&suffix)
}
fn suffix(&self, component: SegmentComponent) -> String {
match component {
SegmentComponent::POSTINGS => ".idx".to_string(), SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::POSITIONS => ".pos".to_string(), SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(), SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
@@ -117,7 +135,17 @@ impl SegmentMeta {
SegmentComponent::FASTFIELDS => ".fast".to_string(), SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(), SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)), SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
}); }
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path_from_suffix(&self, suffix: &str) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(".");
path.push_str(&suffix);
PathBuf::from(path) PathBuf::from(path)
} }
@@ -161,6 +189,7 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -175,6 +204,7 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc: inner_meta.max_doc, max_doc: inner_meta.max_doc,
deletes: Some(delete_meta), deletes: Some(delete_meta),
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -185,6 +215,7 @@ struct InnerSegmentMeta {
segment_id: SegmentId, segment_id: SegmentId,
max_doc: u32, max_doc: u32,
deletes: Option<DeleteMeta>, deletes: Option<DeleteMeta>,
bundled: bool,
} }
impl InnerSegmentMeta { impl InnerSegmentMeta {
@@ -300,6 +331,9 @@ mod tests {
payload: None, payload: None,
}; };
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed"); let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#); assert_eq!(
json,
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
);
} }
} }

View File

@@ -4,14 +4,12 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlyDirectory, ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::result;
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
@@ -83,23 +81,30 @@ impl Segment {
} }
/// Open one of the component file for a *regular* read. /// Open one of the component file for a *regular* read.
pub fn open_read( pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> {
&self,
component: SegmentComponent,
) -> result::Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let source = self.index.directory().open_read(&path)?; let source = self.index.directory().open_read(&path)?;
Ok(source) Ok(source)
} }
/// Open one of the component file for *regular* write. /// Open one of the component file for *regular* write.
pub fn open_write( pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component);
self.index.directory_mut().open_write(&path)
}
pub fn open_bundle_writer(&mut self) -> Result<WritePtr, OpenWriteError> {
let path = self.meta.relative_path_from_suffix("bundle");
self.index.directory_mut().open_write(&path)
}
pub(crate) fn open_write_in_directory(
&mut self, &mut self,
component: SegmentComponent, component: SegmentComponent,
) -> result::Result<WritePtr, OpenWriteError> { directory: &mut dyn Directory,
) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let write = self.index.directory_mut().open_write(&path)?; directory.open_write(&path)
Ok(write)
} }
} }
@@ -109,5 +114,5 @@ pub trait SerializableSegment {
/// ///
/// # Returns /// # Returns
/// The number of documents in the segment. /// The number of documents in the segment.
fn write(&self, serializer: SegmentSerializer) -> Result<u32>; fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32>;
} }

View File

@@ -0,0 +1,97 @@
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::OpenReadError;
use crate::directory::ReadOnlySource;
use crate::error::DataCorruption;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
#[derive(Clone)]
struct BundleDirectory {
source_map: Arc<HashMap<PathBuf, ReadOnlySource>>,
}
impl BundleDirectory {
pub fn from_source(source: ReadOnlySource) -> Result<BundleDirectory, DataCorruption> {
let mut index_offset_buf = [0u8; 8];
let (body_idx, footer_offset) = source.split_from_end(8);
index_offset_buf.copy_from_slice(footer_offset.as_slice());
let offset = u64::from_le_bytes(index_offset_buf);
let (body_source, idx_source) = body_idx.split(offset as usize);
let idx: HashMap<PathBuf, (u64, u64)> = serde_json::from_slice(idx_source.as_slice())
.map_err(|err| {
let msg = format!("Failed to read index from bundle. {:?}", err);
DataCorruption::comment_only(msg)
})?;
let source_map: HashMap<PathBuf, ReadOnlySource> = idx
.into_iter()
.map(|(path, (start, stop))| {
let source = body_source.slice(start as usize, stop as usize);
(path, source)
})
.collect();
Ok(BundleDirectory {
source_map: Arc::new(source_map),
})
}
}
impl ReadOnlyDirectory for BundleDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.source_map
.get(path)
.cloned()
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))
}
fn exists(&self, path: &Path) -> bool {
self.source_map.contains_key(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let source = self
.source_map
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))?;
Ok(source.as_slice().to_vec())
}
}
#[cfg(test)]
mod tests {
use super::BundleDirectory;
use crate::directory::{RAMDirectory, ReadOnlyDirectory, TerminatingWrite};
use crate::Directory;
use std::io::Write;
use std::path::Path;
#[test]
fn test_bundle_directory() {
let mut ram_directory = RAMDirectory::default();
let test_path_atomic = Path::new("testpath_atomic");
let test_path_wrt = Path::new("testpath_wrt");
assert!(ram_directory
.atomic_write(test_path_atomic, b"titi")
.is_ok());
{
let mut test_wrt = ram_directory.open_write(test_path_wrt).unwrap();
assert!(test_wrt.write_all(b"toto").is_ok());
assert!(test_wrt.terminate().is_ok());
}
let mut dest_directory = RAMDirectory::default();
let bundle_path = Path::new("bundle");
let mut wrt = dest_directory.open_write(bundle_path).unwrap();
assert!(ram_directory.serialize_bundle(&mut wrt).is_ok());
assert!(wrt.terminate().is_ok());
let source = dest_directory.open_read(bundle_path).unwrap();
let bundle_directory = BundleDirectory::from_source(source).unwrap();
assert_eq!(
&bundle_directory.atomic_read(test_path_atomic).unwrap()[..],
b"titi"
);
assert_eq!(
&bundle_directory.open_read(test_path_wrt).unwrap()[..],
b"toto"
);
}
}
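
For reference, the bundle layout implied by BundleDirectory::from_source above (inferred from the parsing code, not an authoritative spec):

    // [ concatenated file bodies .................................. ]
    // [ JSON index: { path -> (start, stop) offsets into the body } ]
    // [ u64, little-endian: byte offset where the JSON index starts ]  <- last 8 bytes
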

View File

@@ -100,6 +100,30 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
} }
} }
pub trait ReadOnlyDirectory {
/// Opens a virtual file for read.
///
/// Once a virtual file is open, its data may not
/// change.
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files create with [Directory::open_write].
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Reads the full content file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [Directory::atomic_write].
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
}
/// Write-once read many (WORM) abstraction for where /// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored. /// tantivy's data should be stored.
/// ///
@@ -110,18 +134,9 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which /// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests. /// should be used mostly for tests.
/// ///
pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static { pub trait Directory:
/// Opens a virtual file for read. DirectoryClone + ReadOnlyDirectory + fmt::Debug + Send + Sync + 'static
/// {
/// Once a virtual file is open, its data may not
/// change.
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files create with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file /// Removes a file
/// ///
/// Removing a file will not affect an eventual /// Removing a file will not affect an eventual
@@ -131,9 +146,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// `DeleteError::DoesNotExist`. /// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>; fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Opens a writer for the *virtual file* associated with /// Opens a writer for the *virtual file* associated with
/// a Path. /// a Path.
/// ///
@@ -155,14 +167,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// The file may not previously exist. /// The file may not previously exist.
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>; fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
/// Reads the full content file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files create with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data. /// Atomically replace the content of a file with data.
/// ///
/// This calls ensure that reads can never *observe* /// This calls ensure that reads can never *observe*
@@ -197,7 +201,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// Registers a callback that will be called whenever a change on the `meta.json` /// Registers a callback that will be called whenever a change on the `meta.json`
/// using the `atomic_write` API is detected. /// using the `atomic_write` API is detected.
/// ///
/// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
/// hand, undefined. /// hand, undefined.
/// ///
/// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
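
With this split, code that only needs read access can be written against the new supertrait rather than the full Directory. A small sketch using the trait as defined in this diff (the re-export path is an assumption):

    use std::path::Path;
    use tantivy::directory::ReadOnlyDirectory;

    // Any Directory (RAMDirectory, MmapDirectory, ...) satisfies this bound,
    // since Directory now requires ReadOnlyDirectory; a read-only implementation
    // such as the bundle directory above satisfies it as well.
    fn has_meta<D: ReadOnlyDirectory>(dir: &D) -> bool {
        dir.exists(Path::new("meta.json"))
    }
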

View File

@@ -1,3 +1,4 @@
use crate::Version;
use std::error::Error as StdError;
use std::fmt;
use std::io;
@@ -156,6 +157,65 @@ impl StdError for OpenWriteError {
} }
} }
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility {
/// This library cannot decompress the index found on disk
CompressionMismatch {
/// Compression algorithm used by the current version of tantivy
library_compression_format: String,
/// Compression algorithm that was used to serialise the index
index_compression_format: String,
},
/// The index format found on disk isn't supported by this version of the library
IndexMismatch {
/// Version used by the library
library_version: Version,
/// Version the index was built with
index_version: Version,
},
}
impl fmt::Debug for Incompatibility {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
Incompatibility::CompressionMismatch {
library_compression_format,
index_compression_format,
} => {
let err = format!(
"Library was compiled with {:?} compression, index was compressed with {:?}",
library_compression_format, index_compression_format
);
let advice = format!(
"Change the feature flag to {:?} and rebuild the library",
index_compression_format
);
write!(f, "{}. {}", err, advice)?;
}
Incompatibility::IndexMismatch {
library_version,
index_version,
} => {
let err = format!(
"Library version: {}, index version: {}",
library_version.index_format_version, index_version.index_format_version
);
// TODO make a more useful error message
// include the version range that supports this index_format_version
let advice = format!(
"Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
and rebuild your project.",
index_version.index_format_version, index_version.major, index_version.minor
);
write!(f, "{}. {}", err, advice)?;
}
}
Ok(())
}
}
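With the mismatch details carried in the error itself, callers can surface the upgrade hint directly instead of reporting a generic read failure. A minimal, hypothetical caller-side sketch (the `report` helper and its name are not part of this change):

use tantivy::directory::error::OpenReadError;

// Hypothetical helper: print a developer-friendly message for read failures.
fn report(err: OpenReadError) {
    match err {
        // The Debug impl above already appends the "change feature flag / version" advice.
        OpenReadError::IncompatibleIndex(incompatibility) => eprintln!("{:?}", incompatibility),
        other => eprintln!("{}", other),
    }
}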
/// Error that may occur when accessing a file for reading
#[derive(Debug)]
pub enum OpenReadError {
@@ -164,6 +224,8 @@ pub enum OpenReadError {
/// Any kind of IO error that happens when
/// interacting with the underlying IO device.
IOError(IOError),
/// This library doesn't support the index version found on disk
IncompatibleIndex(Incompatibility),
} }
impl From<IOError> for OpenReadError {
@@ -183,19 +245,9 @@ impl fmt::Display for OpenReadError {
"an io error occurred while opening a file for reading: '{}'", "an io error occurred while opening a file for reading: '{}'",
err err
), ),
} OpenReadError::IncompatibleIndex(ref footer) => {
} write!(f, "Incompatible index format: {:?}", footer)
} }
impl StdError for OpenReadError {
fn description(&self) -> &str {
"error occurred while opening a file for reading"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err),
} }
} }
} }
@@ -216,6 +268,12 @@ impl From<IOError> for DeleteError {
} }
} }
impl From<Incompatibility> for OpenReadError {
fn from(incompatibility: Incompatibility) -> Self {
OpenReadError::IncompatibleIndex(incompatibility)
}
}
impl fmt::Display for DeleteError { impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {

View File

@@ -1,159 +1,175 @@
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility;
use crate::directory::read_only_source::ReadOnlySource; use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite}; use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian}; use crate::Version;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher; use crc32fast::Hasher;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
const COMMON_FOOTER_SIZE: usize = 4 * 5; type CrcHashU32 = u32;
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct Footer { pub struct Footer {
pub tantivy_version: (u32, u32, u32), pub version: Version,
pub meta: String, pub meta: String,
pub versioned_footer: VersionedFooter, pub versioned_footer: VersionedFooter,
} }
/// Serialises the footer to a byte-array
/// - versioned_footer_len: 4 bytes
/// - versioned_footer: variable bytes
/// - meta_len: 4 bytes
/// - meta: variable bytes
/// - version_len: 4 bytes
/// - version json: variable bytes
impl BinarySerializable for Footer {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
BinarySerializable::serialize(&self.versioned_footer, writer)?;
BinarySerializable::serialize(&self.meta, writer)?;
let version_string =
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
BinarySerializable::serialize(&version_string, writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let versioned_footer = VersionedFooter::deserialize(reader)?;
let meta = String::deserialize(reader)?;
let version_json = String::deserialize(reader)?;
let version = serde_json::from_str(&version_json)?;
Ok(Footer {
version,
meta,
versioned_footer,
})
}
}
impl Footer { impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self { pub fn new(versioned_footer: VersionedFooter) -> Self {
let tantivy_version = ( let version = crate::VERSION.clone();
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(), let meta = version.to_string();
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer { Footer {
tantivy_version, version,
meta: format!( meta,
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer, versioned_footer,
} }
} }
pub fn to_bytes(&self) -> Vec<u8> { pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
let mut res = self.versioned_footer.to_bytes(); let mut counting_write = CountingWriter::wrap(&mut write);
res.extend_from_slice(self.meta.as_bytes()); self.serialize(&mut counting_write)?;
let len = res.len(); let written_len = counting_write.written_bytes();
res.resize(len + COMMON_FOOTER_SIZE, 0); write.write_u32::<LittleEndian>(written_len as u32)?;
let mut common_footer = &mut res[len..]; Ok(())
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
}
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The footer len is {}, while the entire file len is {}",
size, len
),
));
}
let footer = &data[len - size as usize..];
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
Ok(Footer {
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
} }
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> { pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
let footer = Footer::from_bytes(source.as_slice())?; if source.len() < 4 {
let reader = source.slice_to(source.as_slice().len() - footer.size()); return Err(io::Error::new(
Ok((footer, reader)) io::ErrorKind::UnexpectedEof,
} format!(
"File corrupted. The file is smaller than 4 bytes (len={}).",
pub fn size(&self) -> usize { source.len()
self.versioned_footer.size() as usize + self.meta.len() + 20 ),
} ));
}
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion { version: u32, size: u32 },
V0(u32), // crc
}
impl VersionedFooter {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
} }
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
let body_len = body_footer.len() - footer_len;
let (body, footer_data) = body_footer.split(body_len);
let mut cursor = footer_data.as_slice();
let footer = Footer::deserialize(&mut cursor)?;
Ok((footer, body))
} }
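Putting `append_footer` and `extract_footer` together, a file written through `FooterProxy` ends with the serialized footer followed by its length. A hypothetical helper mirroring the same arithmetic on a raw byte slice (the function name and the `TryInto` import are mine, not part of the diff):

use std::convert::TryInto;

// On-disk layout: [ payload bytes | serialized Footer (footer_len bytes) | footer_len: u32 LE ]
fn footer_bounds(data: &[u8]) -> (usize, usize) {
    // The trailing 4 bytes store the length of the serialized footer.
    let footer_len =
        u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as usize;
    // Everything before the footer is the payload.
    let body_len = data.len() - 4 - footer_len;
    (body_len, footer_len)
}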
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> { /// Confirms that the index will be read correctly by this version of tantivy
assert!(footer.len() >= 4); /// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
let version = LittleEndian::read_u32(footer); pub fn is_compatible(&self) -> Result<(), Incompatibility> {
match version { let library_version = crate::version();
0 => { match &self.versioned_footer {
if footer.len() == 8 { VersionedFooter::V1 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..]))) crc32: _crc,
} else { store_compression: compression,
Err(io::Error::new( } => {
io::ErrorKind::UnexpectedEof, if &library_version.store_compression != compression {
format!( return Err(Incompatibility::CompressionMismatch {
"File corrupted. The versioned footer len is {}, while it should be 8", library_compression_format: library_version.store_compression.to_string(),
footer.len() index_compression_format: compression.to_string(),
), });
))
} }
Ok(())
} }
version => Ok(VersionedFooter::UnknownVersion { VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
version, library_version: library_version.clone(),
size: footer.len() as u32, index_version: self.version.clone(),
}), }),
} }
} }
}
pub fn size(&self) -> u32 { /// Footer that includes a crc32 hash that enables us to checksum files in the index
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion,
V1 {
crc32: CrcHashU32,
store_compression: String,
},
}
impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new();
match self { match self {
VersionedFooter::V0(_) => 8, VersionedFooter::V1 {
VersionedFooter::UnknownVersion { size, .. } => *size, crc32,
store_compression: compression,
} => {
// Serializes a valid `VersionedFooter`; an unknown version returns an error
// [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?;
}
VersionedFooter::UnknownVersion => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ",
));
}
} }
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
writer.write_all(&buf[..])?;
Ok(())
} }
pub fn version(&self) -> u32 { fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
match self { let len = VInt::deserialize(reader)?.0 as usize;
VersionedFooter::V0(_) => 0, let mut buf = vec![0u8; len];
VersionedFooter::UnknownVersion { version, .. } => *version, reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?;
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression: compression,
})
} else {
Ok(VersionedFooter::UnknownVersion)
} }
} }
}
pub fn crc(&self) -> Option<u32> { impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> {
match self { match self {
VersionedFooter::V0(crc) => Some(*crc), VersionedFooter::V1 { crc32, .. } => Some(*crc32),
VersionedFooter::UnknownVersion { .. } => None, VersionedFooter::UnknownVersion { .. } => None,
} }
} }
@@ -189,25 +205,135 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> { impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> { fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc = self.hasher.take().unwrap().finalize(); let crc32 = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V1 {
let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes(); crc32,
store_compression: crate::store::COMPRESSION.to_string(),
});
let mut writer = self.writer.take().unwrap(); let mut writer = self.writer.take().unwrap();
writer.write_all(&footer)?; footer.append_footer(&mut writer)?;
writer.terminate() writer.terminate()
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::CrcHashU32;
use super::FooterProxy;
use crate::common::BinarySerializable;
use crate::directory::footer::{Footer, VersionedFooter}; use crate::directory::footer::{Footer, VersionedFooter};
use crate::directory::TerminatingWrite;
use byteorder::{ByteOrder, LittleEndian};
use regex::Regex;
#[test]
fn test_versioned_footer() {
let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok());
assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
if let VersionedFooter::V1 {
crc32: _,
store_compression,
} = footer.versioned_footer
{
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version());
}
#[test] #[test]
fn test_serialize_deserialize_footer() { fn test_serialize_deserialize_footer() {
let crc = 123456; let mut buffer = Vec::new();
let footer = Footer::new(VersionedFooter::V0(crc)); let crc32 = 123456u32;
let footer_bytes = footer.to_bytes(); let footer: Footer = Footer::new(VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
});
footer.serialize(&mut buffer).unwrap();
let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
assert_eq!(footer_deser, footer);
}
assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer); #[test]
fn footer_length() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let mut buf = Vec::new();
versioned_footer.serialize(&mut buf).unwrap();
assert_eq!(buf.len(), 13);
let footer = Footer::new(versioned_footer);
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
assert!(regex_ptn.is_match(&footer.meta));
}
#[test]
fn versioned_footer_from_bytes() {
let v_footer_bytes = vec![
// versioned footer length
12 | 128,
// index format version
1,
0,
0,
0,
// crc 32
12,
35,
89,
18,
// compression format
3 | 128,
b'l',
b'z',
b'4',
];
let mut cursor = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc,
store_compression: "lz4".to_string(),
};
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buffer = Vec::new();
assert!(versioned_footer.serialize(&mut buffer).is_ok());
assert_eq!(&v_footer_bytes[..], &buffer[..]);
}
#[test]
fn versioned_footer_panic() {
let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
let mut b = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
assert!(b.is_empty());
let expected_versioned_footer = VersionedFooter::UnknownVersion;
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buf = Vec::new();
assert!(versioned_footer.serialize(&mut buf).is_err());
}
#[test]
#[cfg(not(feature = "lz4"))]
fn compression_mismatch() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let footer = Footer::new(versioned_footer);
let res = footer.is_compatible();
assert!(res.is_err());
} }
} }

View File

@@ -2,13 +2,15 @@ use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
use crate::directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle}; use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
use crate::Result;
use crate::directory::directory::ReadOnlyDirectory;
use crc32fast::Hasher; use crc32fast::Hasher;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
@@ -64,7 +66,7 @@ fn save_managed_paths(
impl ManagedDirectory { impl ManagedDirectory {
/// Wraps a directory as managed directory. /// Wraps a directory as managed directory.
pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> { pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> {
match directory.atomic_read(&MANAGED_FILEPATH) { match directory.atomic_read(&MANAGED_FILEPATH) {
Ok(data) => { Ok(data) => {
let managed_files_json = String::from_utf8_lossy(&data); let managed_files_json = String::from_utf8_lossy(&data);
@@ -87,6 +89,11 @@ impl ManagedDirectory {
meta_informations: Arc::default(), meta_informations: Arc::default(),
}), }),
Err(OpenReadError::IOError(e)) => Err(From::from(e)), Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json`
// does not have any footer, so we cannot detect incompatibility.
Err(crate::TantivyError::IncompatibleIndex(incompatibility))
}
} }
} }
@@ -104,7 +111,10 @@ impl ManagedDirectory {
/// If a file cannot be deleted (for permission reasons for instance) /// If a file cannot be deleted (for permission reasons for instance)
/// an error is simply logged, and the file remains in the list of managed /// an error is simply logged, and the file remains in the list of managed
/// files. /// files.
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) { pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(
&mut self,
get_living_files: L,
) -> crate::Result<GarbageCollectionResult> {
info!("Garbage collect"); info!("Garbage collect");
let mut files_to_delete = vec![]; let mut files_to_delete = vec![];
@@ -130,19 +140,25 @@ impl ManagedDirectory {
// 2) writer change meta.json (for instance after a merge or a commit) // 2) writer change meta.json (for instance after a merge or a commit)
// 3) gc kicks in. // 3) gc kicks in.
// 4) gc removes a file that was useful for process B, before process B opened it. // 4) gc removes a file that was useful for process B, before process B opened it.
if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) { match self.acquire_lock(&META_LOCK) {
let living_files = get_living_files(); Ok(_meta_lock) => {
for managed_path in &meta_informations_rlock.managed_paths { let living_files = get_living_files();
if !living_files.contains(managed_path) { for managed_path in &meta_informations_rlock.managed_paths {
files_to_delete.push(managed_path.clone()); if !living_files.contains(managed_path) {
files_to_delete.push(managed_path.clone());
}
} }
} }
} else { Err(err) => {
error!("Failed to acquire lock for GC"); error!("Failed to acquire lock for GC");
return Err(crate::Error::from(err));
}
} }
} }
let mut failed_to_delete_files = vec![];
let mut deleted_files = vec![]; let mut deleted_files = vec![];
for file_to_delete in files_to_delete { for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) { match self.delete(&file_to_delete) {
Ok(_) => { Ok(_) => {
@@ -152,9 +168,10 @@ impl ManagedDirectory {
Err(file_error) => { Err(file_error) => {
match file_error { match file_error {
DeleteError::FileDoesNotExist(_) => { DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete); deleted_files.push(file_to_delete.clone());
} }
DeleteError::IOError(_) => { DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") { if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file // On windows, delete is expected to fail if the file
// is mmapped. // is mmapped.
@@ -177,10 +194,13 @@ impl ManagedDirectory {
for delete_file in &deleted_files { for delete_file in &deleted_files {
managed_paths_write.remove(delete_file); managed_paths_write.remove(delete_file);
} }
if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() { save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?;
error!("Failed to save the list of managed files.");
}
} }
Ok(GarbageCollectionResult {
deleted_files,
failed_to_delete_files,
})
} }
/// Registers a file as managed /// Registers a file as managed
@@ -245,13 +265,6 @@ impl ManagedDirectory {
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
@@ -269,18 +282,10 @@ impl Directory for ManagedDirectory {
self.directory.atomic_write(path, data) self.directory.atomic_write(path, data)
} }
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
self.directory.delete(path) self.directory.delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> { fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock) self.directory.acquire_lock(lock)
} }
@@ -290,6 +295,24 @@ impl Directory for ManagedDirectory {
} }
} }
impl ReadOnlyDirectory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
}
impl Clone for ManagedDirectory { impl Clone for ManagedDirectory {
fn clone(&self) -> ManagedDirectory { fn clone(&self) -> ManagedDirectory {
ManagedDirectory { ManagedDirectory {
@@ -303,7 +326,9 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite}; use crate::directory::{
Directory, ManagedDirectory, MmapDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;
@@ -328,7 +353,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(managed_directory.exists(test_path2)); assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect(); let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
managed_directory.garbage_collect(|| living_files); assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
} }
@@ -338,7 +363,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = HashSet::new(); let living_files: HashSet<PathBuf> = HashSet::new();
managed_directory.garbage_collect(|| living_files); assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
} }
@@ -360,7 +385,9 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap(); let _mmap_read = managed_directory.open_read(test_path1).unwrap();
managed_directory.garbage_collect(|| living_files.clone()); assert!(managed_directory
.garbage_collect(|| living_files.clone())
.is_ok());
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
// On Windows, gc should try and fail the file as it is mmapped. // On Windows, gc should try and fail the file as it is mmapped.
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
@@ -368,7 +395,7 @@ mod tests_mmap_specific {
drop(_mmap_read); drop(_mmap_read);
// The file should still be in the list of managed file and // The file should still be in the list of managed file and
// eventually be deleted once mmap is released. // eventually be deleted once mmap is released.
managed_directory.garbage_collect(|| living_files); assert!(managed_directory.garbage_collect(|| living_files).is_ok());
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
} else { } else {
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
@@ -393,6 +420,8 @@ mod tests_mmap_specific {
write.write_all(&[3u8, 4u8, 5u8]).unwrap(); write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap(); write.terminate().unwrap();
let read_source = managed_directory.open_read(test_path2).unwrap();
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty()); assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone(); let mut corrupted_path = tempdir_path.clone();

View File

@@ -6,6 +6,7 @@ use self::notify::RawEvent;
use self::notify::RecursiveMode; use self::notify::RecursiveMode;
use self::notify::Watcher; use self::notify::Watcher;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError, DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
@@ -131,14 +132,13 @@ impl MmapCache {
} }
self.cache.remove(full_path); self.cache.remove(full_path);
self.counters.miss += 1; self.counters.miss += 1;
Ok(if let Some(mmap) = open_mmap(full_path)? { let mmap_opt = open_mmap(full_path)?;
Ok(mmap_opt.map(|mmap| {
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap)); let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
let mmap_weak = Arc::downgrade(&mmap_arc); let mmap_weak = Arc::downgrade(&mmap_arc);
self.cache.insert(full_path.to_owned(), mmap_weak); self.cache.insert(full_path.to_owned(), mmap_weak);
Some(mmap_arc) mmap_arc
} else { }))
None
})
} }
} }
@@ -174,7 +174,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point. // We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() { if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH { if filename == *META_FILEPATH {
watcher_router_clone.broadcast(); let _ = watcher_router_clone.broadcast();
} }
} }
} }
@@ -408,24 +408,6 @@ impl TerminatingWrite for SafeFileWriter {
} }
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
/// removed before the file is deleted. /// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
@@ -444,11 +426,6 @@ impl Directory for MmapDirectory {
} }
} }
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path); debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -479,25 +456,6 @@ impl Directory for MmapDirectory {
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path); debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -531,6 +489,50 @@ impl Directory for MmapDirectory {
} }
} }
impl ReadOnlyDirectory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -538,16 +540,15 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use std::fs; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
#[test] #[test]
fn test_open_non_existant_path() { fn test_open_non_existent_path() {
assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err()); assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
} }
@@ -640,13 +641,18 @@ mod tests {
let tmp_dir = tempfile::TempDir::new().unwrap(); let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dirpath = tmp_dir.path().to_owned(); let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap(); let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join("coucou"); let tmp_file = tmp_dirpath.join(*META_FILEPATH);
let _handle = watch_wrapper.watch(Box::new(move || { let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
})); }));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle2 = watch_wrapper.watch(Box::new(move || {
let _ = sender.send(());
}));
assert_eq!(counter.load(Ordering::SeqCst), 0); assert_eq!(counter.load(Ordering::SeqCst), 0);
fs::write(&tmp_file, b"whateverwilldo").unwrap(); fs::write(&tmp_file, b"whateverwilldo").unwrap();
thread::sleep(Duration::new(0, 1_000u32)); assert!(receiver.recv().is_ok());
assert!(counter.load(Ordering::SeqCst) >= 1);
} }
#[test] #[test]
@@ -655,34 +661,42 @@ mod tests {
let mut schema_builder: SchemaBuilder = Schema::builder(); let mut schema_builder: SchemaBuilder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
{ {
let index = Index::create(mmap_directory.clone(), schema).unwrap(); let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for _num_commits in 0..16 { let mut log_merge_policy = LogMergePolicy::default();
log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy));
for _num_commits in 0..10 {
for _ in 0..10 { for _ in 0..10 {
index_writer.add_document(doc!(text_field=>"abc")); index_writer.add_document(doc!(text_field=>"abc"));
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into() .try_into()
.unwrap(); .unwrap();
for _ in 0..30 {
for _ in 0..4 {
index_writer.add_document(doc!(text_field=>"abc")); index_writer.add_document(doc!(text_field=>"abc"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
} }
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
let num_segments = reader.searcher().segment_readers().len(); let num_segments = reader.searcher().segment_readers().len();
assert_eq!(num_segments, 4); assert!(num_segments <= 4);
assert_eq!( assert_eq!(
num_segments * 7, num_segments * 7,
mmap_directory.get_cache_info().mmapped.len() mmap_directory.get_cache_info().mmapped.len()
); );
} }
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0); assert!(mmap_directory.get_cache_info().mmapped.is_empty());
} }
} }

View File

@@ -7,6 +7,7 @@ WORM directory abstraction.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_directory; mod mmap_directory;
mod bundle_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod footer; mod footer;
@@ -19,13 +20,26 @@ mod watch_event_router;
pub mod error; pub mod error;
pub use self::directory::DirectoryLock; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone}; pub use self::directory::{Directory, DirectoryClone, ReadOnlyDirectory};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList; pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{self, BufWriter, Write};
use std::path::PathBuf;
/// Outcome of the Garbage collection
pub struct GarbageCollectionResult {
/// List of files that were deleted in this cycle
pub deleted_files: Vec<PathBuf>,
/// List of files that were scheduled to be deleted in this cycle,
/// but whose deletion did not work. This typically happens on Windows,
/// as deleting a memory-mapped file is forbidden.
///
/// If a searcher is still held, a file cannot be deleted.
/// This is not considered a bug; the file will simply be deleted
/// in the next GC.
pub failed_to_delete_files: Vec<PathBuf>,
}
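Since `garbage_collect` now reports what it did, callers can react to a partially failed cycle instead of scanning logs. A rough usage sketch (the wrapper function and its signature are illustrative assumptions, not part of this change):

use std::collections::HashSet;
use std::path::PathBuf;
use tantivy::directory::ManagedDirectory;

// Hedged sketch: run a GC cycle and report what actually happened.
fn collect_garbage(
    managed_directory: &mut ManagedDirectory,
    living_files: HashSet<PathBuf>,
) -> tantivy::Result<()> {
    let result = managed_directory.garbage_collect(move || living_files)?;
    println!("gc deleted {} files", result.deleted_files.len());
    if !result.failed_to_delete_files.is_empty() {
        // Typically mmapped files on Windows; they stay managed and are retried on the next GC.
        println!("{} files still pending deletion", result.failed_to_delete_files.len());
    }
    Ok(())
}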
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
@@ -33,6 +47,9 @@ pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory; pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly /// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(()); pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer /// Trait used to indicate when no more write need to be done on a writer
@@ -63,6 +80,13 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
} }
} }
#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write

View File

@@ -1,4 +1,6 @@
use crate::common::CountingWriter;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
@@ -115,6 +117,22 @@ impl InnerDirectory {
fn total_mem_usage(&self) -> usize { fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum() self.fs.values().map(|f| f.len()).sum()
} }
fn serialize_bundle(&self, wrt: &mut WritePtr) -> io::Result<()> {
let mut counting_writer = CountingWriter::wrap(wrt);
let mut file_index: HashMap<PathBuf, (u64, u64)> = HashMap::default();
for (path, source) in &self.fs {
let start = counting_writer.written_bytes();
counting_writer.write_all(source.as_slice())?;
let stop = counting_writer.written_bytes();
file_index.insert(path.to_path_buf(), (start, stop));
}
let index_offset = counting_writer.written_bytes();
serde_json::to_writer(&mut counting_writer, &file_index)?;
let index_offset_buffer = index_offset.to_le_bytes();
counting_writer.write_all(&index_offset_buffer[..])?;
Ok(())
}
} }
impl fmt::Debug for RAMDirectory { impl fmt::Debug for RAMDirectory {
@@ -144,13 +162,18 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize { pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage() self.fs.read().unwrap().total_mem_usage()
} }
/// Serialize the RAMDirectory into a bundle.
///
/// This method will fail, write nothing, and return an error if a
/// clone of this directory exists.
pub fn serialize_bundle(self, wrt: &mut WritePtr) -> io::Result<()> {
let inner_directory_rlock = self.fs.read().unwrap();
inner_directory_rlock.serialize_bundle(wrt)
}
} }
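Because `serialize_bundle` consumes the `RAMDirectory`, the intended flow is to build an index entirely in memory and dump it once at the end. A rough sketch (the destination directory, the `index.bundle` file name, and the unwraps are illustrative assumptions):

use std::path::Path;
use tantivy::directory::{Directory, MmapDirectory, RAMDirectory, TerminatingWrite};

// Hedged sketch: dump the whole in-memory directory into a single bundle file.
let ram_directory = RAMDirectory::create();
// ... build and commit an index inside `ram_directory` ...
let mut dest = MmapDirectory::open("/tmp/bundles").unwrap();        // assumed target directory
let mut wrt = dest.open_write(Path::new("index.bundle")).unwrap();  // a WritePtr
ram_directory.serialize_bundle(&mut wrt).unwrap();
wrt.terminate().unwrap(); // flush and finalize the bundle file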
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| { fail_point!("RAMDirectory::delete", |_| {
use crate::directory::error::IOError; use crate::directory::error::IOError;
@@ -160,10 +183,6 @@ impl Directory for RAMDirectory {
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap(); let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
@@ -177,10 +196,6 @@ impl Directory for RAMDirectory {
} }
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
@@ -191,11 +206,11 @@ impl Directory for RAMDirectory {
// Reserve the path to prevent calls to .write() to succeed. // Reserve the path to prevent calls to .write() to succeed.
self.fs.write().unwrap().write(path_buf.clone(), &[]); self.fs.write().unwrap().write(path_buf.clone(), &[]);
let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone()); let mut vec_writer = VecWriter::new(path_buf, self.clone());
vec_writer.write_all(data)?; vec_writer.write_all(data)?;
vec_writer.flush()?; vec_writer.flush()?;
if path == Path::new(&*META_FILEPATH) { if path == Path::new(&*META_FILEPATH) {
self.fs.write().unwrap().watch_router.broadcast(); let _ = self.fs.write().unwrap().watch_router.broadcast();
} }
Ok(()) Ok(())
} }
@@ -204,3 +219,17 @@ impl Directory for RAMDirectory {
Ok(self.fs.write().unwrap().watch(watch_callback)) Ok(self.fs.write().unwrap().watch(watch_callback))
} }
} }
impl ReadOnlyDirectory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
}

View File

@@ -70,6 +70,12 @@ impl ReadOnlySource {
(left, right) (left, right)
} }
/// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Creates a ReadOnlySource that is just a /// Creates a ReadOnlySource that is just a
/// view over a slice of the data. /// view over a slice of the data.
/// ///

View File

@@ -1,25 +1,117 @@
use super::*; use super::*;
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write; use std::io::Write;
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::Ordering; use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time;
use std::time::Duration; use std::time::Duration;
#[test] #[cfg(feature = "mmap")]
fn test_ram_directory() { mod mmap_directory_tests {
let mut ram_directory = RAMDirectory::create(); use crate::directory::MmapDirectory;
test_directory(&mut ram_directory);
type DirectoryImpl = MmapDirectory;
fn make_directory() -> DirectoryImpl {
MmapDirectory::create_from_tempdir().unwrap()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
} }
#[test] mod ram_directory_tests {
#[cfg(feature = "mmap")] use crate::directory::RAMDirectory;
fn test_mmap_directory() {
let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap(); type DirectoryImpl = RAMDirectory;
test_directory(&mut mmap_directory);
fn make_directory() -> DirectoryImpl {
RAMDirectory::default()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
} }
#[test] #[test]
@@ -99,48 +191,39 @@ fn test_directory_delete(directory: &mut dyn Directory) {
assert!(directory.delete(&test_path).is_err()); assert!(directory.delete(&test_path).is_err());
} }
fn test_directory(directory: &mut dyn Directory) {
test_simple(directory);
test_rewrite_forbidden(directory);
test_write_create_the_file(directory);
test_directory_delete(directory);
test_lock_non_blocking(directory);
test_lock_blocking(directory);
test_watch(directory);
}
fn test_watch(directory: &mut dyn Directory) { fn test_watch(directory: &mut dyn Directory) {
let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let (sender, receiver) = crossbeam::channel::unbounded();
let watch_callback = Box::new(move || { let watch_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, SeqCst);
}); });
assert!(directory // This callback is used to synchronize watching in our unit test.
.atomic_write(Path::new("meta.json"), b"random_test_data") // We bind it to a variable because the callback is removed when that
.is_ok()); // handle is dropped.
thread::sleep(Duration::new(0, 10_000));
assert_eq!(0, counter.load(Ordering::SeqCst));
let watch_handle = directory.watch(watch_callback).unwrap(); let watch_handle = directory.watch(watch_callback).unwrap();
let _progress_listener = directory
.watch(Box::new(move || {
let val = num_progress.fetch_add(1, SeqCst);
let _ = sender.send(val);
}))
.unwrap();
for i in 0..10 { for i in 0..10 {
assert_eq!(i, counter.load(Ordering::SeqCst)); assert_eq!(i, counter.load(SeqCst));
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2") .atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok()); .is_ok());
for _ in 0..1_000 { assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
if counter.load(Ordering::SeqCst) > i { assert_eq!(i + 1, counter.load(SeqCst));
break;
}
thread::sleep(Duration::from_millis(10));
}
assert_eq!(i + 1, counter.load(Ordering::SeqCst));
} }
mem::drop(watch_handle); mem::drop(watch_handle);
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data") .atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok()); .is_ok());
thread::sleep(Duration::from_millis(200)); assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
assert_eq!(10, counter.load(Ordering::SeqCst)); assert_eq!(10, counter.load(SeqCst));
} }
fn test_lock_non_blocking(directory: &mut dyn Directory) { fn test_lock_non_blocking(directory: &mut dyn Directory) {
@@ -174,9 +257,13 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
is_blocking: true, is_blocking: true,
}); });
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
let in_thread = Arc::new(AtomicBool::default());
let in_thread_clone = in_thread.clone();
let (sender, receiver) = oneshot::channel();
std::thread::spawn(move || { std::thread::spawn(move || {
//< lock_a_res is sent to the thread. //< lock_a_res is sent to the thread.
std::thread::sleep(time::Duration::from_millis(10)); in_thread_clone.store(true, SeqCst);
let _just_sync = block_on(receiver);
// explicitly dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way.
drop(lock_a_res); drop(lock_a_res);
@@ -189,14 +276,18 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
}); });
assert!(lock_a_res.is_err()); assert!(lock_a_res.is_err());
} }
{ let directory_clone = directory.box_clone();
// the blocking call should wait for at least 10ms. let (sender2, receiver2) = oneshot::channel();
let start = time::Instant::now(); let join_handle = std::thread::spawn(move || {
let lock_a_res = directory.acquire_lock(&Lock { assert!(sender2.send(()).is_ok());
let lock_a_res = directory_clone.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,
}); });
assert!(in_thread.load(SeqCst));
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
assert!(start.elapsed().subsec_millis() >= 10); });
} assert!(block_on(receiver2).is_ok());
assert!(sender.send(()).is_ok());
assert!(join_handle.join().is_ok());
} }

View File

@@ -1,3 +1,5 @@
use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
@@ -22,13 +24,20 @@ pub struct WatchCallbackList {
#[derive(Clone)] #[derive(Clone)]
pub struct WatchHandle(Arc<WatchCallback>); pub struct WatchHandle(Arc<WatchCallback>);
impl WatchHandle {
/// Creates a `WatchHandle`.
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
WatchHandle(watch_callback)
}
}
impl WatchCallbackList { impl WatchCallbackList {
/// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle { pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback); let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc); let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
self.router.write().unwrap().push(watch_callback_weak); self.router.write().unwrap().push(watch_callback_weak);
WatchHandle(watch_callback_arc) WatchHandle::new(watch_callback_arc)
} }
fn list_callback(&self) -> Vec<Arc<WatchCallback>> { fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
@@ -47,14 +56,21 @@ impl WatchCallbackList {
} }
/// Triggers all callbacks /// Triggers all callbacks
pub fn broadcast(&self) { pub fn broadcast(&self) -> impl Future<Output = ()> {
let callbacks = self.list_callback(); let callbacks = self.list_callback();
let (sender, receiver) = oneshot::channel();
let result = receiver.unwrap_or_else(|_| ());
if callbacks.is_empty() {
let _ = sender.send(());
return result;
}
let spawn_res = std::thread::Builder::new() let spawn_res = std::thread::Builder::new()
.name("watch-callbacks".to_string()) .name("watch-callbacks".to_string())
.spawn(move || { .spawn(move || {
for callback in callbacks { for callback in callbacks {
callback(); callback();
} }
let _ = sender.send(());
}); });
if let Err(err) = spawn_res { if let Err(err) = spawn_res {
error!( error!(
@@ -62,19 +78,17 @@ impl WatchCallbackList {
err err
); );
} }
result
} }
} }
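Returning a future from `broadcast` lets callers wait until every registered callback has actually run, instead of sleeping for an arbitrary duration as the old tests did. A small sketch (construction via `Default` is assumed here, as in the tests below):

use futures::executor::block_on;
use tantivy::directory::WatchCallbackList;

// Hedged sketch: block until the spawned "watch-callbacks" thread has invoked every callback.
let watch_callback_list = WatchCallbackList::default();
let _handle = watch_callback_list.subscribe(Box::new(|| println!("meta.json changed")));
block_on(watch_callback_list.broadcast());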
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use futures::executor::block_on;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time::Duration;
const WAIT_TIME: u64 = 20;
#[test] #[test]
fn test_watch_event_router_simple() { fn test_watch_event_router_simple() {
@@ -84,22 +98,22 @@ mod tests {
let inc_callback = Box::new(move || { let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
watch_event_router.broadcast(); block_on(watch_event_router.broadcast());
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
watch_event_router.broadcast(); block_on(watch_event_router.broadcast());
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(1, counter.load(Ordering::SeqCst)); assert_eq!(1, counter.load(Ordering::SeqCst));
watch_event_router.broadcast(); block_on(async {
watch_event_router.broadcast(); (
watch_event_router.broadcast(); watch_event_router.broadcast().await,
thread::sleep(Duration::from_millis(WAIT_TIME)); watch_event_router.broadcast().await,
watch_event_router.broadcast().await,
)
});
assert_eq!(4, counter.load(Ordering::SeqCst)); assert_eq!(4, counter.load(Ordering::SeqCst));
mem::drop(handle_a); mem::drop(handle_a);
watch_event_router.broadcast(); block_on(watch_event_router.broadcast());
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst)); assert_eq!(4, counter.load(Ordering::SeqCst));
} }
@@ -115,20 +129,20 @@ mod tests {
}; };
let handle_a = watch_event_router.subscribe(inc_callback(1)); let handle_a = watch_event_router.subscribe(inc_callback(1));
let handle_a2 = watch_event_router.subscribe(inc_callback(10)); let handle_a2 = watch_event_router.subscribe(inc_callback(10));
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
watch_event_router.broadcast(); block_on(async {
watch_event_router.broadcast(); futures::join!(
thread::sleep(Duration::from_millis(WAIT_TIME)); watch_event_router.broadcast(),
watch_event_router.broadcast()
)
});
assert_eq!(22, counter.load(Ordering::SeqCst)); assert_eq!(22, counter.load(Ordering::SeqCst));
mem::drop(handle_a); mem::drop(handle_a);
watch_event_router.broadcast(); block_on(watch_event_router.broadcast());
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst)); assert_eq!(32, counter.load(Ordering::SeqCst));
mem::drop(handle_a2); mem::drop(handle_a2);
watch_event_router.broadcast(); block_on(watch_event_router.broadcast());
watch_event_router.broadcast(); block_on(watch_event_router.broadcast());
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst)); assert_eq!(32, counter.load(Ordering::SeqCst));
} }
@@ -142,14 +156,15 @@ mod tests {
}); });
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
watch_event_router.broadcast(); block_on(async {
watch_event_router.broadcast(); let future1 = watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME)); let future2 = watch_event_router.broadcast();
futures::join!(future1, future2)
});
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
thread::sleep(Duration::from_millis(WAIT_TIME));
mem::drop(handle_a); mem::drop(handle_a);
watch_event_router.broadcast(); let _ = watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME)); block_on(watch_event_router.broadcast());
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
} }
} }
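For reference, a minimal usage sketch of the asynchronous `broadcast` above (constructing the list via `WatchCallbackList::default()` and passing a boxed closure, as the tests do, is assumed):

    use futures::executor::block_on;

    let watch_list = WatchCallbackList::default();
    // Dropping the returned WatchHandle unsubscribes the callback.
    let _handle = watch_list.subscribe(Box::new(|| println!("meta file changed")));
    // broadcast() now returns a future that resolves once every live callback has run.
    block_on(watch_list.broadcast());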

View File

@@ -2,8 +2,8 @@
use std::io; use std::io;
use crate::directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError; use crate::fastfield::FastFieldNotAvailableError;
use crate::query; use crate::query;
use crate::schema; use crate::schema;
@@ -25,10 +25,10 @@ impl DataCorruption {
} }
} }
pub fn comment_only(comment: String) -> DataCorruption { pub fn comment_only<TS: ToString>(comment: TS) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: None, filepath: None,
comment, comment: comment.to_string(),
} }
} }
} }
@@ -80,6 +80,9 @@ pub enum TantivyError {
/// System error. (e.g.: We failed spawning a new thread) /// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
/// Index incompatible with current version of tantivy
#[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility),
} }
impl From<DataCorruption> for TantivyError { impl From<DataCorruption> for TantivyError {
@@ -129,6 +132,9 @@ impl From<OpenReadError> for TantivyError {
match error { match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath), OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error), OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
} }
} }
} }
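A hedged sketch of how the new variant reaches a caller; the `directory` value and the path are illustrative:

    use std::path::Path;

    match directory.open_read(Path::new("meta.json")) {
        Ok(_source) => { /* proceed with reading the index */ }
        Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
            // Incompatibility implements Debug; TantivyError reuses it in its display above.
            eprintln!("incompatible index: {:?}", incompatibility);
        }
        Err(other) => {
            let err: TantivyError = other.into();
            eprintln!("open failed: {}", err);
        }
    }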

View File

@@ -1,9 +1,8 @@
use crate::common::HasLen; use crate::common::{BitSet, HasLen};
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::space_usage::ByteCount; use crate::space_usage::ByteCount;
use crate::DocId; use crate::DocId;
use bit_set::BitSet;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -17,7 +16,7 @@ pub fn write_delete_bitset(
) -> io::Result<()> { ) -> io::Result<()> {
let mut byte = 0u8; let mut byte = 0u8;
let mut shift = 0u8; let mut shift = 0u8;
for doc in 0..(max_doc as usize) { for doc in 0..max_doc {
if delete_bitset.contains(doc) { if delete_bitset.contains(doc) {
byte |= 1 << shift; byte |= 1 << shift;
} }
@@ -32,7 +31,7 @@ pub fn write_delete_bitset(
if max_doc % 8 > 0 { if max_doc % 8 > 0 {
writer.write_all(&[byte])?; writer.write_all(&[byte])?;
} }
writer.flush() Ok(())
} }
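The loop above packs one bit per document, flushing a byte every eight documents plus one trailing partial byte; a standalone sketch of that packing rule (plain Rust, independent of the tantivy writer types, which are only partially shown here):

    // doc `d` lands in byte `d / 8`, at bit `d % 8`.
    let max_doc = 10u32;
    let deleted = [1u32, 9];
    let mut bytes = vec![0u8; ((max_doc as usize) + 7) / 8];
    for &doc in &deleted {
        bytes[(doc / 8) as usize] |= 1u8 << (doc % 8);
    }
    // Doc 1 sets bit 1 of byte 0; doc 9 sets bit 1 of the trailing partial byte.
    assert_eq!(bytes, vec![0b0000_0010, 0b0000_0010]);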
/// Set of deleted `DocId`s. /// Set of deleted `DocId`s.
@@ -86,7 +85,6 @@ impl HasLen for DeleteBitSet {
mod tests { mod tests {
use super::*; use super::*;
use crate::directory::*; use crate::directory::*;
use bit_set::BitSet;
use std::path::PathBuf; use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) { fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
@@ -95,27 +93,26 @@ mod tests {
{ {
let mut writer = directory.open_write(&*test_path).unwrap(); let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap(); write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
writer.terminate().unwrap();
} }
{ let source = directory.open_read(&test_path).unwrap();
let source = directory.open_read(&test_path).unwrap(); let delete_bitset = DeleteBitSet::open(source);
let delete_bitset = DeleteBitSet::open(source); for doc in 0..max_doc {
for doc in 0..max_doc as usize { assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
} }
assert_eq!(delete_bitset.len(), bitset.len());
} }
#[test] #[test]
fn test_delete_bitset() { fn test_delete_bitset() {
{ {
let mut bitset = BitSet::with_capacity(10); let mut bitset = BitSet::with_max_value(10);
bitset.insert(1); bitset.insert(1);
bitset.insert(9); bitset.insert(9);
test_delete_bitset_helper(&bitset, 10); test_delete_bitset_helper(&bitset, 10);
} }
{ {
let mut bitset = BitSet::with_capacity(8); let mut bitset = BitSet::with_max_value(8);
bitset.insert(1); bitset.insert(1);
bitset.insert(2); bitset.insert(2);
bitset.insert(3); bitset.insert(3);

View File

@@ -33,6 +33,7 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common; use crate::common;
use crate::schema::Cardinality; use crate::schema::Cardinality;
use crate::schema::FieldType; use crate::schema::FieldType;
@@ -49,7 +50,7 @@ mod serializer;
mod writer; mod writer;
/// Trait for types that are allowed for fast fields (u64, i64, f64 and date). /// Trait for types that are allowed for fast fields (u64, i64, f64 and date).
pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd { pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
/// Converts a value from u64 /// Converts a value from u64
/// ///
/// Internally all fast field values are encoded as u64. /// Internally all fast field values are encoded as u64.
@@ -69,6 +70,12 @@ pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
/// Cast value to `u64`. /// Cast value to `u64`.
/// The value is just reinterpreted in memory. /// The value is just reinterpreted in memory.
fn as_u64(&self) -> u64; fn as_u64(&self) -> u64;
/// Build a default value. This default value is never read (it only serves as a
/// placeholder, e.g. when resizing buffers), so the actual value does not matter.
fn make_zero() -> Self {
Self::from_u64(0i64.to_u64())
}
} }
impl FastValue for u64 { impl FastValue for u64 {
@@ -135,11 +142,34 @@ impl FastValue for f64 {
} }
} }
impl FastValue for crate::DateTime {
fn from_u64(timestamp_u64: u64) -> Self {
let timestamp_i64 = i64::from_u64(timestamp_u64);
crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
}
fn to_u64(&self) -> u64 {
self.timestamp().to_u64()
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.timestamp().as_u64()
}
}
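A short round-trip check of the encoding this impl defines (the timestamp value is illustrative):

    use crate::fastfield::FastValue;

    // A DateTime fast value is stored as its i64 UNIX timestamp, remapped to u64
    // with the same monotonic mapping used for i64 fast fields.
    let dt = crate::DateTime::from_u64(1_577_836_800i64.to_u64()); // 2020-01-01T00:00:00Z
    assert_eq!(dt.timestamp(), 1_577_836_800i64);
    assert_eq!(i64::from_u64(dt.to_u64()), 1_577_836_800i64);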
fn value_to_u64(value: &Value) -> u64 { fn value_to_u64(value: &Value) -> u64 {
match *value { match *value {
Value::U64(ref val) => *val, Value::U64(ref val) => *val,
Value::I64(ref val) => common::i64_to_u64(*val), Value::I64(ref val) => common::i64_to_u64(*val),
Value::F64(ref val) => common::f64_to_u64(*val), Value::F64(ref val) => common::f64_to_u64(*val),
Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
_ => panic!("Expected a u64/i64/f64 field, got {:?} ", value), _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
} }
} }
@@ -149,12 +179,14 @@ mod tests {
use super::*; use super::*;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::schema::Document; use crate::merge_policy::NoMergePolicy;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;
use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -178,6 +210,12 @@ mod tests {
assert_eq!(test_fastfield.get(2), 300); assert_eq!(test_fastfield.get(2), 300);
} }
#[test]
pub fn test_fastfield_i64_u64() {
let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
}
#[test] #[test]
fn test_intfastfield_small() { fn test_intfastfield_small() {
let path = Path::new("test"); let path = Path::new("test");
@@ -429,6 +467,93 @@ mod tests {
} }
} }
} }
#[test]
fn test_merge_missing_date_fast_field() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(date_field => crate::chrono::prelude::Utc::now()));
index_writer.commit().unwrap();
index_writer.add_document(doc!());
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let segment_ids: Vec<SegmentId> = reader
.searcher()
.segment_readers()
.iter()
.map(SegmentReader::segment_id)
.collect();
assert_eq!(segment_ids.len(), 2);
let merge_future = index_writer.merge(&segment_ids[..]);
let merge_res = futures::executor::block_on(merge_future);
assert!(merge_res.is_ok());
assert!(reader.reload().is_ok());
assert_eq!(reader.searcher().segment_readers().len(), 1);
}
#[test]
fn test_default_datetime() {
assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
}
#[test]
fn test_datefastfield() {
use crate::fastfield::FastValue;
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", FAST);
let multi_date_field = schema_builder.add_date_field(
"multi_date",
IntOptions::default().set_fast(Cardinality::MultiValues),
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(1i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(4i64.to_u64())
));
index_writer.add_document(doc!(
multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let date_fast_field = fast_fields.date(date_field).unwrap();
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
let mut dates = vec![];
{
assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
dates_fast_field.get_vals(0u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].timestamp(), 2i64);
assert_eq!(dates[1].timestamp(), 3i64);
}
{
assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
dates_fast_field.get_vals(1u32, &mut dates);
assert!(dates.is_empty());
}
{
assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
dates_fast_field.get_vals(2u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].timestamp(), 5i64);
assert_eq!(dates[1].timestamp(), 6i64);
}
}
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]

View File

@@ -45,7 +45,7 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) { pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
let (start, stop) = self.range(doc); let (start, stop) = self.range(doc);
let len = (stop - start) as usize; let len = (stop - start) as usize;
vals.resize(len, Item::default()); vals.resize(len, Item::make_zero());
self.vals_reader.get_range_u64(start, &mut vals[..]); self.vals_reader.get_range_u64(start, &mut vals[..]);
} }

View File

@@ -4,7 +4,7 @@ use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;

View File

@@ -15,9 +15,11 @@ pub struct FastFieldReaders {
fast_field_i64: HashMap<Field, FastFieldReader<i64>>, fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
fast_field_u64: HashMap<Field, FastFieldReader<u64>>, fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
fast_field_f64: HashMap<Field, FastFieldReader<f64>>, fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>, fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>, fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>, fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
fast_bytes: HashMap<Field, BytesFastFieldReader>, fast_bytes: HashMap<Field, BytesFastFieldReader>,
fast_fields_composite: CompositeFile, fast_fields_composite: CompositeFile,
} }
@@ -26,6 +28,7 @@ enum FastType {
I64, I64,
U64, U64,
F64, F64,
Date,
} }
fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> { fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
@@ -39,6 +42,9 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
FieldType::F64(options) => options FieldType::F64(options) => options
.get_fastfield_cardinality() .get_fastfield_cardinality()
.map(|cardinality| (FastType::F64, cardinality)), .map(|cardinality| (FastType::F64, cardinality)),
FieldType::Date(options) => options
.get_fastfield_cardinality()
.map(|cardinality| (FastType::Date, cardinality)),
FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)), FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
_ => None, _ => None,
} }
@@ -53,9 +59,11 @@ impl FastFieldReaders {
fast_field_i64: Default::default(), fast_field_i64: Default::default(),
fast_field_u64: Default::default(), fast_field_u64: Default::default(),
fast_field_f64: Default::default(), fast_field_f64: Default::default(),
fast_field_date: Default::default(),
fast_field_i64s: Default::default(), fast_field_i64s: Default::default(),
fast_field_u64s: Default::default(), fast_field_u64s: Default::default(),
fast_field_f64s: Default::default(), fast_field_f64s: Default::default(),
fast_field_dates: Default::default(),
fast_bytes: Default::default(), fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(), fast_fields_composite: fast_fields_composite.clone(),
}; };
@@ -95,6 +103,12 @@ impl FastFieldReaders {
FastFieldReader::open(fast_field_data.clone()), FastFieldReader::open(fast_field_data.clone()),
); );
} }
FastType::Date => {
fast_field_readers.fast_field_date.insert(
field,
FastFieldReader::open(fast_field_data.clone()),
);
}
} }
} else { } else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry))); return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -130,6 +144,14 @@ impl FastFieldReaders {
.fast_field_f64s .fast_field_f64s
.insert(field, multivalued_int_fast_field); .insert(field, multivalued_int_fast_field);
} }
FastType::Date => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_dates
.insert(field, multivalued_int_fast_field);
}
} }
} else { } else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry))); return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -156,8 +178,6 @@ impl FastFieldReaders {
/// If the field is an i64 fast field, returns the associated u64 reader. Values are /// If the field is an i64 fast field, returns the associated u64 reader. Values are
/// mapped from i64 to u64 using the (unique) monotonic mapping. /// /// mapped from i64 to u64 using the (unique) monotonic mapping. ///
/// ///
///TODO should it also be lenient with f64?
///
/// This method is useful when merging segment readers. /// This method is useful when merging segment readers.
pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> { pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
if let Some(u64_ff_reader) = self.u64(field) { if let Some(u64_ff_reader) = self.u64(field) {
@@ -166,6 +186,12 @@ impl FastFieldReaders {
if let Some(i64_ff_reader) = self.i64(field) { if let Some(i64_ff_reader) = self.i64(field) {
return Some(i64_ff_reader.into_u64_reader()); return Some(i64_ff_reader.into_u64_reader());
} }
if let Some(f64_ff_reader) = self.f64(field) {
return Some(f64_ff_reader.into_u64_reader());
}
if let Some(date_ff_reader) = self.date(field) {
return Some(date_ff_reader.into_u64_reader());
}
None None
} }
@@ -176,6 +202,13 @@ impl FastFieldReaders {
self.fast_field_i64.get(&field).cloned() self.fast_field_i64.get(&field).cloned()
} }
/// Returns the `crate::DateTime` fast field reader associated to `field`.
///
/// If `field` is not a date fast field, this method returns `None`.
pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
self.fast_field_date.get(&field).cloned()
}
/// Returns the `f64` fast field reader associated to `field`. /// Returns the `f64` fast field reader associated to `field`.
/// ///
/// If `field` is not an f64 fast field, this method returns `None`. /// If `field` is not an f64 fast field, this method returns `None`.
@@ -202,6 +235,9 @@ impl FastFieldReaders {
if let Some(i64s_ff_reader) = self.i64s(field) { if let Some(i64s_ff_reader) = self.i64s(field) {
return Some(i64s_ff_reader.into_u64s_reader()); return Some(i64s_ff_reader.into_u64s_reader());
} }
if let Some(f64s_ff_reader) = self.f64s(field) {
return Some(f64s_ff_reader.into_u64s_reader());
}
None None
} }
@@ -219,6 +255,13 @@ impl FastFieldReaders {
self.fast_field_f64s.get(&field).cloned() self.fast_field_f64s.get(&field).cloned()
} }
/// Returns a `crate::DateTime` multi-valued fast field reader associated to `field`.
///
/// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
self.fast_field_dates.get(&field).cloned()
}
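A hedged usage sketch of the two new accessors (the `segment_reader` and the field handles are assumed to be set up as in the date fast field test earlier in this diff):

    let fast_fields = segment_reader.fast_fields();
    // Single-valued date fast field.
    if let Some(date_reader) = fast_fields.date(date_field) {
        println!("doc 0: {}", date_reader.get(0u32).timestamp());
    }
    // Multi-valued date fast field.
    let mut dates = Vec::new();
    if let Some(dates_reader) = fast_fields.dates(multi_date_field) {
        dates_reader.get_vals(0u32, &mut dates);
    }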
/// Returns the `bytes` fast field reader associated to `field`. /// Returns the `bytes` fast field reader associated to `field`.
/// ///
/// If `field` is not a bytes fast field, returns `None`. /// If `field` is not a bytes fast field, returns `None`.

View File

@@ -4,7 +4,7 @@ use crate::common::BinarySerializable;
use crate::common::VInt; use crate::common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer}; use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema}; use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
@@ -17,6 +17,14 @@ pub struct FastFieldsWriter {
bytes_value_writers: Vec<BytesFastFieldWriter>, bytes_value_writers: Vec<BytesFastFieldWriter>,
} }
fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
match *field_entry.field_type() {
FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
}
}
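A small check of what these defaults decode back to, using the `FastValue` conversions shown earlier in this diff:

    use crate::fastfield::FastValue;

    // i64 and date fields fall back to 0 / timestamp 0, f64 fields to 0.0,
    // all encoded through the monotonic u64 mappings in `common`.
    assert_eq!(i64::from_u64(common::i64_to_u64(0i64)), 0i64);
    assert_eq!(f64::from_u64(common::f64_to_u64(0.0f64)), 0.0f64);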
impl FastFieldsWriter { impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema. /// Create all `FastFieldWriter` required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter { pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
@@ -25,18 +33,15 @@ impl FastFieldsWriter {
let mut bytes_value_writers = Vec::new(); let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() { for (field, field_entry) in schema.fields() {
let default_value = match *field_entry.field_type() {
FieldType::I64(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
};
match *field_entry.field_type() { match *field_entry.field_type() {
FieldType::I64(ref int_options) FieldType::I64(ref int_options)
| FieldType::U64(ref int_options) | FieldType::U64(ref int_options)
| FieldType::F64(ref int_options) => { | FieldType::F64(ref int_options)
| FieldType::Date(ref int_options) => {
match int_options.get_fastfield_cardinality() { match int_options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => { Some(Cardinality::SingleValue) => {
let mut fast_field_writer = IntFastFieldWriter::new(field); let mut fast_field_writer = IntFastFieldWriter::new(field);
let default_value = fast_field_default_value(field_entry);
fast_field_writer.set_val_if_missing(default_value); fast_field_writer.set_val_if_missing(default_value);
single_value_writers.push(fast_field_writer); single_value_writers.push(fast_field_writer);
} }

View File

@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
use crate::Opstamp; use crate::Opstamp;
use std::mem; use std::mem;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock, Weak};
// The DeleteQueue is conceptually similar to a multiple // The DeleteQueue is conceptually similar to a multiple
// consumer single producer broadcast channel. // consumer single producer broadcast channel.
@@ -14,14 +14,15 @@ use std::sync::{Arc, RwLock};
// //
// New consumer can be created in two ways // New consumer can be created in two ways
// - calling `delete_queue.cursor()` returns a cursor, that // - calling `delete_queue.cursor()` returns a cursor, that
// will include all future delete operations (and no past operations). // will include all future delete operations (and some or none
// of the past operations; the client is in charge of checking the opstamps).
// - cloning an existing cursor returns a new cursor, that // - cloning an existing cursor returns a new cursor, that
// is at the exact same position, and can now advance independently // is at the exact same position, and can now advance independently
// from the original cursor. // from the original cursor.
#[derive(Default)] #[derive(Default)]
struct InnerDeleteQueue { struct InnerDeleteQueue {
writer: Vec<DeleteOperation>, writer: Vec<DeleteOperation>,
last_block: Option<Arc<Block>>, last_block: Weak<Block>,
} }
#[derive(Clone)] #[derive(Clone)]
@@ -32,21 +33,31 @@ pub struct DeleteQueue {
impl DeleteQueue { impl DeleteQueue {
// Creates a new delete queue. // Creates a new delete queue.
pub fn new() -> DeleteQueue { pub fn new() -> DeleteQueue {
let delete_queue = DeleteQueue { DeleteQueue {
inner: Arc::default(), inner: Arc::default(),
};
let next_block = NextBlock::from(delete_queue.clone());
{
let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
delete_queue_wlock.last_block = Some(Arc::new(Block {
operations: Arc::default(),
next: next_block,
}));
} }
}
delete_queue fn get_last_block(&self) -> Arc<Block> {
{
// try get the last block with simply acquiring the read lock.
let rlock = self.inner.read().unwrap();
if let Some(block) = rlock.last_block.upgrade() {
return block;
}
}
// It failed. Let's double check after acquiring the write, as someone could have called
// `get_last_block` right after we released the rlock.
let mut wlock = self.inner.write().unwrap();
if let Some(block) = wlock.last_block.upgrade() {
return block;
}
let block = Arc::new(Block {
operations: Arc::default(),
next: NextBlock::from(self.clone()),
});
wlock.last_block = Arc::downgrade(&block);
block
} }
// Creates a new cursor that makes it possible to // Creates a new cursor that makes it possible to
@@ -54,17 +65,7 @@ impl DeleteQueue {
// //
// Past delete operations are not accessible. // Past delete operations are not accessible.
pub fn cursor(&self) -> DeleteCursor { pub fn cursor(&self) -> DeleteCursor {
let last_block = self let last_block = self.get_last_block();
.inner
.read()
.expect("Read lock poisoned when opening delete queue cursor")
.last_block
.clone()
.expect(
"Failed to unwrap last_block. This should never happen
as the Option<> is only here to make
initialization possible",
);
let operations_len = last_block.operations.len(); let operations_len = last_block.operations.len();
DeleteCursor { DeleteCursor {
block: last_block, block: last_block,
@@ -100,23 +101,19 @@ impl DeleteQueue {
.write() .write()
.expect("Failed to acquire write lock on delete queue writer"); .expect("Failed to acquire write lock on delete queue writer");
let delete_operations; if self_wlock.writer.is_empty() {
{ return None;
let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
if writer.is_empty() {
return None;
}
delete_operations = mem::replace(writer, vec![]);
} }
let next_block = NextBlock::from(self.clone()); let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
{
self_wlock.last_block = Some(Arc::new(Block { let new_block = Arc::new(Block {
operations: Arc::new(delete_operations), operations: Arc::new(delete_operations.into_boxed_slice()),
next: next_block, next: NextBlock::from(self.clone()),
})); });
}
self_wlock.last_block.clone() self_wlock.last_block = Arc::downgrade(&new_block);
Some(new_block)
} }
} }
@@ -170,7 +167,7 @@ impl NextBlock {
} }
struct Block { struct Block {
operations: Arc<Vec<DeleteOperation>>, operations: Arc<Box<[DeleteOperation]>>,
next: NextBlock, next: NextBlock,
} }
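The new `get_last_block` follows a double-checked read-then-write pattern around the `Weak` slot; a generic, standalone sketch of that pattern (not a tantivy API):

    use std::sync::{Arc, RwLock, Weak};

    fn get_or_create<T>(slot: &RwLock<Weak<T>>, make: impl FnOnce() -> Arc<T>) -> Arc<T> {
        // Fast path: the value is still alive somewhere, a read lock is enough.
        if let Some(existing) = slot.read().unwrap().upgrade() {
            return existing;
        }
        // Slow path: re-check under the write lock, since another thread may have
        // re-created the value between the two lock acquisitions.
        let mut guard = slot.write().unwrap();
        if let Some(existing) = guard.upgrade() {
            return existing;
        }
        let fresh = make();
        *guard = Arc::downgrade(&fresh);
        fresh
    }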

View File

@@ -1,14 +1,15 @@
use super::operation::{AddOperation, UserOperation}; use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater; use super::segment_updater::SegmentUpdater;
use super::PreparedCommit; use super::PreparedCommit;
use crate::common::BitSet;
use crate::core::Index; use crate::core::Index;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite; use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset; use crate::fastfield::write_delete_bitset;
@@ -23,10 +24,9 @@ use crate::schema::Document;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use bit_set::BitSet;
use crossbeam::channel; use crossbeam::channel;
use futures::{Canceled, Future}; use futures::executor::block_on;
use futures::future::Future;
use smallvec::smallvec; use smallvec::smallvec;
use smallvec::SmallVec; use smallvec::SmallVec;
use std::mem; use std::mem;
@@ -72,7 +72,7 @@ pub struct IndexWriter {
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
workers_join_handle: Vec<JoinHandle<Result<()>>>, workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
operation_receiver: OperationReceiver, operation_receiver: OperationReceiver,
operation_sender: OperationSender, operation_sender: OperationSender,
@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
delete_cursor: &mut DeleteCursor, delete_cursor: &mut DeleteCursor,
doc_opstamps: &DocToOpstampMapping, doc_opstamps: &DocToOpstampMapping,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> Result<bool> { ) -> crate::Result<bool> {
let mut might_have_changed = false; let mut might_have_changed = false;
while let Some(delete_op) = delete_cursor.get() { while let Some(delete_op) = delete_cursor.get() {
if delete_op.opstamp > target_opstamp { if delete_op.opstamp > target_opstamp {
@@ -115,7 +115,7 @@ fn compute_deleted_bitset(
while docset.advance() { while docset.advance() {
let deleted_doc = docset.doc(); let deleted_doc = docset.doc();
if deleted_doc < limit_doc { if deleted_doc < limit_doc {
delete_bitset.insert(deleted_doc as usize); delete_bitset.insert(deleted_doc);
might_have_changed = true; might_have_changed = true;
} }
} }
@@ -126,51 +126,60 @@ fn compute_deleted_bitset(
Ok(might_have_changed) Ok(might_have_changed)
} }
/// Advance delete for the given segment up /// Advance deletes for the given segment up to the target opstamp.
/// to the target opstamp. ///
/// Note that there is no guarantee that the resulting `segment_entry` delete_opstamp
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, the `segment_entry` is not updated.
pub(crate) fn advance_deletes( pub(crate) fn advance_deletes(
mut segment: Segment, mut segment: Segment,
segment_entry: &mut SegmentEntry, segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> Result<()> { ) -> crate::Result<()> {
{ if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
if segment_entry.meta().delete_opstamp() == Some(target_opstamp) { // We are already up-to-date here.
// We are already up-to-date here. return Ok(());
return Ok(()); }
}
let segment_reader = SegmentReader::open(&segment)?; if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
return Ok(());
}
let max_doc = segment_reader.max_doc(); let segment_reader = SegmentReader::open(&segment)?;
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_capacity(max_doc as usize),
};
let delete_cursor = segment_entry.delete_cursor(); let max_doc = segment_reader.max_doc();
compute_deleted_bitset( let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
&mut delete_bitset, Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
&segment_reader, None => BitSet::with_max_value(max_doc),
delete_cursor, };
&DocToOpstampMapping::None,
target_opstamp,
)?;
// TODO optimize compute_deleted_bitset(
&mut delete_bitset,
&segment_reader,
segment_entry.delete_cursor(),
&DocToOpstampMapping::None,
target_opstamp,
)?;
// TODO optimize
if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
for doc in 0u32..max_doc { for doc in 0u32..max_doc {
if segment_reader.is_deleted(doc) { if seg_delete_bitset.is_deleted(doc) {
delete_bitset.insert(doc as usize); delete_bitset.insert(doc);
} }
} }
let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
delete_file.terminate()?;
}
} }
let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
delete_file.terminate()?;
}
segment_entry.set_meta(segment.meta().clone()); segment_entry.set_meta(segment.meta().clone());
Ok(()) Ok(())
} }
@@ -181,7 +190,7 @@ fn index_documents(
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>, grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater, segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor, mut delete_cursor: DeleteCursor,
) -> Result<bool> { ) -> crate::Result<bool> {
let schema = segment.schema(); let schema = segment.schema();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?; let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
@@ -227,7 +236,7 @@ fn index_documents(
delete_cursor, delete_cursor,
delete_bitset_opt, delete_bitset_opt,
); );
segment_updater.add_segment(segment_entry); block_on(segment_updater.schedule_add_segment(segment_entry))?;
Ok(true) Ok(true)
} }
@@ -236,7 +245,7 @@ fn apply_deletes(
mut delete_cursor: &mut DeleteCursor, mut delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp], doc_opstamps: &[Opstamp],
last_docstamp: Opstamp, last_docstamp: Opstamp,
) -> Result<Option<BitSet<u32>>> { ) -> crate::Result<Option<BitSet>> {
if delete_cursor.get().is_none() { if delete_cursor.get().is_none() {
// if there are no delete operation in the queue, no need // if there are no delete operation in the queue, no need
// to even open the segment. // to even open the segment.
@@ -246,7 +255,7 @@ fn apply_deletes(
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps); let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let max_doc = segment.meta().max_doc(); let max_doc = segment.meta().max_doc();
let mut deleted_bitset = BitSet::with_capacity(max_doc as usize); let mut deleted_bitset = BitSet::with_max_value(max_doc);
let may_have_deletes = compute_deleted_bitset( let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset, &mut deleted_bitset,
&segment_reader, &segment_reader,
@@ -281,7 +290,7 @@ impl IndexWriter {
num_threads: usize, num_threads: usize,
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
directory_lock: DirectoryLock, directory_lock: DirectoryLock,
) -> Result<IndexWriter> { ) -> crate::Result<IndexWriter> {
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN { if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
let err_msg = format!( let err_msg = format!(
"The heap size per thread needs to be at least {}.", "The heap size per thread needs to be at least {}.",
@@ -330,12 +339,17 @@ impl IndexWriter {
Ok(index_writer) Ok(index_writer)
} }
fn drop_sender(&mut self) {
let (sender, _receiver) = channel::bounded(1);
mem::replace(&mut self.operation_sender, sender);
}
/// If there are some merging threads, blocks until they all finish their work and /// If there are some merging threads, blocks until they all finish their work and
/// then drop the `IndexWriter`. /// then drop the `IndexWriter`.
pub fn wait_merging_threads(mut self) -> Result<()> { pub fn wait_merging_threads(mut self) -> crate::Result<()> {
// this will stop the indexing thread, // this will stop the indexing thread,
// dropping the last reference to the segment_updater. // dropping the last reference to the segment_updater.
drop(self.operation_sender); self.drop_sender();
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]); let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
for join_handle in former_workers_handles { for join_handle in former_workers_handles {
@@ -346,7 +360,6 @@ impl IndexWriter {
TantivyError::ErrorInThread("Error in indexing worker thread.".into()) TantivyError::ErrorInThread("Error in indexing worker thread.".into())
})?; })?;
} }
drop(self.workers_join_handle);
let result = self let result = self
.segment_updater .segment_updater
@@ -361,10 +374,10 @@ impl IndexWriter {
} }
#[doc(hidden)] #[doc(hidden)]
pub fn add_segment(&mut self, segment_meta: SegmentMeta) { pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
let delete_cursor = self.delete_queue.cursor(); let delete_cursor = self.delete_queue.cursor();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None); let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
self.segment_updater.add_segment(segment_entry); block_on(self.segment_updater.schedule_add_segment(segment_entry))
} }
/// Creates a new segment. /// Creates a new segment.
@@ -381,7 +394,7 @@ impl IndexWriter {
/// Spawns a new worker thread for indexing. /// Spawns a new worker thread for indexing.
/// The thread consumes documents from the pipeline. /// The thread consumes documents from the pipeline.
fn add_indexing_worker(&mut self) -> Result<()> { fn add_indexing_worker(&mut self) -> crate::Result<()> {
let document_receiver_clone = self.operation_receiver.clone(); let document_receiver_clone = self.operation_receiver.clone();
let mut segment_updater = self.segment_updater.clone(); let mut segment_updater = self.segment_updater.clone();
@@ -389,7 +402,7 @@ impl IndexWriter {
let mem_budget = self.heap_size_in_bytes_per_thread; let mem_budget = self.heap_size_in_bytes_per_thread;
let index = self.index.clone(); let index = self.index.clone();
let join_handle: JoinHandle<Result<()>> = thread::Builder::new() let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
.name(format!("thrd-tantivy-index{}", self.worker_id)) .name(format!("thrd-tantivy-index{}", self.worker_id))
.spawn(move || { .spawn(move || {
loop { loop {
@@ -435,22 +448,23 @@ impl IndexWriter {
self.segment_updater.get_merge_policy() self.segment_updater.get_merge_policy()
} }
/// Set the merge policy. /// Setter for the merge policy.
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy); self.segment_updater.set_merge_policy(merge_policy);
} }
fn start_workers(&mut self) -> Result<()> { fn start_workers(&mut self) -> crate::Result<()> {
for _ in 0..self.num_threads { for _ in 0..self.num_threads {
self.add_indexing_worker()?; self.add_indexing_worker()?;
} }
Ok(()) Ok(())
} }
/// Detects and removes the files that /// Detects and removes the files that are not used by the index anymore.
/// are not used by the index anymore. pub fn garbage_collect_files(
pub fn garbage_collect_files(&mut self) -> Result<()> { &self,
self.segment_updater.garbage_collect_files().wait() ) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
self.segment_updater.schedule_garbage_collect()
} }
/// Deletes all documents from the index /// Deletes all documents from the index
@@ -489,7 +503,7 @@ impl IndexWriter {
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
pub fn delete_all_documents(&mut self) -> Result<Opstamp> { pub fn delete_all_documents(&self) -> crate::Result<Opstamp> {
// Delete segments // Delete segments
self.segment_updater.remove_all_segments(); self.segment_updater.remove_all_segments();
// Return new stamp - reverted stamp // Return new stamp - reverted stamp
@@ -503,8 +517,10 @@ impl IndexWriter {
pub fn merge( pub fn merge(
&mut self, &mut self,
segment_ids: &[SegmentId], segment_ids: &[SegmentId],
) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> { ) -> impl Future<Output = crate::Result<SegmentMeta>> {
self.segment_updater.start_merge(segment_ids) let merge_operation = self.segment_updater.make_merge_operation(segment_ids);
let segment_updater = self.segment_updater.clone();
async move { segment_updater.start_merge(merge_operation)?.await }
} }
/// Closes the current document channel send. /// Closes the current document channel send.
@@ -530,13 +546,8 @@ impl IndexWriter {
/// state as it was after the last commit. /// state as it was after the last commit.
/// ///
/// The opstamp at the last commit is returned. /// The opstamp at the last commit is returned.
pub fn rollback(&mut self) -> Result<Opstamp> { pub fn rollback(&mut self) -> crate::Result<Opstamp> {
info!("Rolling back to opstamp {}", self.committed_opstamp); info!("Rolling back to opstamp {}", self.committed_opstamp);
self.rollback_impl()
}
/// Private, implementation of rollback
fn rollback_impl(&mut self) -> Result<Opstamp> {
// marks the segment updater as killed. From now on, all // marks the segment updater as killed. From now on, all
// segment updates will be ignored. // segment updates will be ignored.
self.segment_updater.kill(); self.segment_updater.kill();
@@ -592,7 +603,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit` /// It is also possible to add a payload to the `commit`
/// using this API. /// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html) /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> { pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
// Here, because we join all of the worker threads, // Here, because we join all of the worker threads,
// all of the segment update for this commit have been // all of the segment update for this commit have been
// sent. // sent.
@@ -639,7 +650,7 @@ impl IndexWriter {
/// Commit returns the `opstamp` of the last document /// Commit returns the `opstamp` of the last document
/// that made it in the commit. /// that made it in the commit.
/// ///
pub fn commit(&mut self) -> Result<Opstamp> { pub fn commit(&mut self) -> crate::Result<Opstamp> {
self.prepare_commit()?.commit() self.prepare_commit()?.commit()
} }
@@ -680,9 +691,6 @@ impl IndexWriter {
/// The opstamp is an increasing `u64` that can /// The opstamp is an increasing `u64` that can
/// be used by the client to align commits with its own /// be used by the client to align commits with its own
/// document queue. /// document queue.
///
/// Currently it represents the number of documents that
/// have been added since the creation of the index.
pub fn add_document(&self, document: Document) -> Opstamp { pub fn add_document(&self, document: Document) -> Opstamp {
let opstamp = self.stamper.stamp(); let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document }; let add_operation = AddOperation { opstamp, document };
@@ -756,6 +764,16 @@ impl IndexWriter {
} }
} }
impl Drop for IndexWriter {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();
for work in self.workers_join_handle.drain(..) {
let _ = work.join();
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -765,7 +783,7 @@ mod tests {
use crate::error::*; use crate::error::*;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption}; use crate::schema::{self, IndexRecordOption, STRING};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::Term; use crate::Term;
@@ -1190,4 +1208,16 @@ mod tests {
assert!(clear_again.is_ok()); assert!(clear_again.is_ok());
assert!(commit_again.is_ok()); assert!(commit_again.is_ok());
} }
#[test]
fn test_index_doc_missing_field() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
let commit = index_writer.commit();
assert!(commit.is_ok());
}
} }
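With `merge` now returning a future, callers drive it to completion explicitly; a short sketch (the index and a mutable `index_writer` are assumed to be set up as in the tests above):

    use futures::executor::block_on;

    let segment_ids = index.searchable_segment_ids()?;
    // Block on the returned future (or `.await` it from async code) to obtain the
    // SegmentMeta describing the merged segment.
    let _merged_meta = block_on(index_writer.merge(&segment_ids))?;
    index_writer.wait_merging_threads()?;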

View File

@@ -2,14 +2,23 @@ use crate::Opstamp;
use crate::SegmentId; use crate::SegmentId;
use census::{Inventory, TrackedObject}; use census::{Inventory, TrackedObject};
use std::collections::HashSet; use std::collections::HashSet;
use std::ops::Deref;
#[derive(Default)] #[derive(Default)]
pub struct MergeOperationInventory(Inventory<InnerMergeOperation>); pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl Deref for MergeOperationInventory {
type Target = Inventory<InnerMergeOperation>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl MergeOperationInventory { impl MergeOperationInventory {
pub fn segment_in_merge(&self) -> HashSet<SegmentId> { pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default(); let mut segment_in_merge = HashSet::default();
for merge_op in self.0.list() { for merge_op in self.list() {
for &segment_id in &merge_op.segment_ids { for &segment_id in &merge_op.segment_ids {
segment_in_merge.insert(segment_id); segment_in_merge.insert(segment_id);
} }
@@ -35,13 +44,13 @@ pub struct MergeOperation {
inner: TrackedObject<InnerMergeOperation>, inner: TrackedObject<InnerMergeOperation>,
} }
struct InnerMergeOperation { pub(crate) struct InnerMergeOperation {
target_opstamp: Opstamp, target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
} }
impl MergeOperation { impl MergeOperation {
pub fn new( pub(crate) fn new(
inventory: &MergeOperationInventory, inventory: &MergeOperationInventory,
target_opstamp: Opstamp, target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
@@ -51,7 +60,7 @@ impl MergeOperation {
segment_ids, segment_ids,
}; };
MergeOperation { MergeOperation {
inner: inventory.0.track(inner_merge_operation), inner: inventory.track(inner_merge_operation),
} }
} }

View File

@@ -709,7 +709,7 @@ mod tests {
use crate::IndexWriter; use crate::IndexWriter;
use crate::Searcher; use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::Future; use futures::executor::block_on;
use std::io::Cursor; use std::io::Cursor;
#[test] #[test]
@@ -792,11 +792,7 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
} }
{ {
@@ -1040,11 +1036,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
index_writer block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
@@ -1139,11 +1131,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
index_writer block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -1277,11 +1265,7 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
test_searcher( test_searcher(
@@ -1336,11 +1320,7 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
index_writer block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
// commit has not been called yet. The document should still be // commit has not been called yet. The document should still be
// there. // there.
@@ -1361,22 +1341,18 @@ mod tests {
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_u64(int_field, 1); doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone()); index_writer.add_document(doc.clone());
index_writer.commit().expect("commit failed"); assert!(index_writer.commit().is_ok());
index_writer.add_document(doc); index_writer.add_document(doc);
index_writer.commit().expect("commit failed"); assert!(index_writer.commit().is_ok());
index_writer.delete_term(Term::from_field_u64(int_field, 1)); index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
index_writer assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
// assert delete has not been committed // assert delete has not been committed
reader.reload().expect("failed to load searcher 1"); assert!(reader.reload().is_ok());
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
@@ -1415,12 +1391,12 @@ mod tests {
index_doc(&mut index_writer, &[1, 5]); index_doc(&mut index_writer, &[1, 5]);
index_doc(&mut index_writer, &[3]); index_doc(&mut index_writer, &[3]);
index_doc(&mut index_writer, &[17]); index_doc(&mut index_writer, &[17]);
index_writer.commit().expect("committed"); assert!(index_writer.commit().is_ok());
index_doc(&mut index_writer, &[20]); index_doc(&mut index_writer, &[20]);
index_writer.commit().expect("committed"); assert!(index_writer.commit().is_ok());
index_doc(&mut index_writer, &[28, 27]); index_doc(&mut index_writer, &[28, 27]);
index_doc(&mut index_writer, &[1_000]); index_doc(&mut index_writer, &[1_000]);
index_writer.commit().expect("committed"); assert!(index_writer.commit().is_ok());
} }
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -1452,15 +1428,6 @@ mod tests {
assert_eq!(&vals, &[17]); assert_eq!(&vals, &[17]);
} }
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
{ {
let segment = searcher.segment_reader(1u32); let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1484,27 +1451,13 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
.merge(&segment_ids) assert!(index_writer.wait_merging_threads().is_ok());
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer
.wait_merging_threads()
.expect("Wait for merging threads");
} }
reader.reload().expect("Load searcher"); assert!(reader.reload().is_ok());
{ {
let searcher = reader.searcher(); let searcher = reader.searcher();
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1539,4 +1492,46 @@ mod tests {
assert_eq!(&vals, &[20]); assert_eq!(&vals, &[20]);
} }
} }
#[test]
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);
let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
policy.set_min_merge_size(2);
writer.set_merge_policy(Box::new(policy));
for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);
writer.add_document(doc);
if i % 5 == 0 {
writer.commit()?;
}
}
writer.commit()?;
writer.wait_merging_threads()?;
// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
} }


@@ -18,7 +18,7 @@ mod stamper;
pub use self::index_writer::IndexWriter; pub use self::index_writer::IndexWriter;
pub use self::log_merge_policy::LogMergePolicy; pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_operation::{MergeOperation, MergeOperationInventory}; pub use self::merge_operation::MergeOperation;
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy}; pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
pub use self::prepared_commit::PreparedCommit; pub use self::prepared_commit::PreparedCommit;
pub use self::segment_entry::SegmentEntry; pub use self::segment_entry::SegmentEntry;
@@ -33,6 +33,7 @@ pub type DefaultMergePolicy = LogMergePolicy;
mod tests { mod tests {
use crate::schema::{self, Schema}; use crate::schema::{self, Schema};
use crate::{Index, Term}; use crate::{Index, Term};
#[test] #[test]
fn test_advance_delete_bug() { fn test_advance_delete_bug() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();


@@ -1,6 +1,7 @@
use super::IndexWriter; use super::IndexWriter;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
use futures::executor::block_on;
/// A prepared commit /// A prepared commit
pub struct PreparedCommit<'a> { pub struct PreparedCommit<'a> {
@@ -32,9 +33,11 @@ impl<'a> PreparedCommit<'a> {
pub fn commit(self) -> Result<Opstamp> { pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
self.index_writer let _ = block_on(
.segment_updater() self.index_writer
.commit(self.opstamp, self.payload)?; .segment_updater()
.schedule_commit(self.opstamp, self.payload),
);
Ok(self.opstamp) Ok(self.opstamp)
} }
} }
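From the caller's side the two-phase commit stays synchronous: `PreparedCommit::commit()` now drives the segment updater through `block_on(schedule_commit(..))` internally, but it still returns only once the commit is done. A minimal sketch, assuming the public `prepare_commit()` API on `IndexWriter`:

    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut index_writer = index.writer(3_000_000)?;
        index_writer.add_document(doc!(title => "first doc"));
        // `commit()` on the prepared commit blocks until the scheduled
        // commit has been executed by the segment updater.
        let mut prepared = index_writer.prepare_commit()?;
        prepared.set_payload("my-checkpoint");
        prepared.commit()?;
        Ok(())
    }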


@@ -1,7 +1,7 @@
use crate::common::BitSet;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet;
use std::fmt; use std::fmt;
/// A segment entry describes the state of /// A segment entry describes the state of


@@ -16,6 +16,28 @@ struct SegmentRegisters {
committed: SegmentRegister, committed: SegmentRegister,
} }
#[derive(PartialEq, Eq)]
pub(crate) enum SegmentsStatus {
Committed,
Uncommitted,
}
impl SegmentRegisters {
/// Check if all the segments are committed or uncommitted.
///
/// If some segment is missing or segments are in a different state (this should not happen
/// if tantivy is used correctly), returns `None`.
fn segments_status(&self, segment_ids: &[SegmentId]) -> Option<SegmentsStatus> {
if self.uncommitted.contains_all(segment_ids) {
Some(SegmentsStatus::Uncommitted)
} else if self.committed.contains_all(segment_ids) {
Some(SegmentsStatus::Committed)
} else {
None
}
}
}
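The classification above (all uncommitted, all committed, otherwise `None`) can be modelled in isolation. A standalone sketch, with plain `HashSet`s standing in for the two `SegmentRegister`s (names are illustrative, not tantivy's internals):

    use std::collections::HashSet;

    #[derive(Debug, PartialEq, Eq)]
    enum SegmentsStatus {
        Committed,
        Uncommitted,
    }

    fn segments_status(
        uncommitted: &HashSet<u64>,
        committed: &HashSet<u64>,
        segment_ids: &[u64],
    ) -> Option<SegmentsStatus> {
        if segment_ids.iter().all(|id| uncommitted.contains(id)) {
            Some(SegmentsStatus::Uncommitted)
        } else if segment_ids.iter().all(|id| committed.contains(id)) {
            Some(SegmentsStatus::Committed)
        } else {
            // Mixed or unknown segments: the caller turns this into an error.
            None
        }
    }

    fn main() {
        let committed: HashSet<u64> = [1, 2].iter().copied().collect();
        let uncommitted: HashSet<u64> = [3].iter().copied().collect();
        assert_eq!(
            segments_status(&uncommitted, &committed, &[1, 2]),
            Some(SegmentsStatus::Committed)
        );
        assert_eq!(segments_status(&uncommitted, &committed, &[1, 3]), None);
    }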
/// The segment manager stores the list of segments /// The segment manager stores the list of segments
/// as well as their state. /// as well as their state.
/// ///
@@ -153,33 +175,35 @@ impl SegmentManager {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.uncommitted.add_segment_entry(segment_entry); registers_lock.uncommitted.add_segment_entry(segment_entry);
} }
// Replace a list of segments for their equivalent merged segment.
pub fn end_merge( //
// Returns whether the merged segments were all committed or all uncommitted.
pub(crate) fn end_merge(
&self, &self,
before_merge_segment_ids: &[SegmentId], before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry, after_merge_segment_entry: SegmentEntry,
) { ) -> crate::Result<SegmentsStatus> {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
let target_register: &mut SegmentRegister = { let segments_status = registers_lock
if registers_lock .segments_status(before_merge_segment_ids)
.uncommitted .ok_or_else(|| {
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.uncommitted
} else if registers_lock
.committed
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager"); warn!("couldn't find segment in SegmentManager");
return; crate::Error::InvalidArgument(
} "The segments that were merged could not be found in the SegmentManager. \
This is not necessarily a bug, and can happen after a rollback for instance."
.to_string(),
)
})?;
let target_register: &mut SegmentRegister = match segments_status {
SegmentsStatus::Uncommitted => &mut registers_lock.uncommitted,
SegmentsStatus::Committed => &mut registers_lock.committed,
}; };
for segment_id in before_merge_segment_ids { for segment_id in before_merge_segment_ids {
target_register.remove_segment(segment_id); target_register.remove_segment(segment_id);
} }
target_register.add_segment_entry(after_merge_segment_entry); target_register.add_segment_entry(after_merge_segment_entry);
Ok(segments_status)
} }
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> { pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {


@@ -1,10 +1,13 @@
use crate::Result; use crate::Directory;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::directory::error::OpenWriteError;
use crate::directory::{DirectoryClone, RAMDirectory, TerminatingWrite, WritePtr};
use crate::fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer; use crate::postings::InvertedIndexSerializer;
use crate::schema::Schema;
use crate::store::StoreWriter; use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
@@ -14,25 +17,50 @@ pub struct SegmentSerializer {
fast_field_serializer: FastFieldSerializer, fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer, fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer, postings_serializer: InvertedIndexSerializer,
bundle_writer: Option<(RAMDirectory, WritePtr)>,
}
pub(crate) struct SegmentSerializerWriters {
postings_wrt: WritePtr,
positions_skip_wrt: WritePtr,
positions_wrt: WritePtr,
terms_wrt: WritePtr,
fast_field_wrt: WritePtr,
fieldnorms_wrt: WritePtr,
store_wrt: WritePtr,
}
impl SegmentSerializerWriters {
pub(crate) fn for_segment(segment: &mut Segment) -> Result<Self, OpenWriteError> {
Ok(SegmentSerializerWriters {
postings_wrt: segment.open_write(SegmentComponent::POSTINGS)?,
positions_skip_wrt: segment.open_write(SegmentComponent::POSITIONS)?,
positions_wrt: segment.open_write(SegmentComponent::POSITIONSSKIP)?,
terms_wrt: segment.open_write(SegmentComponent::TERMS)?,
fast_field_wrt: segment.open_write(SegmentComponent::FASTFIELDS)?,
fieldnorms_wrt: segment.open_write(SegmentComponent::FIELDNORMS)?,
store_wrt: segment.open_write(SegmentComponent::STORE)?,
})
}
} }
impl SegmentSerializer { impl SegmentSerializer {
/// Creates a new `SegmentSerializer`. pub(crate) fn new(schema: Schema, writers: SegmentSerializerWriters) -> crate::Result<Self> {
pub fn for_segment(segment: &mut Segment) -> Result<SegmentSerializer> { let fast_field_serializer = FastFieldSerializer::from_write(writers.fast_field_wrt)?;
let store_write = segment.open_write(SegmentComponent::STORE)?; let fieldnorms_serializer = FieldNormsSerializer::from_write(writers.fieldnorms_wrt)?;
let postings_serializer = InvertedIndexSerializer::open(
let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?; schema,
let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?; writers.terms_wrt,
writers.postings_wrt,
let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?; writers.positions_wrt,
let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?; writers.positions_skip_wrt,
);
let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer { Ok(SegmentSerializer {
store_writer: StoreWriter::new(store_write), store_writer: StoreWriter::new(writers.store_wrt),
fast_field_serializer, fast_field_serializer,
fieldnorms_serializer, fieldnorms_serializer,
postings_serializer, postings_serializer,
bundle_writer: None,
}) })
} }
@@ -57,11 +85,15 @@ impl SegmentSerializer {
} }
/// Finalize the segment serialization. /// Finalize the segment serialization.
pub fn close(self) -> Result<()> { pub fn close(mut self) -> crate::Result<()> {
self.fast_field_serializer.close()?; self.fast_field_serializer.close()?;
self.postings_serializer.close()?; self.postings_serializer.close()?;
self.store_writer.close()?; self.store_writer.close()?;
self.fieldnorms_serializer.close()?; self.fieldnorms_serializer.close()?;
if let Some((ram_directory, mut bundle_wrt)) = self.bundle_writer.take() {
ram_directory.serialize_bundle(&mut bundle_wrt)?;
bundle_wrt.terminate()?;
}
Ok(()) Ok(())
} }
} }


@@ -6,39 +6,35 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone}; use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes; use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory; use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger; use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::segment_serializer::SegmentSerializerWriters;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy}; use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::indexer::{MergeCandidate, MergeOperation};
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use futures::channel::oneshot;
use futures::oneshot; use futures::executor::{ThreadPool, ThreadPoolBuilder};
use futures::sync::oneshot::Receiver; use futures::future::Future;
use futures::Future; use futures::future::TryFutureExt;
use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::io::Write; use std::io::Write;
use std::mem; use std::ops::Deref;
use std::ops::DerefMut;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::thread;
use std::thread::JoinHandle; const NUM_MERGE_THREADS: usize = 4;
/// Save the index meta file. /// Save the index meta file.
/// This operation is atomic : /// This operation is atomic :
@@ -49,7 +45,7 @@ use std::thread::JoinHandle;
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -70,7 +66,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<(
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
info!("save metas"); info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
@@ -89,21 +85,38 @@ fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
// We voluntarily pass a merge_operation ref to guarantee that // We voluntarily pass a merge_operation ref to guarantee that
// the merge_operation is alive during the process // the merge_operation is alive during the process
#[derive(Clone)] #[derive(Clone)]
pub struct SegmentUpdater(Arc<InnerSegmentUpdater>); pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>);
fn perform_merge( impl Deref for SegmentUpdater {
merge_operation: &MergeOperation, type Target = InnerSegmentUpdater;
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
async fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
index
.directory_mut()
.garbage_collect(move || segment_updater.list_files())
}
/// Merges the list of segments given in `segment_entries`.
/// This function runs on the calling thread and is computationally expensive.
fn merge(
index: &Index, index: &Index,
mut segment_entries: Vec<SegmentEntry>, mut segment_entries: Vec<SegmentEntry>,
) -> Result<SegmentEntry> { target_opstamp: Opstamp,
let target_opstamp = merge_operation.target_opstamp(); ) -> crate::Result<SegmentEntry> {
// first we need to apply deletes to our segment. // first we need to apply deletes to our segment.
let mut merged_segment = index.new_segment(); let mut merged_segment = index.new_segment();
// TODO add logging // First we apply all of the deletes to the merged segment, up to the target opstamp.
let schema = index.schema();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = index.segment(segment_entry.meta().clone()); let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?; advance_deletes(segment, segment_entry, target_opstamp)?;
@@ -117,22 +130,21 @@ fn perform_merge(
.collect(); .collect();
// An IndexMerger is like a "view" of our merged segments. // An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?; let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
// ... we just serialize this index merger in our new segment // ... we just serialize this index merger in our new segment to merge the two segments.
// to merge the two segments. let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut merged_segment)?;
let segment_serializer =
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?; SegmentSerializer::new(merged_segment.schema(), segment_serializer_wrts)?;
let num_docs = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs); let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None); Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
Ok(after_merge_segment_entry)
} }
struct InnerSegmentUpdater { pub(crate) struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to // we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the // avoid loading the file every time we need it in the
// `SegmentUpdater`. // `SegmentUpdater`.
@@ -140,12 +152,12 @@ struct InnerSegmentUpdater {
// This should be up to date as all update happen through // This should be up to date as all update happen through
// the unique active `SegmentUpdater`. // the unique active `SegmentUpdater`.
active_metas: RwLock<Arc<IndexMeta>>, active_metas: RwLock<Arc<IndexMeta>>,
pool: CpuPool, pool: ThreadPool,
merge_thread_pool: ThreadPool,
index: Index, index: Index,
segment_manager: SegmentManager, segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>, merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
killed: AtomicBool, killed: AtomicBool,
stamper: Stamper, stamper: Stamper,
merge_operations: MergeOperationInventory, merge_operations: MergeOperationInventory,
@@ -156,22 +168,31 @@ impl SegmentUpdater {
index: Index, index: Index,
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
) -> Result<SegmentUpdater> { ) -> crate::Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?; let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor); let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = CpuPoolBuilder::new() let pool = ThreadPoolBuilder::new()
.name_prefix("segment_updater") .name_prefix("segment_updater")
.pool_size(1) .pool_size(1)
.create(); .create()
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment updater thread".to_string())
})?;
let merge_thread_pool = ThreadPoolBuilder::new()
.name_prefix("merge_thread")
.pool_size(NUM_MERGE_THREADS)
.create()
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment merging thread".to_string())
})?;
let index_meta = index.load_metas()?; let index_meta = index.load_metas()?;
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater { Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
active_metas: RwLock::new(Arc::new(index_meta)), active_metas: RwLock::new(Arc::new(index_meta)),
pool, pool,
merge_thread_pool,
index, index,
segment_manager, segment_manager,
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))), merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
merging_thread_id: AtomicUsize::default(),
merging_threads: RwLock::new(HashMap::new()),
killed: AtomicBool::new(false), killed: AtomicBool::new(false),
stamper, stamper,
merge_operations: Default::default(), merge_operations: Default::default(),
@@ -179,65 +200,82 @@ impl SegmentUpdater {
} }
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> { pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.0.merge_policy.read().unwrap().clone() self.merge_policy.read().unwrap().clone()
} }
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy); let arc_merge_policy = Arc::new(merge_policy);
*self.0.merge_policy.write().unwrap() = arc_merge_policy; *self.merge_policy.write().unwrap() = arc_merge_policy;
} }
fn get_merging_thread_id(&self) -> usize { fn schedule_future<T: 'static + Send, F: Future<Output = crate::Result<T>> + 'static + Send>(
self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
}
fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>(
&self, &self,
f: F, f: F,
) -> CpuFuture<T, TantivyError> { ) -> impl Future<Output = crate::Result<T>> {
let me_clone = self.clone(); let (sender, receiver) = oneshot::channel();
self.0.pool.spawn_fn(move || Ok(f(me_clone))) if self.is_alive() {
self.pool.spawn_ok(async move {
let _ = sender.send(f.await);
});
} else {
let _ = sender.send(Err(crate::TantivyError::SystemError(
"Segment updater killed".to_string(),
)));
}
receiver.unwrap_or_else(|_| {
let err_msg =
"A segment_updater future did not success. This should never happen.".to_string();
Err(crate::Error::SystemError(err_msg))
})
} }
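The `CpuPool`-based `run_async` is replaced by spawning an async block on a `futures` 0.3 `ThreadPool` and handing the caller the receiving end of a oneshot channel. A self-contained sketch of that pattern (illustrative names, not the actual `SegmentUpdater` code):

    use std::future::Future;

    use futures::channel::oneshot;
    use futures::executor::{block_on, ThreadPool, ThreadPoolBuilder};
    use futures::future::TryFutureExt;

    fn schedule<T, F>(pool: &ThreadPool, task: F) -> impl Future<Output = Result<T, String>>
    where
        T: Send + 'static,
        F: Future<Output = Result<T, String>> + Send + 'static,
    {
        let (sender, receiver) = oneshot::channel();
        pool.spawn_ok(async move {
            // Ignore the send error: the caller may have dropped the receiver.
            let _ = sender.send(task.await);
        });
        // If the sender is dropped without sending, surface it as an error.
        receiver.unwrap_or_else(|_| Err("the scheduled task was dropped".to_string()))
    }

    fn main() {
        let pool = ThreadPoolBuilder::new().pool_size(1).create().unwrap();
        let fut = schedule(&pool, async { Ok::<u32, String>(41 + 1) });
        assert_eq!(block_on(fut), Ok(42));
    }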
pub fn add_segment(&self, segment_entry: SegmentEntry) { pub fn schedule_add_segment(
self.run_async(|segment_updater| { &self,
segment_updater.0.segment_manager.add_segment(segment_entry); segment_entry: SegmentEntry,
segment_updater.consider_merge_options(); ) -> impl Future<Output = crate::Result<()>> {
let segment_updater = self.clone();
self.schedule_future(async move {
segment_updater.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options().await;
Ok(())
}) })
.forget();
} }
/// Orders `SegmentManager` to remove all segments /// Orders `SegmentManager` to remove all segments
pub(crate) fn remove_all_segments(&self) { pub(crate) fn remove_all_segments(&self) {
self.0.segment_manager.remove_all_segments(); self.segment_manager.remove_all_segments();
} }
pub fn kill(&mut self) { pub fn kill(&mut self) {
self.0.killed.store(true, Ordering::Release); self.killed.store(true, Ordering::Release);
} }
pub fn is_alive(&self) -> bool { pub fn is_alive(&self) -> bool {
!self.0.killed.load(Ordering::Acquire) !self.killed.load(Ordering::Acquire)
} }
/// Apply deletes up to the target opstamp to all segments. /// Apply deletes up to the target opstamp to all segments.
/// ///
/// The method returns copies of the segment entries, /// The method returns copies of the segment entries,
/// updated with the delete information. /// updated with the delete information.
fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> { fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
let mut segment_entries = self.0.segment_manager.segment_entries(); let mut segment_entries = self.segment_manager.segment_entries();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = self.0.index.segment(segment_entry.meta().clone()); let segment = self.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?; advance_deletes(segment, segment_entry, target_opstamp)?;
} }
Ok(segment_entries) Ok(segment_entries)
} }
pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) { pub fn save_metas(
&self,
opstamp: Opstamp,
commit_message: Option<String>,
) -> crate::Result<()> {
if self.is_alive() { if self.is_alive() {
let index = &self.0.index; let index = &self.index;
let directory = index.directory(); let directory = index.directory();
let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas(); let mut commited_segment_metas = self.segment_manager.committed_segment_metas();
// We sort segment_readers by number of documents. // We sort segment_readers by number of documents.
// This is a heuristic to make multithreading more efficient. // This is a heuristic to make multithreading more efficient.
@@ -259,16 +297,18 @@ impl SegmentUpdater {
opstamp, opstamp,
payload: commit_message, payload: commit_message,
}; };
save_metas(&index_meta, directory.box_clone().borrow_mut()) // TODO add context to the error.
.expect("Could not save metas."); save_metas(&index_meta, directory.box_clone().borrow_mut())?;
self.store_meta(&index_meta); self.store_meta(&index_meta);
} }
Ok(())
} }
pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> { pub fn schedule_garbage_collect(
self.run_async(move |segment_updater| { &self,
segment_updater.garbage_collect_files_exec(); ) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
}) let garbage_collect_future = garbage_collect_files(self.clone());
self.schedule_future(garbage_collect_future)
} }
/// List the files that are useful to the index. /// List the files that are useful to the index.
@@ -276,148 +316,130 @@ impl SegmentUpdater {
/// This does not include lock files, or files that are obsolete /// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector. /// but have not yet been deleted by the garbage collector.
fn list_files(&self) -> HashSet<PathBuf> { fn list_files(&self) -> HashSet<PathBuf> {
let mut files = HashSet::new(); let mut files: HashSet<PathBuf> = self
.index
.list_all_segment_metas()
.into_iter()
.flat_map(|segment_meta| segment_meta.list_files())
.collect();
files.insert(META_FILEPATH.to_path_buf()); files.insert(META_FILEPATH.to_path_buf());
for segment_meta in self.0.index.list_all_segment_metas() {
files.extend(segment_meta.list_files());
}
files files
} }
fn garbage_collect_files_exec(&self) { pub fn schedule_commit(
info!("Running garbage collection"); &self,
let mut index = self.0.index.clone(); opstamp: Opstamp,
index.directory_mut().garbage_collect(|| self.list_files()); payload: Option<String>,
} ) -> impl Future<Output = crate::Result<()>> {
let segment_updater: SegmentUpdater = self.clone();
pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> { self.schedule_future(async move {
self.run_async(move |segment_updater| { let segment_entries = segment_updater.purge_deletes(opstamp)?;
if segment_updater.is_alive() { segment_updater.segment_manager.commit(segment_entries);
let segment_entries = segment_updater segment_updater.save_metas(opstamp, payload)?;
.purge_deletes(opstamp) let _ = garbage_collect_files(segment_updater.clone()).await;
.expect("Failed purge deletes"); segment_updater.consider_merge_options().await;
segment_updater.0.segment_manager.commit(segment_entries); Ok(())
segment_updater.save_metas(opstamp, payload);
segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options();
}
}) })
.wait()
}
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
let commit_opstamp = self.load_metas().opstamp;
let merge_operation = MergeOperation::new(
&self.0.merge_operations,
commit_opstamp,
segment_ids.to_vec(),
);
self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
.wait()?
} }
fn store_meta(&self, index_meta: &IndexMeta) { fn store_meta(&self, index_meta: &IndexMeta) {
*self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone()); *self.active_metas.write().unwrap() = Arc::new(index_meta.clone());
}
fn load_metas(&self) -> Arc<IndexMeta> {
self.0.active_metas.read().unwrap().clone()
} }
fn load_metas(&self) -> Arc<IndexMeta> {
self.active_metas.read().unwrap().clone()
}
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
let commit_opstamp = self.load_metas().opstamp;
MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
}
// Starts a merge operation. This function will block until the merge operation is effectively
// started. Note that it does not wait for the merge to terminate.
// The calling thread should not be blocked for a long time, as this only involves waiting for the
// `SegmentUpdater` queue which in turns only contains lightweight operations.
//
// The merge itself happens on a different thread.
//
// When successful, this function returns a `Future` for a `Result<SegmentMeta>` that represents
// the actual outcome of the merge operation.
//
// It returns an error if for some reason the merge operation could not be started.
//
// At this point an error is not necessarily the sign of a malfunction.
// (e.g. a rollback could have happened between the instant when the merge operation was
// suggested and the moment when it ended up being executed.)
//
// `segment_ids` is required to be non-empty. // `segment_ids` is required to be non-empty.
fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> { pub fn start_merge(
&self,
merge_operation: MergeOperation,
) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
assert!( assert!(
!merge_operation.segment_ids().is_empty(), !merge_operation.segment_ids().is_empty(),
"Segment_ids cannot be empty." "Segment_ids cannot be empty."
); );
let segment_updater_clone = self.clone(); let segment_updater = self.clone();
let segment_entries: Vec<SegmentEntry> = self let segment_entries: Vec<SegmentEntry> = self
.0
.segment_manager .segment_manager
.start_merge(merge_operation.segment_ids())?; .start_merge(merge_operation.segment_ids())?;
// let segment_ids_vec = merge_operation.segment_ids.to_vec(); info!("Starting merge - {:?}", merge_operation.segment_ids());
let merging_thread_id = self.get_merging_thread_id(); let (merging_future_send, merging_future_recv) =
info!( oneshot::channel::<crate::Result<SegmentMeta>>();
"Starting merge thread #{} - {:?}",
merging_thread_id,
merge_operation.segment_ids()
);
let (merging_future_send, merging_future_recv) = oneshot();
// first we need to apply deletes to our segment. self.merge_thread_pool.spawn_ok(async move {
let merging_join_handle = thread::Builder::new() // The fact that `merge_operation` is moved here is important.
.name(format!("mergingthread-{}", merging_thread_id)) // Its lifetime is used to track how many merging thread are currently running,
.spawn(move || { // as well as which segment is currently in merge and therefore should not be
// first we need to apply deletes to our segment. // candidate for another merge.
let merge_result = perform_merge( match merge(
&merge_operation, &segment_updater.index,
&segment_updater_clone.0.index, segment_entries,
segment_entries, merge_operation.target_opstamp(),
); ) {
Ok(after_merge_segment_entry) => {
match merge_result { let segment_meta = segment_updater
Ok(after_merge_segment_entry) => { .end_merge(merge_operation, after_merge_segment_entry)
let merged_segment_meta = after_merge_segment_entry.meta().clone(); .await;
segment_updater_clone let _send_result = merging_future_send.send(segment_meta);
.end_merge(merge_operation, after_merge_segment_entry) }
.expect("Segment updater thread is corrupted."); Err(e) => {
warn!(
// the future may fail if the listener of the oneshot future "Merge of {:?} was cancelled: {:?}",
// has been destroyed. merge_operation.segment_ids().to_vec(),
// e
// This is not a problem here, so we just ignore any );
// possible error. // ... cancel merge
let _merging_future_res = merging_future_send.send(merged_segment_meta); if cfg!(test) {
} panic!("Merge failed.");
Err(e) => {
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids(),
e
);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
}
// As `merge_operation` will be dropped, the segment in merge state will
// be available for merge again.
// `merging_future_send` will be dropped, sending an error to the future.
} }
} }
segment_updater_clone }
.0 });
.merging_threads
.write() Ok(merging_future_recv
.unwrap() .unwrap_or_else(|_| Err(crate::Error::SystemError("Merge failed".to_string()))))
.remove(&merging_thread_id);
Ok(())
})
.expect("Failed to spawn a thread.");
self.0
.merging_threads
.write()
.unwrap()
.insert(merging_thread_id, merging_join_handle);
Ok(merging_future_recv)
} }
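Because `start_merge` now hands back a future, the user-facing `IndexWriter::merge` does too; callers that used to call `.wait()` on a receiver now block on the future, as the tests in this change do. A sketch of the new calling convention:

    use futures::executor::block_on;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let title = schema_builder.add_text_field("title", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut index_writer = index.writer(3_000_000)?;
        for _ in 0..3 {
            index_writer.add_document(doc!(title => "a document per segment"));
            index_writer.commit()?;
        }
        // Merge every searchable segment and wait for the merged SegmentMeta.
        let segment_ids = index.searchable_segment_ids()?;
        let _merged_meta = block_on(index_writer.merge(&segment_ids))?;
        index_writer.wait_merging_threads()?;
        Ok(())
    }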
fn consider_merge_options(&self) { async fn consider_merge_options(&self) {
let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge(); let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
let (committed_segments, uncommitted_segments) = let (committed_segments, uncommitted_segments) =
get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager); get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
// Committed segments cannot be merged with uncommitted_segments. // Committed segments cannot be merged with uncommitted_segments.
// We therefore consider merges using these two sets of segments independently. // We therefore consider merges using these two sets of segments independently.
let merge_policy = self.get_merge_policy(); let merge_policy = self.get_merge_policy();
let current_opstamp = self.0.stamper.stamp(); let current_opstamp = self.stamper.stamp();
let mut merge_candidates: Vec<MergeOperation> = merge_policy let mut merge_candidates: Vec<MergeOperation> = merge_policy
.compute_merge_candidates(&uncommitted_segments) .compute_merge_candidates(&uncommitted_segments)
.into_iter() .into_iter()
.map(|merge_candidate| { .map(|merge_candidate| {
MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0) MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
}) })
.collect(); .collect();
@@ -425,25 +447,18 @@ impl SegmentUpdater {
let committed_merge_candidates = merge_policy let committed_merge_candidates = merge_policy
.compute_merge_candidates(&committed_segments) .compute_merge_candidates(&committed_segments)
.into_iter() .into_iter()
.map(|merge_candidate| { .map(|merge_candidate: MergeCandidate| {
MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0) MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter()); merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates { for merge_operation in merge_candidates {
match self.start_merge_impl(merge_operation) { if let Err(err) = self.start_merge(merge_operation) {
Ok(merge_future) => { warn!(
if let Err(e) = merge_future.fuse().poll() { "Starting the merge failed for the following reason. This is not fatal. {}",
error!("The merge task failed quickly after starting: {:?}", e); err
} );
}
Err(err) => {
warn!(
"Starting the merge failed for the following reason. This is not fatal. {}",
err
);
}
} }
} }
} }
@@ -452,15 +467,17 @@ impl SegmentUpdater {
&self, &self,
merge_operation: MergeOperation, merge_operation: MergeOperation,
mut after_merge_segment_entry: SegmentEntry, mut after_merge_segment_entry: SegmentEntry,
) -> Result<()> { ) -> impl Future<Output = crate::Result<SegmentMeta>> {
self.run_async(move |segment_updater| { let segment_updater = self.clone();
let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
let end_merge_future = self.schedule_future(async move {
info!("End merge {:?}", after_merge_segment_entry.meta()); info!("End merge {:?}", after_merge_segment_entry.meta());
{ {
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone(); let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
if let Some(delete_operation) = delete_cursor.get() { if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.load_metas().opstamp; let committed_opstamp = segment_updater.load_metas().opstamp;
if delete_operation.opstamp < committed_opstamp { if delete_operation.opstamp < committed_opstamp {
let index = &segment_updater.0.index; let index = &segment_updater.index;
let segment = index.segment(after_merge_segment_entry.meta().clone()); let segment = index.segment(after_merge_segment_entry.meta().clone());
if let Err(e) = advance_deletes( if let Err(e) = advance_deletes(
segment, segment,
@@ -478,21 +495,26 @@ impl SegmentUpdater {
// ... cancel merge // ... cancel merge
// `merge_operations` are tracked. As it is dropped, the // `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge. // the segment_ids will be available again for merge.
return; return Err(e);
} }
} }
} }
let previous_metas = segment_updater.load_metas(); let previous_metas = segment_updater.load_metas();
segment_updater let segments_status = segment_updater
.0
.segment_manager .segment_manager
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry); .end_merge(merge_operation.segment_ids(), after_merge_segment_entry)?;
segment_updater.consider_merge_options();
segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone()); if segments_status == SegmentsStatus::Committed {
segment_updater
.save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
}
segment_updater.consider_merge_options().await;
} // we drop all possible handles to a now useless `SegmentMeta`. } // we drop all possible handles to a now useless `SegmentMeta`.
segment_updater.garbage_collect_files_exec(); let _ = garbage_collect_files(segment_updater).await;
}) Ok(())
.wait() });
end_merge_future.map_ok(|_| after_merge_segment_meta)
} }
/// Wait for current merging threads. /// Wait for current merging threads.
@@ -510,26 +532,9 @@ impl SegmentUpdater {
/// ///
/// Obsolete files will eventually be cleaned up /// Obsolete files will eventually be cleaned up
/// by the directory garbage collector. /// by the directory garbage collector.
pub fn wait_merging_thread(&self) -> Result<()> { pub fn wait_merging_thread(&self) -> crate::Result<()> {
loop { self.merge_operations.wait_until_empty();
let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = { Ok(())
let mut merging_threads = self.0.merging_threads.write().unwrap();
mem::replace(merging_threads.deref_mut(), HashMap::new())
};
if merging_threads.is_empty() {
return Ok(());
}
debug!("wait merging thread {}", merging_threads.len());
for (_, merging_thread_handle) in merging_threads {
merging_thread_handle
.join()
.map(|_| ())
.map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
}
// Our merging thread may have queued their completed merged segment.
// Let's wait for that too.
self.run_async(move |_| {}).wait()?;
}
} }
} }
@@ -685,7 +690,6 @@ mod tests {
index_writer.segment_updater().remove_all_segments(); index_writer.segment_updater().remove_all_segments();
let seg_vec = index_writer let seg_vec = index_writer
.segment_updater() .segment_updater()
.0
.segment_manager .segment_manager
.segment_entries(); .segment_entries();
assert!(seg_vec.is_empty()); assert!(seg_vec.is_empty());


@@ -3,7 +3,7 @@ use crate::core::Segment;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::{SegmentSerializer, SegmentSerializerWriters};
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldType; use crate::schema::FieldType;
@@ -69,7 +69,8 @@ impl SegmentWriter {
schema: &Schema, schema: &Schema,
) -> Result<SegmentWriter> { ) -> Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?; let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut segment)?;
let segment_serializer = SegmentSerializer::new(segment.schema(), segment_serializer_wrts)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers = schema
.fields() .fields()
@@ -249,6 +250,7 @@ impl SegmentWriter {
} }
} }
doc.filter_fields(|field| schema.get_field_entry(field).is_stored()); doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
doc.prepare_for_store();
let doc_writer = self.segment_serializer.get_store_writer(); let doc_writer = self.segment_serializer.get_store_writer();
doc_writer.store(&doc)?; doc_writer.store(&doc)?;
self.max_doc += 1; self.max_doc += 1;

src/lib.rs: 137 changes (Executable file → Normal file)

@@ -160,7 +160,6 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent; pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -170,11 +169,58 @@ pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings; pub use crate::postings::Postings;
pub use crate::reader::LeasedItem; pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term}; pub use crate::schema::{Document, Term};
use std::fmt;
/// Expose the current version of tantivy, as well use once_cell::sync::Lazy;
/// whether it was compiled with the simd compression.
pub fn version() -> &'static str { /// Index format version.
env!("CARGO_PKG_VERSION") const INDEX_FORMAT_VERSION: u32 = 1;
/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
major: u32,
minor: u32,
patch: u32,
index_format_version: u32,
store_compression: String,
}
impl fmt::Debug for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
static VERSION: Lazy<Version> = Lazy::new(|| Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
index_format_version: INDEX_FORMAT_VERSION,
store_compression: crate::store::COMPRESSION.to_string(),
});
impl ToString for Version {
fn to_string(&self) -> String {
format!(
"tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
self.major, self.minor, self.patch, self.index_format_version, self.store_compression
)
}
}
static VERSION_STRING: Lazy<String> = Lazy::new(|| VERSION.to_string());
/// Expose the current version of tantivy as found in Cargo.toml during compilation.
/// eg. "0.11.0" as well as the compression scheme used in the docstore.
pub fn version() -> &'static Version {
&VERSION
}
/// Exposes the complete version of tantivy as found in Cargo.toml during compilation as a string.
/// eg. "tantivy v0.11.0, index_format v1, store_compression: lz4".
pub fn version_string() -> &'static str {
VERSION_STRING.as_str()
} }
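With this change `version()` returns the structured `Version` (which also implements `Debug` and `ToString`), and callers that want a one-line string use `version_string()`. For example:

    fn main() {
        // e.g. "tantivy v0.11.3, index_format v1, store_compression: lz4"
        println!("{}", tantivy::version_string());
        // The structured form exposes the same information as a value.
        println!("{:?}", tantivy::version());
    }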
/// Defines tantivy's merging strategy /// Defines tantivy's merging strategy
@@ -287,6 +333,18 @@ mod tests {
sample_with_seed(n, ratio, 4) sample_with_seed(n, ratio, 4)
} }
#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
let version = super::version().to_string();
assert!(regex_ptn.find(&version).is_some());
}
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() { fn test_indexing() {
@@ -882,4 +940,73 @@ mod tests {
assert_eq!(fast_field_reader.get(0), 4f64) assert_eq!(fast_field_reader.get(0), 4f64)
} }
} }
// motivated by #729
#[test]
fn test_update_via_delete_insert() {
use crate::collector::Count;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
use crate::SegmentId;
use futures::executor::block_on;
const DOC_COUNT: u64 = 2u64;
let mut schema_builder = SchemaBuilder::default();
let id = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader().unwrap();
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id));
}
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
// update each of the documents by deleting and re-adding it
for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let doc = doc!(id => doc_id);
index_writer.add_document(doc);
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
// The number of documents should be stable.
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.into_iter()
.map(|reader| reader.segment_id())
.collect();
block_on(index_writer.merge(&segment_ids)).unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
} }


@@ -35,9 +35,9 @@
/// let likes = schema_builder.add_u64_field("num_u64", FAST); /// let likes = schema_builder.add_u64_field("num_u64", FAST);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let doc = doc!( /// let doc = doc!(
/// title => "Life Aquatic", /// title => "Life Aquatic",
/// author => "Wes Anderson", /// author => "Wes Anderson",
/// likes => 4u64 /// likes => 4u64
/// ); /// );
/// # } /// # }
/// ``` /// ```


@@ -36,11 +36,10 @@ struct Positions {
impl Positions { impl Positions {
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions { pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let skip_len = skip_source.len(); let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted"); let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let body_split = body.len() - u64::SIZE_IN_BYTES * (num_long_skips as usize); let (skip_source, long_skip_source) =
let (skip_source, long_skip_source) = body.split(body_split); body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
Positions { Positions {
bit_packer: BitPacker4x::new(), bit_packer: BitPacker4x::new(),
skip_source, skip_source,
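`split_from_end(n)` splits off the last `n` bytes of the source, which removes the manual length arithmetic from the old code. The same operation on a plain byte slice, as a hypothetical helper (not the `ReadOnlySource` API itself):

    /// Returns (everything but the last `n` bytes, the last `n` bytes).
    /// Panics if `n > data.len()`, just like `split_at` would.
    fn split_from_end(data: &[u8], n: usize) -> (&[u8], &[u8]) {
        data.split_at(data.len() - n)
    }

    fn main() {
        let bytes = [1u8, 2, 3, 4, 5, 6];
        let (body, footer) = split_from_end(&bytes, 4);
        assert_eq!(body, &[1u8, 2][..]);
        assert_eq!(footer, &[3u8, 4, 5, 6][..]);
    }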


@@ -75,7 +75,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut segment = index.new_segment(); let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap(); let mut posting_serializer = InvertedIndexSerializer::for_segment(&mut segment).unwrap();
{ {
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap(); let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap(); field_serializer.new_term("abc".as_bytes()).unwrap();


@@ -10,8 +10,8 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use crate::Result; use crate::Result;
use crate::{Directory, DocId};
use std::io::{self, Write}; use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -54,33 +54,36 @@ pub struct InvertedIndexSerializer {
} }
impl InvertedIndexSerializer { impl InvertedIndexSerializer {
/// Open a new `InvertedIndexSerializer` for the given segment pub(crate) fn for_segment(segment: &mut Segment) -> crate::Result<Self> {
fn create( let schema = segment.schema();
terms_write: CompositeWrite<WritePtr>, use crate::core::SegmentComponent;
postings_write: CompositeWrite<WritePtr>, let terms_wrt = segment.open_write(SegmentComponent::TERMS)?;
positions_write: CompositeWrite<WritePtr>, let postings_wrt = segment.open_write(SegmentComponent::POSTINGS)?;
positionsidx_write: CompositeWrite<WritePtr>, let positions_wrt = segment.open_write(SegmentComponent::POSITIONS)?;
schema: Schema, let positions_idx_wrt = segment.open_write(SegmentComponent::POSITIONSSKIP)?;
) -> Result<InvertedIndexSerializer> { Ok(Self::open(
Ok(InvertedIndexSerializer {
terms_write,
postings_write,
positions_write,
positionsidx_write,
schema, schema,
}) terms_wrt,
postings_wrt,
positions_wrt,
positions_idx_wrt,
))
} }
/// Open a new `PostingsSerializer` for the given segment /// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> { pub(crate) fn open(
use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS}; schema: Schema,
InvertedIndexSerializer::create( terms_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(TERMS)?), postings_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(POSTINGS)?), positions_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(POSITIONS)?), positions_idx_wrt: WritePtr,
CompositeWrite::wrap(segment.open_write(POSITIONSSKIP)?), ) -> InvertedIndexSerializer {
segment.schema(), InvertedIndexSerializer {
) terms_write: CompositeWrite::wrap(terms_wrt),
postings_write: CompositeWrite::wrap(postings_wrt),
positions_write: CompositeWrite::wrap(positions_wrt),
positionsidx_write: CompositeWrite::wrap(positions_idx_wrt),
schema,
}
} }
/// Must be called before starting pushing terms of /// Must be called before starting pushing terms of


@@ -54,21 +54,21 @@ where
match self.excluding_state { match self.excluding_state {
State::ExcludeOne(excluded_doc) => { State::ExcludeOne(excluded_doc) => {
if doc == excluded_doc { if doc == excluded_doc {
false return false;
} else if excluded_doc > doc { }
true if excluded_doc > doc {
} else { return true;
match self.excluding_docset.skip_next(doc) { }
SkipResult::OverStep => { match self.excluding_docset.skip_next(doc) {
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc()); SkipResult::OverStep => {
true self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
} true
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
} }
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
} }
} }
State::Finished => true, State::Finished => true,


@@ -33,7 +33,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term}; /// use tantivy::{doc, Index, Result, Term};
/// ///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
@@ -59,7 +58,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
///
/// let term = Term::from_field_text(title, "Diary"); /// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true); /// let query = FuzzyTermQuery::new(term, 1, true);
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap(); /// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
@@ -69,6 +67,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// # assert!(example().is_ok());
/// ``` /// ```
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct FuzzyTermQuery { pub struct FuzzyTermQuery {


@@ -4,6 +4,7 @@ use crate::postings::Postings;
use crate::query::bm25::BM25Weight; use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer}; use crate::query::{Intersection, Scorer};
use crate::DocId; use crate::DocId;
use std::cmp::Ordering;
struct PostingsWithOffset<TPostings> { struct PostingsWithOffset<TPostings> {
offset: u32, offset: u32,
@@ -59,12 +60,16 @@ fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
while left_i < left.len() && right_i < right.len() { while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
if left_val < right_val { match left_val.cmp(&right_val) {
left_i += 1; Ordering::Less => {
} else if right_val < left_val { left_i += 1;
right_i += 1; }
} else { Ordering::Equal => {
return true; return true;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
false false
@@ -77,14 +82,18 @@ fn intersection_count(left: &[u32], right: &[u32]) -> usize {
while left_i < left.len() && right_i < right.len() { while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
if left_val < right_val { match left_val.cmp(&right_val) {
left_i += 1; Ordering::Less => {
} else if right_val < left_val { left_i += 1;
right_i += 1; }
} else { Ordering::Equal => {
count += 1; count += 1;
left_i += 1; left_i += 1;
right_i += 1; right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
count count
@@ -103,15 +112,19 @@ fn intersection(left: &mut [u32], right: &[u32]) -> usize {
while left_i < left_len && right_i < right_len { while left_i < left_len && right_i < right_len {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
if left_val < right_val { match left_val.cmp(&right_val) {
left_i += 1; Ordering::Less => {
} else if right_val < left_val { left_i += 1;
right_i += 1; }
} else { Ordering::Equal => {
left[count] = left_val; left[count] = left_val;
count += 1; count += 1;
left_i += 1; left_i += 1;
right_i += 1; right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
count count
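These helpers are the classic two-pointer intersection over two sorted doc-id slices, now written with a `match` on `Ordering` instead of chained `if`/`else`. A self-contained version of the counting variant:

    use std::cmp::Ordering;

    /// Counts the values present in both sorted, deduplicated slices.
    fn intersection_count(left: &[u32], right: &[u32]) -> usize {
        let (mut left_i, mut right_i, mut count) = (0, 0, 0);
        while left_i < left.len() && right_i < right.len() {
            match left[left_i].cmp(&right[right_i]) {
                Ordering::Less => left_i += 1,
                Ordering::Equal => {
                    count += 1;
                    left_i += 1;
                    right_i += 1;
                }
                Ordering::Greater => right_i += 1,
            }
        }
        count
    }

    fn main() {
        assert_eq!(intersection_count(&[1, 3, 5, 7], &[2, 3, 4, 7, 9]), 2);
    }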


@@ -8,7 +8,7 @@ use crate::query::PhraseQuery;
use crate::query::Query; use crate::query::Query;
use crate::query::RangeQuery; use crate::query::RangeQuery;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::IndexRecordOption; use crate::schema::{Facet, IndexRecordOption};
use crate::schema::{Field, Schema}; use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term}; use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager; use crate::tokenizer::TokenizerManager;
@@ -319,7 +319,10 @@ impl QueryParser {
)) ))
} }
} }
FieldType::HierarchicalFacet => Ok(vec![(0, Term::from_field_text(field, phrase))]), FieldType::HierarchicalFacet => {
let facet = Facet::from_text(phrase);
Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
}
FieldType::Bytes => { FieldType::Bytes => {
let field_name = self.schema.get_field_name(field).to_string(); let field_name = self.schema.get_field_name(field).to_string();
Err(QueryParserError::FieldNotIndexed(field_name)) Err(QueryParserError::FieldNotIndexed(field_name))
@@ -554,6 +557,7 @@ mod test {
schema_builder.add_text_field("with_stop_words", text_options); schema_builder.add_text_field("with_stop_words", text_options);
schema_builder.add_date_field("date", INDEXED); schema_builder.add_date_field("date", INDEXED);
schema_builder.add_f64_field("float", INDEXED); schema_builder.add_f64_field("float", INDEXED);
schema_builder.add_facet_field("facet");
let schema = schema_builder.build(); let schema = schema_builder.build();
let default_fields = vec![title, text]; let default_fields = vec![title, text];
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
@@ -588,9 +592,13 @@ mod test {
} }
#[test] #[test]
pub fn test_parse_query_simple() { pub fn test_parse_query_facet() {
let query_parser = make_query_parser(); let query_parser = make_query_parser();
assert!(query_parser.parse_query("toto").is_ok()); let query = query_parser.parse_query("facet:/root/branch/leaf").unwrap();
assert_eq!(
format!("{:?}", query),
"TermQuery(Term(field=11,bytes=[114, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102]))"
);
} }
#[test] #[test]
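
The new `HierarchicalFacet` arm converts the query phrase into a `Facet` and indexes it through its encoded form; the byte sequence expected by `test_parse_query_facet` is simply the facet path segments joined by a NUL (0) separator. A standalone sketch of that encoding (the `encode_facet_path` helper is hypothetical, not tantivy's actual `Facet` implementation):

    /// Joins facet path segments with a 0 byte, e.g. "root\0branch\0leaf".
    fn encode_facet_path(segments: &[&str]) -> Vec<u8> {
        segments.join("\0").into_bytes()
    }

    fn main() {
        let encoded = encode_facet_path(&["root", "branch", "leaf"]);
        assert_eq!(encoded, b"root\0branch\0leaf".to_vec());
        // The same bytes expected by test_parse_query_facet above.
        assert_eq!(
            encoded,
            vec![114u8, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102]
        );
    }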

View File

@@ -38,41 +38,33 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example /// # Example
/// ///
/// ```rust /// ```rust
/// # use tantivy::collector::Count; /// use tantivy::collector::Count;
/// # use tantivy::query::RangeQuery; /// use tantivy::query::RangeQuery;
/// # use tantivy::schema::{Schema, INDEXED}; /// use tantivy::schema::{Schema, INDEXED};
/// # use tantivy::{doc, Index, Result}; /// use tantivy::{doc, Index};
/// # /// # fn test() -> tantivy::Result<()> {
/// # fn run() -> Result<()> { /// let mut schema_builder = Schema::builder();
/// # let mut schema_builder = Schema::builder(); /// let year_field = schema_builder.add_u64_field("year", INDEXED);
/// # let year_field = schema_builder.add_u64_field("year", INDEXED); /// let schema = schema_builder.build();
/// # let schema = schema_builder.build(); ///
/// # /// let index = Index::create_in_ram(schema);
/// # let index = Index::create_in_ram(schema); /// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
/// # { /// for year in 1950u64..2017u64 {
/// # let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap(); /// let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// # for year in 1950u64..2017u64 { /// for _ in 0..num_docs_within_year {
/// # let num_docs_within_year = 10 + (year - 1950) * (year - 1950); /// index_writer.add_document(doc!(year_field => year));
/// # for _ in 0..num_docs_within_year { /// }
/// # index_writer.add_document(doc!(year_field => year)); /// }
/// # } /// index_writer.commit()?;
/// # } ///
/// # index_writer.commit().unwrap(); /// let reader = index.reader()?;
/// # }
/// # let reader = index.reader()?;
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970); /// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?; /// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
/// /// assert_eq!(num_60s_books, 2285);
/// # assert_eq!(num_60s_books, 2285); /// Ok(())
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # run().unwrap()
/// # } /// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RangeQuery { pub struct RangeQuery {

View File

@@ -15,40 +15,40 @@ use tantivy_fst::Regex;
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery; /// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term}; /// use tantivy::{doc, Index, Term};
/// ///
/// # fn main() { example().unwrap(); } /// # fn test() -> tantivy::Result<()> {
/// fn example() -> Result<()> { /// let mut schema_builder = Schema::builder();
/// let mut schema_builder = Schema::builder(); /// let title = schema_builder.add_text_field("title", TEXT);
/// let title = schema_builder.add_text_field("title", TEXT); /// let schema = schema_builder.build();
/// let schema = schema_builder.build(); /// let index = Index::create_in_ram(schema);
/// let index = Index::create_in_ram(schema); /// {
/// { /// let mut index_writer = index.writer(3_000_000)?;
/// let mut index_writer = index.writer(3_000_000)?; /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "The Name of the Wind",
/// title => "The Name of the Wind", /// ));
/// )); /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "The Diary of Muadib",
/// title => "The Diary of Muadib", /// ));
/// )); /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "A Dairy Cow",
/// title => "A Dairy Cow", /// ));
/// )); /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "The Diary of a Young Girl",
/// title => "The Diary of a Young Girl", /// ));
/// )); /// index_writer.commit().unwrap();
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// } /// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RegexQuery { pub struct RegexQuery {

View File

@@ -23,42 +23,39 @@ use std::fmt;
/// use tantivy::collector::{Count, TopDocs}; /// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery; /// use tantivy::query::TermQuery;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption}; /// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{doc, Index, Result, Term}; /// use tantivy::{doc, Index, Term};
/// /// # fn test() -> tantivy::Result<()> {
/// # fn main() { example().unwrap(); } /// let mut schema_builder = Schema::builder();
/// fn example() -> Result<()> { /// let title = schema_builder.add_text_field("title", TEXT);
/// let mut schema_builder = Schema::builder(); /// let schema = schema_builder.build();
/// let title = schema_builder.add_text_field("title", TEXT); /// let index = Index::create_in_ram(schema);
/// let schema = schema_builder.build(); /// {
/// let index = Index::create_in_ram(schema); /// let mut index_writer = index.writer(3_000_000)?;
/// { /// index_writer.add_document(doc!(
/// let mut index_writer = index.writer(3_000_000)?; /// title => "The Name of the Wind",
/// index_writer.add_document(doc!( /// ));
/// title => "The Name of the Wind", /// index_writer.add_document(doc!(
/// )); /// title => "The Diary of Muadib",
/// index_writer.add_document(doc!( /// ));
/// title => "The Diary of Muadib", /// index_writer.add_document(doc!(
/// )); /// title => "A Dairy Cow",
/// index_writer.add_document(doc!( /// ));
/// title => "A Dairy Cow", /// index_writer.add_document(doc!(
/// )); /// title => "The Diary of a Young Girl",
/// index_writer.add_document(doc!( /// ));
/// title => "The Diary of a Young Girl", /// index_writer.commit()?;
/// ));
/// index_writer.commit()?;
/// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// assert_eq!(count, 2);
///
/// Ok(())
/// } /// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
/// assert_eq!(count, 2);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Clone)] #[derive(Clone)]
pub struct TermQuery { pub struct TermQuery {

View File

@@ -162,6 +162,11 @@ pub struct IndexReader {
} }
impl IndexReader { impl IndexReader {
#[cfg(test)]
pub(crate) fn index(&self) -> Index {
self.inner.index.clone()
}
/// Update searchers so that they reflect the state of the last /// Update searchers so that they reflect the state of the last
/// `.commit()`. /// `.commit()`.
/// ///

View File

@@ -167,7 +167,7 @@ mod tests {
use super::Pool; use super::Pool;
use super::Queue; use super::Queue;
use std::iter; use std::{iter, mem};
#[test] #[test]
fn test_pool() { fn test_pool() {
@@ -197,33 +197,67 @@ mod tests {
fn test_pool_dont_panic_on_empty_pop() { fn test_pool_dont_panic_on_empty_pop() {
// When the object pool is exhausted, it shouldn't panic on pop() // When the object pool is exhausted, it shouldn't panic on pop()
use std::sync::Arc; use std::sync::Arc;
use std::{thread, time}; use std::thread;
// Wrap the pool in an Arc, same way as its used in `core/index.rs` // Wrap the pool in an Arc, same way as its used in `core/index.rs`
let pool = Arc::new(Pool::new()); let pool1 = Arc::new(Pool::new());
// clone pools outside the move scope of each new thread // clone pools outside the move scope of each new thread
let pool1 = Arc::clone(&pool); let pool2 = Arc::clone(&pool1);
let pool2 = Arc::clone(&pool); let pool3 = Arc::clone(&pool1);
let elements_for_pool = vec![1, 2]; let elements_for_pool = vec![1, 2];
pool.publish_new_generation(elements_for_pool); pool1.publish_new_generation(elements_for_pool);
let mut threads = vec![]; let mut threads = vec![];
let sleep_dur = time::Duration::from_millis(10);
// spawn one more thread than there are elements in the pool // spawn one more thread than there are elements in the pool
let (start_1_send, start_1_recv) = crossbeam::bounded(0);
let (start_2_send, start_2_recv) = crossbeam::bounded(0);
let (start_3_send, start_3_recv) = crossbeam::bounded(0);
let (event_send1, event_recv) = crossbeam::unbounded();
let event_send2 = event_send1.clone();
let event_send3 = event_send1.clone();
threads.push(thread::spawn(move || { threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called assert_eq!(start_1_recv.recv(), Ok("start"));
let _leased_searcher = &pool.acquire();
thread::sleep(sleep_dur);
}));
threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool1.acquire(); let _leased_searcher = &pool1.acquire();
thread::sleep(sleep_dur); assert!(event_send1.send("1 acquired").is_ok());
assert_eq!(start_1_recv.recv(), Ok("stop"));
assert!(event_send1.send("1 stopped").is_ok());
mem::drop(_leased_searcher);
})); }));
threads.push(thread::spawn(move || { threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called assert_eq!(start_2_recv.recv(), Ok("start"));
let _leased_searcher = &pool2.acquire(); let _leased_searcher = &pool2.acquire();
thread::sleep(sleep_dur); assert!(event_send2.send("2 acquired").is_ok());
assert_eq!(start_2_recv.recv(), Ok("stop"));
mem::drop(_leased_searcher);
assert!(event_send2.send("2 stopped").is_ok());
})); }));
threads.push(thread::spawn(move || {
assert_eq!(start_3_recv.recv(), Ok("start"));
let _leased_searcher = &pool3.acquire();
assert!(event_send3.send("3 acquired").is_ok());
assert_eq!(start_3_recv.recv(), Ok("stop"));
mem::drop(_leased_searcher);
assert!(event_send3.send("3 stopped").is_ok());
}));
assert!(start_1_send.send("start").is_ok());
assert_eq!(event_recv.recv(), Ok("1 acquired"));
assert!(start_2_send.send("start").is_ok());
assert_eq!(event_recv.recv(), Ok("2 acquired"));
assert!(start_3_send.send("start").is_ok());
assert!(event_recv.try_recv().is_err());
assert!(start_1_send.send("stop").is_ok());
assert_eq!(event_recv.recv(), Ok("1 stopped"));
assert_eq!(event_recv.recv(), Ok("3 acquired"));
assert!(start_3_send.send("stop").is_ok());
assert_eq!(event_recv.recv(), Ok("3 stopped"));
assert!(start_2_send.send("stop").is_ok());
assert_eq!(event_recv.recv(), Ok("2 stopped"));
} }
} }
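
The reworked test drops the `thread::sleep` calls and instead coordinates the three threads with crossbeam channels: a zero-capacity (`bounded(0)`) channel blocks the sender until the matching `recv`, so the acquire/release ordering becomes deterministic. A minimal sketch of that rendezvous pattern for a single worker, assuming the `crossbeam` crate used in the diff:

    use std::thread;

    fn main() {
        // Zero-capacity channel: send() blocks until the worker calls recv().
        let (start_send, start_recv) = crossbeam::bounded::<&str>(0);
        let (event_send, event_recv) = crossbeam::unbounded::<&str>();

        let worker = thread::spawn(move || {
            assert_eq!(start_recv.recv(), Ok("start"));
            event_send.send("acquired").unwrap();
            assert_eq!(start_recv.recv(), Ok("stop"));
            event_send.send("stopped").unwrap();
        });

        start_send.send("start").unwrap();
        assert_eq!(event_recv.recv(), Ok("acquired"));
        start_send.send("stop").unwrap();
        assert_eq!(event_recv.recv(), Ok("stopped"));
        worker.join().unwrap();
    }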

View File

@@ -155,6 +155,21 @@ impl Document {
.find(|field_value| field_value.field() == field) .find(|field_value| field_value.field() == field)
.map(FieldValue::value) .map(FieldValue::value)
} }
/// Prepares Document for being stored in the document store
///
/// Method transforms PreTokenizedString values into String
/// values.
pub fn prepare_for_store(&mut self) {
for field_value in &mut self.field_values {
if let Value::PreTokStr(pre_tokenized_text) = field_value.value() {
*field_value = FieldValue::new(
field_value.field(),
Value::Str(pre_tokenized_text.text.clone()), //< TODO somehow remove .clone()
);
}
}
}
} }
impl BinarySerializable for Document { impl BinarySerializable for Document {
@@ -180,6 +195,7 @@ impl BinarySerializable for Document {
mod tests { mod tests {
use crate::schema::*; use crate::schema::*;
use crate::tokenizer::{PreTokenizedString, Token};
#[test] #[test]
fn test_doc() { fn test_doc() {
@@ -189,4 +205,38 @@ mod tests {
doc.add_text(text_field, "My title"); doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1); assert_eq!(doc.field_values().len(), 1);
} }
#[test]
fn test_prepare_for_store() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = Document::default();
let pre_tokenized_text = PreTokenizedString {
text: String::from("A"),
tokens: vec![Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
}],
};
doc.add_pre_tokenized_text(text_field, &pre_tokenized_text);
doc.add_text(text_field, "title");
doc.prepare_for_store();
assert_eq!(doc.field_values().len(), 2);
match doc.field_values()[0].value() {
Value::Str(ref text) => assert_eq!(text, "A"),
_ => panic!("Incorrect variant of Value"),
}
match doc.field_values()[1].value() {
Value::Str(ref text) => assert_eq!(text, "title"),
_ => panic!("Incorrect variant of Value"),
}
}
} }

View File

@@ -6,6 +6,7 @@ use crate::schema::TextFieldIndexing;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{IntOptions, TextOptions}; use crate::schema::{IntOptions, TextOptions};
use crate::tokenizer::PreTokenizedString; use crate::tokenizer::PreTokenizedString;
use chrono::{FixedOffset, Utc};
use serde_json::Value as JsonValue; use serde_json::Value as JsonValue;
/// Possible error that may occur while parsing a field value /// Possible error that may occur while parsing a field value
@@ -124,13 +125,20 @@ impl FieldType {
pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> { pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> {
match *json { match *json {
JsonValue::String(ref field_text) => match *self { JsonValue::String(ref field_text) => match *self {
FieldType::Str(_) => Ok(Value::Str(field_text.clone())), FieldType::Date(_) => {
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => { let dt_with_fixed_tz: chrono::DateTime<FixedOffset> =
Err(ValueParsingError::TypeError(format!( chrono::DateTime::parse_from_rfc3339(field_text).map_err(|err|
"Expected an integer, got {:?}", ValueParsingError::TypeError(format!(
json "Failed to parse date from JSON. Expected rfc3339 format, got {}. {:?}",
))) field_text, err
))
)?;
Ok(Value::Date(dt_with_fixed_tz.with_timezone(&Utc)))
} }
FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) => Err(
ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
),
FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))), FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| { FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
ValueParsingError::InvalidBase64(format!( ValueParsingError::InvalidBase64(format!(
@@ -208,7 +216,35 @@ mod tests {
use crate::schema::field_type::ValueParsingError; use crate::schema::field_type::ValueParsingError;
use crate::schema::TextOptions; use crate::schema::TextOptions;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{Schema, INDEXED};
use crate::tokenizer::{PreTokenizedString, Token}; use crate::tokenizer::{PreTokenizedString, Token};
use crate::{DateTime, Document};
use chrono::{NaiveDate, NaiveDateTime, NaiveTime, Utc};
#[test]
fn test_deserialize_json_date() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build();
let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
let doc = schema.parse_document(doc_json).unwrap();
let date = doc.get_first(date_field).unwrap();
assert_eq!(format!("{:?}", date), "Date(2019-10-12T05:20:50.520Z)");
}
#[test]
fn test_serialize_json_date() {
let mut doc = Document::new();
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build();
let naive_date = NaiveDate::from_ymd(1982, 9, 17);
let naive_time = NaiveTime::from_hms(13, 20, 00);
let date_time = DateTime::from_utc(NaiveDateTime::new(naive_date, naive_time), Utc);
doc.add_date(date_field, &date_time);
let doc_json = schema.to_json(&doc);
assert_eq!(doc_json, r#"{"date":["1982-09-17T13:20:00+00:00"]}"#);
}
#[test] #[test]
fn test_bytes_value_from_json() { fn test_bytes_value_from_json() {
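
`value_from_json` now accepts a date as an RFC 3339 string: the value is parsed with its fixed offset and then normalized to UTC, which is why the `+02:00` input in `test_deserialize_json_date` shows up as `05:20:50.520Z`. A standalone sketch of that conversion using the `chrono` crate:

    use chrono::{DateTime, FixedOffset, Utc};

    fn main() {
        // Parse a fixed-offset RFC 3339 timestamp, then normalize it to UTC.
        let parsed: DateTime<FixedOffset> =
            DateTime::parse_from_rfc3339("2019-10-12T07:20:50.52+02:00")
                .expect("valid rfc3339 timestamp");
        let utc: DateTime<Utc> = parsed.with_timezone(&Utc);
        assert_eq!(format!("{:?}", utc), "2019-10-12T05:20:50.520Z");
    }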

View File

@@ -53,7 +53,7 @@ where
fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output { fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output {
SchemaFlagList { SchemaFlagList {
head: head.head, head: head.head,
tail: self.clone(), tail: self,
} }
} }
} }

View File

@@ -44,7 +44,7 @@ We can split the problem of generating a search result page into two phases :
the search results page. (`doc_ids[] -> Document[]`) the search results page. (`doc_ids[] -> Document[]`)
In the first phase, the ability to search for documents by the given field is determined by the In the first phase, the ability to search for documents by the given field is determined by the
[`TextIndexingOptions`](enum.TextIndexingOptions.html) of our [`IndexRecordOption`](enum.IndexRecordOption.html) of our
[`TextOptions`](struct.TextOptions.html). [`TextOptions`](struct.TextOptions.html).
The effect of each possible setting is described more in detail The effect of each possible setting is described more in detail

View File

@@ -166,7 +166,7 @@ impl SchemaBuilder {
} }
/// Adds a field entry to the schema in build. /// Adds a field entry to the schema in build.
fn add_field(&mut self, field_entry: FieldEntry) -> Field { pub fn add_field(&mut self, field_entry: FieldEntry) -> Field {
let field = Field::from_field_id(self.fields.len() as u32); let field = Field::from_field_id(self.fields.len() as u32);
let field_name = field_entry.name().to_string(); let field_name = field_entry.name().to_string();
self.fields.push(field_entry); self.fields.push(field_entry);
@@ -401,6 +401,7 @@ pub enum DocParsingError {
mod tests { mod tests {
use crate::schema::field_type::ValueParsingError; use crate::schema::field_type::ValueParsingError;
use crate::schema::int_options::Cardinality::SingleValue;
use crate::schema::schema::DocParsingError::NotJSON; use crate::schema::schema::DocParsingError::NotJSON;
use crate::schema::*; use crate::schema::*;
use matches::{assert_matches, matches}; use matches::{assert_matches, matches};
@@ -715,4 +716,94 @@ mod tests {
assert_matches!(json_err, Err(NotJSON(_))); assert_matches!(json_err, Err(NotJSON(_)));
} }
} }
#[test]
pub fn test_schema_add_field() {
let mut schema_builder = SchemaBuilder::default();
let id_options = TextOptions::default().set_stored().set_indexing_options(
TextFieldIndexing::default()
.set_tokenizer("raw")
.set_index_option(IndexRecordOption::Basic),
);
let timestamp_options = IntOptions::default()
.set_stored()
.set_indexed()
.set_fast(SingleValue);
schema_builder.add_text_field("_id", id_options);
schema_builder.add_date_field("_timestamp", timestamp_options);
let schema_content = r#"[
{
"name": "text",
"type": "text",
"options": {
"indexing": {
"record": "position",
"tokenizer": "default"
},
"stored": false
}
},
{
"name": "popularity",
"type": "i64",
"options": {
"indexed": false,
"fast": "single",
"stored": true
}
}
]"#;
let tmp_schema: Schema =
serde_json::from_str(&schema_content).expect("error while reading json");
for (_field, field_entry) in tmp_schema.fields() {
schema_builder.add_field(field_entry.clone());
}
let schema = schema_builder.build();
let schema_json = serde_json::to_string_pretty(&schema).unwrap();
let expected = r#"[
{
"name": "_id",
"type": "text",
"options": {
"indexing": {
"record": "basic",
"tokenizer": "raw"
},
"stored": true
}
},
{
"name": "_timestamp",
"type": "date",
"options": {
"indexed": true,
"fast": "single",
"stored": true
}
},
{
"name": "text",
"type": "text",
"options": {
"indexing": {
"record": "position",
"tokenizer": "default"
},
"stored": false
}
},
{
"name": "popularity",
"type": "i64",
"options": {
"indexed": false,
"fast": "single",
"stored": true
}
}
]"#;
assert_eq!(schema_json, expected);
}
} }

View File

@@ -75,7 +75,7 @@ impl Serialize for Value {
Value::U64(u) => serializer.serialize_u64(u), Value::U64(u) => serializer.serialize_u64(u),
Value::I64(u) => serializer.serialize_i64(u), Value::I64(u) => serializer.serialize_i64(u),
Value::F64(u) => serializer.serialize_f64(u), Value::F64(u) => serializer.serialize_f64(u),
Value::Date(ref date) => serializer.serialize_i64(date.timestamp()), Value::Date(ref date) => serializer.serialize_str(&date.to_rfc3339()),
Value::Facet(ref facet) => facet.serialize(serializer), Value::Facet(ref facet) => facet.serialize(serializer),
Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes), Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes),
} }
@@ -96,14 +96,14 @@ impl<'de> Deserialize<'de> for Value {
formatter.write_str("a string or u32") formatter.write_str("a string or u32")
} }
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
Ok(Value::U64(v))
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> { fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
Ok(Value::I64(v)) Ok(Value::I64(v))
} }
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
Ok(Value::U64(v))
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> { fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> {
Ok(Value::F64(v)) Ok(Value::F64(v))
} }
@@ -209,8 +209,8 @@ impl From<f64> for Value {
} }
} }
impl From<DateTime> for Value { impl From<crate::DateTime> for Value {
fn from(date_time: DateTime) -> Value { fn from(date_time: crate::DateTime) -> Value {
Value::Date(date_time) Value::Date(date_time)
} }
} }
@@ -233,6 +233,12 @@ impl From<Vec<u8>> for Value {
} }
} }
impl From<PreTokenizedString> for Value {
fn from(pretokenized_string: PreTokenizedString) -> Value {
Value::PreTokStr(pretokenized_string)
}
}
mod binary_serialize { mod binary_serialize {
use super::Value; use super::Value;
use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable}; use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
@@ -356,3 +362,17 @@ mod binary_serialize {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::Value;
use crate::DateTime;
use std::str::FromStr;
#[test]
fn test_serialize_date() {
let value = Value::Date(DateTime::from_str("1996-12-20T00:39:57+00:00").unwrap());
let serialized_value_json = serde_json::to_string_pretty(&value).unwrap();
assert_eq!(serialized_value_json, r#""1996-12-20T00:39:57+00:00""#);
}
}

View File

@@ -331,9 +331,8 @@ mod tests {
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::iter::Iterator; use std::iter::Iterator;
const TEST_TEXT: &'static str = const TEST_TEXT: &'static str = r#"Rust is a systems programming language sponsored by
r#"Rust is a systems programming language sponsored by Mozilla which Mozilla which describes it as a "safe, concurrent, practical language", supporting functional and
describes it as a "safe, concurrent, practical language", supporting functional and
imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?], imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?],
but its designers intend it to provide better memory safety while still maintaining but its designers intend it to provide better memory safety while still maintaining
performance. performance.
@@ -363,13 +362,13 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT); let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!( assert_eq!(
snippet.fragments, snippet.fragments,
"Rust is a systems programming language sponsored by \ "Rust is a systems programming language sponsored by\n\
Mozilla which\ndescribes it as a \"safe" Mozilla which describes it as a \"safe"
); );
assert_eq!( assert_eq!(
snippet.to_html(), snippet.to_html(),
"<b>Rust</b> is a systems programming <b>language</b> \ "<b>Rust</b> is a systems programming <b>language</b> \
sponsored by Mozilla which\ndescribes it as a &quot;safe" sponsored by\nMozilla which describes it as a &quot;safe"
) )
} }

View File

@@ -1,7 +1,10 @@
extern crate lz4;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &'static str = "lz4";
pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> { pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
compressed.clear(); compressed.clear();
let mut encoder = lz4::EncoderBuilder::new().build(compressed)?; let mut encoder = lz4::EncoderBuilder::new().build(compressed)?;

View File

@@ -2,6 +2,11 @@ use snap;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &str = "snappy";
pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> { pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
compressed.clear(); compressed.clear();
let mut encoder = snap::Writer::new(compressed); let mut encoder = snap::Writer::new(compressed);

View File

@@ -42,18 +42,22 @@ pub use self::writer::StoreWriter;
#[cfg(feature = "lz4")] #[cfg(feature = "lz4")]
mod compression_lz4; mod compression_lz4;
#[cfg(feature = "lz4")] #[cfg(feature = "lz4")]
use self::compression_lz4::*; pub use self::compression_lz4::COMPRESSION;
#[cfg(feature = "lz4")]
use self::compression_lz4::{compress, decompress};
#[cfg(not(feature = "lz4"))] #[cfg(not(feature = "lz4"))]
mod compression_snap; mod compression_snap;
#[cfg(not(feature = "lz4"))] #[cfg(not(feature = "lz4"))]
use self::compression_snap::*; pub use self::compression_snap::COMPRESSION;
#[cfg(not(feature = "lz4"))]
use self::compression_snap::{compress, decompress};
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr};
use crate::schema::Document; use crate::schema::Document;
use crate::schema::FieldValue; use crate::schema::FieldValue;
use crate::schema::Schema; use crate::schema::Schema;
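
Instead of glob-importing everything from the selected compression backend, the module now re-exports only the backend's `COMPRESSION` name publicly while `compress`/`decompress` stay crate-internal. A reduced sketch of the feature-gated selection (the module bodies are placeholders; the real ones also define compress/decompress, and the `lz4` feature name follows the diff):

    #[cfg(feature = "lz4")]
    mod compression_lz4 {
        pub const COMPRESSION: &str = "lz4";
    }

    #[cfg(not(feature = "lz4"))]
    mod compression_snap {
        pub const COMPRESSION: &str = "snappy";
    }

    // Exactly one of the two re-exports is compiled in, mirroring the hunk above.
    #[cfg(feature = "lz4")]
    pub use self::compression_lz4::COMPRESSION;
    #[cfg(not(feature = "lz4"))]
    pub use self::compression_snap::COMPRESSION;

    fn main() {
        // Without the lz4 feature enabled, the snappy constant is selected.
        println!("doc store compression: {}", COMPRESSION);
    }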

View File

@@ -36,7 +36,7 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder};
mod tests { mod tests {
use super::{TermDictionary, TermDictionaryBuilder, TermStreamer}; use super::{TermDictionary, TermDictionaryBuilder, TermStreamer};
use crate::core::Index; use crate::core::Index;
use crate::directory::{Directory, RAMDirectory, ReadOnlySource}; use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, ReadOnlySource};
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::schema::{Document, FieldType, Schema, TEXT}; use crate::schema::{Document, FieldType, Schema, TEXT};
use std::path::PathBuf; use std::path::PathBuf;

View File

@@ -2,8 +2,6 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//!
//! let tokenizer = RawTokenizer //! let tokenizer = RawTokenizer
//! .filter(AlphaNumOnlyFilter); //! .filter(AlphaNumOnlyFilter);
//! //!
@@ -20,7 +18,6 @@
//! assert!(stream.next().is_some()); //! assert!(stream.next().is_some());
//! // the "emoji" is dropped because its not an alphanum //! // the "emoji" is dropped because its not an alphanum
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! # }
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};

View File

@@ -7,7 +7,6 @@
//! ```rust //! ```rust
//! use tantivy::schema::*; //! use tantivy::schema::*;
//! //!
//! # fn main() {
//! let mut schema_builder = Schema::builder(); //! let mut schema_builder = Schema::builder();
//! //!
//! let text_options = TextOptions::default() //! let text_options = TextOptions::default()
@@ -31,7 +30,6 @@
//! schema_builder.add_text_field("uuid", id_options); //! schema_builder.add_text_field("uuid", id_options);
//! //!
//! let schema = schema_builder.build(); //! let schema = schema_builder.build();
//! # }
//! ``` //! ```
//! //!
//! By default, `tantivy` offers the following tokenizers: //! By default, `tantivy` offers the following tokenizers:
@@ -66,12 +64,10 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//! let en_stem = SimpleTokenizer //! let en_stem = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(40)) //! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser) //! .filter(LowerCaser)
//! .filter(Stemmer::new(Language::English)); //! .filter(Stemmer::new(Language::English));
//! # }
//! ``` //! ```
//! //!
//! Once your tokenizer is defined, you need to //! Once your tokenizer is defined, you need to
@@ -81,13 +77,12 @@
//! # use tantivy::schema::Schema; //! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*; //! # use tantivy::tokenizer::*;
//! # use tantivy::Index; //! # use tantivy::Index;
//! # fn main() { //! #
//! # let custom_en_tokenizer = SimpleTokenizer; //! let custom_en_tokenizer = SimpleTokenizer;
//! # let schema = Schema::builder().build(); //! # let schema = Schema::builder().build();
//! let index = Index::create_in_ram(schema); //! let index = Index::create_in_ram(schema);
//! index.tokenizers() //! index.tokenizers()
//! .register("custom_en", custom_en_tokenizer); //! .register("custom_en", custom_en_tokenizer);
//! # }
//! ``` //! ```
//! //!
//! If you built your schema programmatically, a complete example //! If you built your schema programmatically, a complete example
@@ -102,7 +97,6 @@
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! use tantivy::Index; //! use tantivy::Index;
//! //!
//! # fn main() {
//! let mut schema_builder = Schema::builder(); //! let mut schema_builder = Schema::builder();
//! let text_field_indexing = TextFieldIndexing::default() //! let text_field_indexing = TextFieldIndexing::default()
//! .set_tokenizer("custom_en") //! .set_tokenizer("custom_en")
@@ -121,8 +115,6 @@
//! index //! index
//! .tokenizers() //! .tokenizers()
//! .register("custom_en", custom_en_tokenizer); //! .register("custom_en", custom_en_tokenizer);
//! // ...
//! # }
//! ``` //! ```
//! //!
mod alphanum_only; mod alphanum_only;

View File

@@ -31,7 +31,7 @@ use super::{Token, TokenStream, Tokenizer};
/// ///
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// # fn main() { ///
/// let tokenizer = NgramTokenizer::new(2, 3, false); /// let tokenizer = NgramTokenizer::new(2, 3, false);
/// let mut stream = tokenizer.token_stream("hello"); /// let mut stream = tokenizer.token_stream("hello");
/// { /// {
@@ -77,7 +77,6 @@ use super::{Token, TokenStream, Tokenizer};
/// assert_eq!(token.offset_to, 5); /// assert_eq!(token.offset_to, 5);
/// } /// }
/// assert!(stream.next().is_none()); /// assert!(stream.next().is_none());
/// # }
/// ``` /// ```
#[derive(Clone)] #[derive(Clone)]
pub struct NgramTokenizer { pub struct NgramTokenizer {

View File

@@ -2,8 +2,6 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//!
//! let tokenizer = SimpleTokenizer //! let tokenizer = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(5)); //! .filter(RemoveLongFilter::limit(5));
//! //!
@@ -12,7 +10,6 @@
//! // out of the token stream. //! // out of the token stream.
//! assert_eq!(stream.next().unwrap().text, "nice"); //! assert_eq!(stream.next().unwrap().text, "nice");
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! # }
//! ``` //! ```
//! //!
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};

View File

@@ -15,6 +15,7 @@ pub enum Language {
Greek, Greek,
Hungarian, Hungarian,
Italian, Italian,
Norwegian,
Portuguese, Portuguese,
Romanian, Romanian,
Russian, Russian,
@@ -38,6 +39,7 @@ impl Language {
Greek => Algorithm::Greek, Greek => Algorithm::Greek,
Hungarian => Algorithm::Hungarian, Hungarian => Algorithm::Hungarian,
Italian => Algorithm::Italian, Italian => Algorithm::Italian,
Norwegian => Algorithm::Norwegian,
Portuguese => Algorithm::Portuguese, Portuguese => Algorithm::Portuguese,
Romanian => Algorithm::Romanian, Romanian => Algorithm::Romanian,
Russian => Algorithm::Russian, Russian => Algorithm::Russian,
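
With the new `Norwegian` variant, a Norwegian stemming pipeline is built the same way as the English example shown in the tokenizer docs earlier in this diff. A brief sketch, assuming tantivy's tokenizer API:

    use tantivy::tokenizer::*;

    fn main() {
        // Same filter chain as the English example, but stemming Norwegian.
        let no_stem = SimpleTokenizer
            .filter(RemoveLongFilter::limit(40))
            .filter(LowerCaser)
            .filter(Stemmer::new(Language::Norwegian));
        let mut stream = no_stem.token_stream("bilene");
        while let Some(token) = stream.next() {
            println!("{}", token.text);
        }
    }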

View File

@@ -2,7 +2,6 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//! let tokenizer = SimpleTokenizer //! let tokenizer = SimpleTokenizer
//! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()])); //! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
//! //!
@@ -10,7 +9,6 @@
//! assert_eq!(stream.next().unwrap().text, "fox"); //! assert_eq!(stream.next().unwrap().text, "fox");
//! assert_eq!(stream.next().unwrap().text, "crafty"); //! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! # }
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use fnv::FnvHasher; use fnv::FnvHasher;
@@ -46,7 +44,7 @@ impl StopWordFilter {
"there", "these", "they", "this", "to", "was", "will", "with", "there", "these", "they", "this", "to", "was", "will", "with",
]; ];
StopWordFilter::remove(words.iter().map(|s| s.to_string()).collect()) StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect())
} }
} }

View File

@@ -58,12 +58,10 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// # fn main() {
/// let en_stem = SimpleTokenizer /// let en_stem = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser) /// .filter(LowerCaser)
/// .filter(Stemmer::default()); /// .filter(Stemmer::default());
/// # }
/// ``` /// ```
/// ///
fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self> fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
@@ -188,7 +186,6 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// ``` /// ```
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// # fn main() {
/// let tokenizer = SimpleTokenizer /// let tokenizer = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser); /// .filter(LowerCaser);
@@ -207,7 +204,6 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// assert_eq!(token.offset_to, 12); /// assert_eq!(token.offset_to, 12);
/// assert_eq!(token.position, 1); /// assert_eq!(token.position, 1);
/// } /// }
/// # }
/// ``` /// ```
/// ///
pub trait TokenStream { pub trait TokenStream {
@@ -227,17 +223,15 @@ pub trait TokenStream {
/// and `.token()`. /// and `.token()`.
/// ///
/// ``` /// ```
/// # use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// # ///
/// # fn main() { /// let tokenizer = SimpleTokenizer
/// # let tokenizer = SimpleTokenizer /// .filter(RemoveLongFilter::limit(40))
/// # .filter(RemoveLongFilter::limit(40)) /// .filter(LowerCaser);
/// # .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer"); /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
/// while let Some(token) = token_stream.next() { /// while let Some(token) = token_stream.next() {
/// println!("Token {:?}", token.text); /// println!("Token {:?}", token.text);
/// } /// }
/// # }
/// ``` /// ```
fn next(&mut self) -> Option<&Token> { fn next(&mut self) -> Option<&Token> {
if self.advance() { if self.advance() {

View File

@@ -1,6 +1,8 @@
use fail; use fail;
use std::path::Path; use std::path::Path;
use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite}; use tantivy::directory::{
Directory, ManagedDirectory, RAMDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use tantivy::doc; use tantivy::doc;
use tantivy::schema::{Schema, TEXT}; use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term}; use tantivy::{Index, Term};
@@ -28,11 +30,11 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
// The initial 1*off is there to allow for the removal of the // The initial 1*off is there to allow for the removal of the
// lock file. // lock file.
fail::cfg("RAMDirectory::delete", "1*off->1*return").unwrap(); fail::cfg("RAMDirectory::delete", "1*off->1*return").unwrap();
managed_directory.garbage_collect(Default::default); assert!(managed_directory.garbage_collect(Default::default).is_ok());
assert!(managed_directory.exists(test_path)); assert!(managed_directory.exists(test_path));
// running the gc a second time should remove the file. // running the gc a second time should remove the file.
managed_directory.garbage_collect(Default::default); assert!(managed_directory.garbage_collect(Default::default).is_ok());
assert!( assert!(
!managed_directory.exists(test_path), !managed_directory.exists(test_path),
"The file should have been deleted" "The file should have been deleted"