Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2026-01-04 00:02:55 +00:00)

Compare commits: race-condi... segment_wr... (41 Commits)
| SHA1 |
|---|
| 0cba858b15 |
| 3c320b5926 |
| 8506d33b4d |
| dde56e3756 |
| be9057085c |
| d13d5b8662 |
| 94e5d328af |
| c0ec7ebf38 |
| 97d808ef50 |
| a6a78fa607 |
| d12a06b65b |
| 749432f949 |
| c1400f25a7 |
| 87120acf7c |
| 401f74f7ae |
| 03d31f6713 |
| a57faf07f6 |
| 562ea9a839 |
| cf92cc1ada |
| f6000aece7 |
| 2b3fe3a2b5 |
| 0fde90faac |
| 5838644b03 |
| c0011edd05 |
| 431c187a60 |
| 392abec420 |
| dfbe337fe2 |
| b9896c4962 |
| afa5715e56 |
| 79474288d0 |
| daf64487b4 |
| 00816f5529 |
| f73787e6e5 |
| 5cffa71467 |
| 02af28b3b7 |
| afe0134d0f |
| db9e81d0f9 |
| 3821f57ecc |
| d379f98b22 |
| ef3eddf3da |
| 08a2368845 |
CHANGELOG.md (24 changed lines)

@@ -1,3 +1,23 @@
+Tantivy 0.12.0
+==========================
+- By default IndexReader are in `Manual` mode.
+
+Tantivy 0.11.3
+=======================
+- Fixed DateTime as a fast field (#735)
+
+Tantivy 0.11.2
+=======================
+- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
+- Exposing a constructor for `WatchHandle` (#731)
+
+Tantivy 0.11.1
+=====================
+- Bug fix #729
+
+
 Tantivy 0.11.0
 =====================
 
@@ -9,6 +29,7 @@ Tantivy 0.11.0
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
 - Add footer with some metadata to index files. #605 (@fdb-hiroshima)
+- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
 - TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
 - Added handling of pre-tokenized text fields (#642), which will enable users to
   load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
@@ -16,10 +37,11 @@ Tantivy 0.11.0
 
 ## How to update?
 
 - The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
   an error and handling the `Result` is required.
+- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
 
 Tantivy 0.10.2
 =====================
 
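The 0.12.0 note above flips the default reader reload behavior. Below is a minimal sketch, assuming the `reader_builder`/`ReloadPolicy` API as it exists in tantivy 0.11/0.12, of opting back into automatic reloads; the function name is mine:

```rust
use std::convert::TryInto;
use tantivy::{Index, IndexReader, ReloadPolicy};

// With `Manual` as the new default, a reader only sees later commits after an
// explicit `reader.reload()`; `OnCommit` opts back into automatic reloading.
fn reader_that_follows_commits(index: &Index) -> tantivy::Result<IndexReader> {
    index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()
}
```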
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.11.0"
+version = "0.11.3"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -33,17 +33,16 @@ fs2={version="0.4", optional=true}
 itertools = "0.8"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 notify = {version="4", optional=true}
 bit-set = "0.5"
 uuid = { version = "0.8", features = ["v4", "serde"] }
 crossbeam = "0.7"
 futures = {version = "0.3", features=["thread-pool"] }
 owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
-rust-stemmers = "1.1"
+rust-stemmers = "1.2"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { path="./query-grammar" }
+tantivy-query-grammar = { version="0.11", path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.3"
+census = "0.4"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"
examples/faceted_search.rs

@@ -13,63 +13,100 @@
 // ---
 // Importing tantivy...
 use tantivy::collector::FacetCollector;
-use tantivy::query::AllQuery;
+use tantivy::query::{AllQuery, TermQuery};
 use tantivy::schema::*;
 use tantivy::{doc, Index};
-use tempfile::TempDir;
 
 fn main() -> tantivy::Result<()> {
-    // Let's create a temporary directory for the
-    // sake of this example
-    let index_path = TempDir::new()?;
     let mut schema_builder = Schema::builder();
 
-    schema_builder.add_text_field("name", TEXT | STORED);
-
-    // this is our faceted field
-    schema_builder.add_facet_field("tags");
+    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
+    // this is our faceted field: its scientific classification
+    let classification = schema_builder.add_facet_field("classification");
 
     let schema = schema_builder.build();
-    let index = Index::create_in_dir(&index_path, schema.clone())?;
-
-    let mut index_writer = index.writer(50_000_000)?;
-
-    let name = schema.get_field("name").unwrap();
-    let tags = schema.get_field("tags").unwrap();
-
-    // For convenience, tantivy also comes with a macro to
-    // reduce the boilerplate above.
+    let index = Index::create_in_ram(schema);
+
+    let mut index_writer = index.writer(30_000_000)?;
+
     index_writer.add_document(doc!(
-        name => "the ditch",
-        tags => Facet::from("/pools/north")
+        name => "Cat",
+        classification => Facet::from("/Felidae/Felinae/Felis")
     ));
-
     index_writer.add_document(doc!(
-        name => "little stacey",
-        tags => Facet::from("/pools/south")
+        name => "Canada lynx",
+        classification => Facet::from("/Felidae/Felinae/Lynx")
     ));
+    index_writer.add_document(doc!(
+        name => "Cheetah",
+        classification => Facet::from("/Felidae/Felinae/Acinonyx")
+    ));
+    index_writer.add_document(doc!(
+        name => "Tiger",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Lion",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Jaguar",
+        classification => Facet::from("/Felidae/Pantherinae/Panthera")
+    ));
+    index_writer.add_document(doc!(
+        name => "Sunda clouded leopard",
+        classification => Facet::from("/Felidae/Pantherinae/Neofelis")
+    ));
+    index_writer.add_document(doc!(
+        name => "Fossa",
+        classification => Facet::from("/Eupleridae/Cryptoprocta")
+    ));
 
     index_writer.commit()?;
 
     let reader = index.reader()?;
-
     let searcher = reader.searcher();
+    {
+        let mut facet_collector = FacetCollector::for_field(classification);
+        facet_collector.add_facet("/Felidae");
+        let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
+        // This lists all of the facet counts, right below "/Felidae".
+        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
+        assert_eq!(
+            facets,
+            vec![
+                (&Facet::from("/Felidae/Felinae"), 3),
+                (&Facet::from("/Felidae/Pantherinae"), 4),
+            ]
+        );
+    }
 
-    let mut facet_collector = FacetCollector::for_field(tags);
-    facet_collector.add_facet("/pools");
-
-    let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
-
-    // This lists all of the facet counts
-    let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
-    assert_eq!(
-        facets,
-        vec![
-            (&Facet::from("/pools/north"), 1),
-            (&Facet::from("/pools/south"), 1),
-        ]
-    );
+    // Facets are also searchable.
+    //
+    // For instance a common UI pattern is to allow the user someone to click on a facet link
+    // (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
+    //
+    // The search would then look as follows.
+    // Check the reference doc for different ways to create a `Facet` object.
+    {
+        let facet = Facet::from_text("/Felidae/Pantherinae");
+        let facet_term = Term::from_facet(classification, &facet);
+        let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
+        let mut facet_collector = FacetCollector::for_field(classification);
+        facet_collector.add_facet("/Felidae/Pantherinae");
+        let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
+        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
+        assert_eq!(
+            facets,
+            vec![
+                (&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
+                (&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
+            ]
+        );
+    }
 
     Ok(())
 }
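The example's closing comment points at the reference docs for other ways to build a `Facet`. A short sketch of the common constructors; `from_path` is an assumption based on the public API of this era and worth double-checking against the docs:

```rust
use tantivy::schema::Facet;

fn main() {
    // The `/`-separated textual form used throughout the example above.
    let from_text = Facet::from_text("/Felidae/Pantherinae");
    // The `From<&str>` shorthand, also used above.
    let from_str = Facet::from("/Felidae/Pantherinae");
    // Built from path segments, which avoids escaping `/` inside a segment.
    let from_path = Facet::from_path(vec!["Felidae", "Pantherinae"]);
    assert_eq!(from_text, from_str);
    assert_eq!(from_str, from_path);
}
```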
examples/pre_tokenized_text.rs

@@ -65,8 +65,7 @@ fn main() -> tantivy::Result<()> {
         tokens: pre_tokenize_text(body_text),
     };
 
-    // Now lets create a document and add our `PreTokenizedString` using
-    // `add_pre_tokenized_text` method of `Document`
+    // Now lets create a document and add our `PreTokenizedString`
     let old_man_doc = doc!(title => title_tok, body => body_tok);
 
     // ... now let's just add it to the IndexWriter
@@ -114,6 +113,9 @@ fn main() -> tantivy::Result<()> {
 
     assert_eq!(count, 2);
 
+    // Now let's print out the results.
+    // Note that the tokens are not stored along with the original text
+    // in the document store
     for (_score, doc_address) in top_docs {
         let retrieved_doc = searcher.doc(doc_address)?;
         println!("Document: {}", schema.to_json(&retrieved_doc));
query-grammar/README.md (new file, 3 lines)

@@ -0,0 +1,3 @@
+# Tantivy Query Grammar
+
+This crate is used by tantivy to parse queries.
src/collector/count_collector.rs

@@ -13,44 +13,29 @@ use crate::SegmentReader;
 /// use tantivy::collector::Count;
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index, Result};
+/// use tantivy::{doc, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = Schema::builder();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer(3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
-///
-///     let reader = index.reader()?;
-///     let searcher = reader.searcher();
-///
-///     {
-///         let query_parser = QueryParser::for_index(&index, vec![title]);
-///         let query = query_parser.parse_query("diary")?;
-///         let count = searcher.search(&query, &Count).unwrap();
-///
-///         assert_eq!(count, 2);
-///     }
-///
-///     Ok(())
-/// }
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
+///
+/// let mut index_writer = index.writer(3_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind"));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+/// index_writer.add_document(doc!(title => "A Dairy Cow"));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+/// assert!(index_writer.commit().is_ok());
+///
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
+///
+/// // Here comes the important part
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let count = searcher.search(&query, &Count).unwrap();
+///
+/// assert_eq!(count, 2);
 /// ```
 pub struct Count;
src/collector/facet_collector.rs

@@ -86,7 +86,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// use tantivy::schema::{Facet, Schema, TEXT};
 /// use tantivy::{doc, Index, Result};
 ///
-/// # fn main() { example().unwrap(); }
 /// fn example() -> Result<()> {
 ///     let mut schema_builder = Schema::builder();
 ///
@@ -127,7 +126,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///     let searcher = reader.searcher();
 ///
 ///     {
-///     let mut facet_collector = FacetCollector::for_field(facet);
+///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/lang");
 ///         facet_collector.add_facet("/category");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -143,7 +142,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///     }
 ///
 ///     {
-///     let mut facet_collector = FacetCollector::for_field(facet);
+///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/category/fiction");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
 ///
@@ -158,8 +157,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///         ]);
 ///     }
 ///
-///    {
-///        let mut facet_collector = FacetCollector::for_field(facet);
+///     {
+///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/category/fiction");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
 ///
@@ -172,6 +171,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///
 ///     Ok(())
 /// }
+/// # assert!(example().is_ok());
 /// ```
 pub struct FacetCollector {
     field: Field,
@@ -452,9 +452,11 @@ impl FacetCounts {
 #[cfg(test)]
 mod tests {
     use super::{FacetCollector, FacetCounts};
+    use crate::collector::Count;
     use crate::core::Index;
-    use crate::query::AllQuery;
-    use crate::schema::{Document, Facet, Field, Schema};
+    use crate::query::{AllQuery, QueryParser, TermQuery};
+    use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema};
+    use crate::Term;
     use rand::distributions::Uniform;
     use rand::prelude::SliceRandom;
     use rand::{thread_rng, Rng};
@@ -544,6 +546,56 @@ mod tests {
         assert_eq!(facets[0].1, 1);
     }
 
+    #[test]
+    fn test_doc_search_by_facet() {
+        let mut schema_builder = Schema::builder();
+        let facet_field = schema_builder.add_facet_field("facet");
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/A/A"),
+        ));
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/A/B"),
+        ));
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/A/C/A"),
+        ));
+        index_writer.add_document(doc!(
+            facet_field => Facet::from_text(&"/D/C/A"),
+        ));
+        index_writer.commit().unwrap();
+        let reader = index.reader().unwrap();
+        let searcher = reader.searcher();
+        assert_eq!(searcher.num_docs(), 4);
+
+        let count_facet = |facet_str: &str| {
+            let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
+            searcher
+                .search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
+                .unwrap()
+        };
+
+        assert_eq!(count_facet("/"), 4);
+        assert_eq!(count_facet("/A"), 3);
+        assert_eq!(count_facet("/A/B"), 1);
+        assert_eq!(count_facet("/A/C"), 1);
+        assert_eq!(count_facet("/A/C/A"), 1);
+        assert_eq!(count_facet("/C/A"), 0);
+        {
+            let query_parser = QueryParser::for_index(&index, vec![]);
+            {
+                let query = query_parser.parse_query("facet:/A/B").unwrap();
+                assert_eq!(1, searcher.search(&query, &Count).unwrap());
+            }
+            {
+                let query = query_parser.parse_query("facet:/A").unwrap();
+                assert_eq!(3, searcher.search(&query, &Count).unwrap());
+            }
+        }
+    }
+
     #[test]
     fn test_non_used_facet_collector() {
         let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
src/collector/multi_collector.rs

@@ -108,49 +108,35 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
 /// use tantivy::collector::{Count, TopDocs, MultiCollector};
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index, Result};
+/// use tantivy::{doc, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = Schema::builder();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer(3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
-///
-///     let reader = index.reader()?;
-///     let searcher = reader.searcher();
-///
-///     let mut collectors = MultiCollector::new();
-///     let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
-///     let count_handle = collectors.add_collector(Count);
-///     let query_parser = QueryParser::for_index(&index, vec![title]);
-///     let query = query_parser.parse_query("diary")?;
-///     let mut multi_fruit = searcher.search(&query, &collectors)?;
-///
-///     let count = count_handle.extract(&mut multi_fruit);
-///     let top_docs = top_docs_handle.extract(&mut multi_fruit);
-///
-///     # assert_eq!(count, 2);
-///     # assert_eq!(top_docs.len(), 2);
-///
-///     Ok(())
-/// }
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
+///
+/// let mut index_writer = index.writer(3_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind"));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+/// index_writer.add_document(doc!(title => "A Dairy Cow"));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+/// assert!(index_writer.commit().is_ok());
+///
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
+///
+/// let mut collectors = MultiCollector::new();
+/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
+/// let count_handle = collectors.add_collector(Count);
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
+///
+/// let count = count_handle.extract(&mut multi_fruit);
+/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
+///
+/// assert_eq!(count, 2);
+/// assert_eq!(top_docs.len(), 2);
 /// ```
 #[allow(clippy::type_complexity)]
 #[derive(Default)]
src/collector/top_docs.rs

@@ -29,43 +29,29 @@ use std::fmt;
 /// use tantivy::collector::TopDocs;
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, DocAddress, Index, Result};
+/// use tantivy::{doc, DocAddress, Index};
 ///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = Schema::builder();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
-///
-///     let reader = index.reader()?;
-///     let searcher = reader.searcher();
-///
-///     let query_parser = QueryParser::for_index(&index, vec![title]);
-///     let query = query_parser.parse_query("diary")?;
-///     let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
-///
-///     assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
-///     assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
-///
-///     Ok(())
-/// }
+/// let mut schema_builder = Schema::builder();
+/// let title = schema_builder.add_text_field("title", TEXT);
+/// let schema = schema_builder.build();
+/// let index = Index::create_in_ram(schema);
+///
+/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+/// index_writer.add_document(doc!(title => "The Name of the Wind"));
+/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
+/// index_writer.add_document(doc!(title => "A Dairy Cow"));
+/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
+/// assert!(index_writer.commit().is_ok());
+///
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
+///
+/// let query_parser = QueryParser::for_index(&index, vec![title]);
+/// let query = query_parser.parse_query("diary").unwrap();
+/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
+///
+/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
+/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
 /// ```
 pub struct TopDocs(TopCollector<Score>);
 
@@ -102,15 +88,12 @@ impl TopDocs {
 /// #
 /// # let index = Index::create_in_ram(schema);
 /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-/// # index_writer.add_document(doc!(
-/// #     title => "The Name of the Wind",
-/// #     rating => 92u64,
-/// # ));
+/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
 /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
 /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
 /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
-/// # index_writer.commit()?;
-/// # let reader = index.reader()?;
+/// # assert!(index_writer.commit().is_ok());
+/// # let reader = index.reader().unwrap();
 /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
 /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
 /// # assert_eq!(top_docs,
@@ -202,27 +185,33 @@ impl TopDocs {
 /// use tantivy::collector::TopDocs;
 /// use tantivy::schema::Field;
 ///
-/// # fn create_schema() -> Schema {
-/// #    let mut schema_builder = Schema::builder();
-/// #    schema_builder.add_text_field("product_name", TEXT);
-/// #    schema_builder.add_u64_field("popularity", FAST);
-/// #    schema_builder.build()
-/// # }
-/// #
-/// # fn main() -> tantivy::Result<()> {
-/// # let schema = create_schema();
-/// # let index = Index::create_in_ram(schema);
-/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-/// # let product_name = index.schema().get_field("product_name").unwrap();
-/// #
+/// fn create_schema() -> Schema {
+///     let mut schema_builder = Schema::builder();
+///     schema_builder.add_text_field("product_name", TEXT);
+///     schema_builder.add_u64_field("popularity", FAST);
+///     schema_builder.build()
+/// }
+///
+/// fn create_index() -> tantivy::Result<Index> {
+///     let schema = create_schema();
+///     let index = Index::create_in_ram(schema);
+///     let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+///     let product_name = index.schema().get_field("product_name").unwrap();
+///     let popularity: Field = index.schema().get_field("popularity").unwrap();
+///     index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
+///     index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
+///     index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
+///     index_writer.commit()?;
+///     Ok(index)
+/// }
+///
+/// let index = create_index().unwrap();
+/// let product_name = index.schema().get_field("product_name").unwrap();
 /// let popularity: Field = index.schema().get_field("popularity").unwrap();
-/// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
-/// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
-/// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
-/// # index_writer.commit()?;
-/// // ...
-/// # let user_query = "diary";
-/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
+///
+/// let user_query_str = "diary";
+/// let query_parser = QueryParser::for_index(&index, vec![product_name]);
+/// let query = query_parser.parse_query(user_query_str).unwrap();
 ///
 /// // This is where we build our collector with our custom score.
 /// let top_docs_by_custom_score = TopDocs
@@ -249,15 +238,12 @@ impl TopDocs {
 ///           popularity_boost_score * original_score
 ///       }
 ///     });
-/// # let reader = index.reader()?;
-/// # let searcher = reader.searcher();
+/// let reader = index.reader().unwrap();
+/// let searcher = reader.searcher();
 /// // ... and here are our documents. Note this is a simple vec.
 /// // The `Score` in the pair is our tweaked score.
 /// let resulting_docs: Vec<(Score, DocAddress)> =
-///      searcher.search(&*query, &top_docs_by_custom_score)?;
-///
-/// # Ok(())
-/// # }
+///      searcher.search(&query, &top_docs_by_custom_score).unwrap();
 /// ```
 ///
 /// # See also
src/core/index.rs

@@ -1,4 +1,3 @@
-use super::segment::create_segment;
 use super::segment::Segment;
 use crate::core::Executor;
 use crate::core::IndexMeta;
@@ -331,9 +330,8 @@ impl Index {
             .collect())
     }
 
-    #[doc(hidden)]
-    pub fn segment(&self, segment_meta: SegmentMeta) -> Segment {
-        create_segment(self.clone(), segment_meta)
+    pub(crate) fn segment(&self, segment_meta: SegmentMeta) -> Segment {
+        Segment::for_index(self.clone(), segment_meta)
     }
 
     /// Creates a new segment.
@@ -344,6 +342,13 @@ impl Index {
         self.segment(segment_meta)
     }
 
+    /// Creates a new segment.
+    pub(crate) fn new_segment_unpersisted(&self) -> Segment {
+        let meta = self
+            .inventory
+            .new_segment_meta(SegmentId::generate_random(), 0);
+        Segment::new_volatile(meta, self.schema())
+    }
+
     /// Return a reference to the index directory.
     pub fn directory(&self) -> &ManagedDirectory {
         &self.directory
@@ -466,7 +471,7 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &index, &reader);
+        test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
     }
 
     #[cfg(feature = "mmap")]
@@ -490,7 +495,7 @@ mod tests {
            .try_into()
            .unwrap();
        assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &index, &reader);
+        test_index_on_commit_reload_policy_aux(field, index.clone(), &index, &reader);
     }
 
     #[test]
@@ -532,12 +537,11 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
+        test_index_on_commit_reload_policy_aux(field, read_index, &write_index, &reader);
     }
 }
 
-fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
-    let mut reader_index = reader.index();
+fn test_index_on_commit_reload_policy_aux(field: Field, mut reader_index: Index, index: &Index, reader: &IndexReader) {
     let (sender, receiver) = crossbeam::channel::unbounded();
     let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
         let _ = sender.send(());
src/core/segment.rs

@@ -3,21 +3,62 @@ use crate::core::Index;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::directory::error::{OpenReadError, OpenWriteError};
-use crate::directory::Directory;
+use crate::directory::{Directory, ManagedDirectory, RAMDirectory};
 use crate::directory::{ReadOnlySource, WritePtr};
 use crate::indexer::segment_serializer::SegmentSerializer;
 use crate::schema::Schema;
 use crate::Opstamp;
 use crate::Result;
+use failure::_core::ops::DerefMut;
 use std::fmt;
+use std::ops::Deref;
 use std::path::PathBuf;
 use std::result;
 
+#[derive(Clone)]
+pub(crate) enum SegmentDirectory {
+    Persisted(ManagedDirectory),
+    Volatile(RAMDirectory),
+}
+
+impl SegmentDirectory {
+    pub fn new_volatile() -> SegmentDirectory {
+        SegmentDirectory::Volatile(RAMDirectory::default())
+    }
+}
+
+impl From<ManagedDirectory> for SegmentDirectory {
+    fn from(directory: ManagedDirectory) -> Self {
+        SegmentDirectory::Persisted(directory)
+    }
+}
+
+impl Deref for SegmentDirectory {
+    type Target = dyn Directory;
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            SegmentDirectory::Volatile(dir) => dir,
+            SegmentDirectory::Persisted(dir) => dir,
+        }
+    }
+}
+
+impl DerefMut for SegmentDirectory {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        match self {
+            SegmentDirectory::Volatile(dir) => dir,
+            SegmentDirectory::Persisted(dir) => dir,
+        }
+    }
+}
+
 /// A segment is a piece of the index.
 #[derive(Clone)]
 pub struct Segment {
-    index: Index,
+    schema: Schema,
     meta: SegmentMeta,
+    directory: SegmentDirectory,
 }
 
 impl fmt::Debug for Segment {
@@ -26,23 +67,56 @@ impl fmt::Debug for Segment {
     }
 }
 
-/// Creates a new segment given an `Index` and a `SegmentId`
-///
-/// The function is here to make it private outside `tantivy`.
-/// #[doc(hidden)]
-pub fn create_segment(index: Index, meta: SegmentMeta) -> Segment {
-    Segment { index, meta }
-}
-
 impl Segment {
-    /// Returns the index the segment belongs to.
-    pub fn index(&self) -> &Index {
-        &self.index
-    }
-
     /// Returns our index's schema.
+    // TODO return a ref.
     pub fn schema(&self) -> Schema {
-        self.index.schema()
+        self.schema.clone()
     }
 
+    pub(crate) fn new_persisted(
+        meta: SegmentMeta,
+        directory: ManagedDirectory,
+        schema: Schema,
+    ) -> Segment {
+        Segment {
+            meta,
+            schema,
+            directory: SegmentDirectory::from(directory),
+        }
+    }
+
+    /// Creates a new segment that embeds its own `RAMDirectory`.
+    ///
+    /// That segment is entirely dissociated from the index directory.
+    /// It will be persisted by a background thread in charge of IO.
+    pub fn new_volatile(meta: SegmentMeta, schema: Schema) -> Segment {
+        Segment {
+            schema,
+            meta,
+            directory: SegmentDirectory::new_volatile(),
+        }
+    }
+
+    /// Creates a new segment given an `Index` and a `SegmentId`
+    pub(crate) fn for_index(index: Index, meta: SegmentMeta) -> Segment {
+        Segment {
+            directory: SegmentDirectory::Persisted(index.directory().clone()),
+            schema: index.schema(),
+            meta,
+        }
+    }
+
+    pub fn persist(&mut self, mut dest_directory: ManagedDirectory) -> crate::Result<()> {
+        if let SegmentDirectory::Persisted(_) = self.directory {
+            // this segment is already persisted.
+            return Ok(());
+        }
+        if let SegmentDirectory::Volatile(ram_directory) = &self.directory {
+            ram_directory.persist(&mut dest_directory)?;
+        }
+        self.directory = SegmentDirectory::Persisted(dest_directory);
+        Ok(())
+    }
+
     /// Returns the segment meta-information
@@ -56,7 +130,8 @@ impl Segment {
     /// as we finalize a fresh new segment.
     pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
         Segment {
-            index: self.index,
+            directory: self.directory,
+            schema: self.schema,
             meta: self.meta.with_max_doc(max_doc),
         }
     }
@@ -64,7 +139,8 @@ impl Segment {
     #[doc(hidden)]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
         Segment {
-            index: self.index,
+            directory: self.directory,
+            schema: self.schema,
             meta: self.meta.with_delete_meta(num_deleted_docs, opstamp),
         }
     }
@@ -88,7 +164,7 @@ impl Segment {
         component: SegmentComponent,
     ) -> result::Result<ReadOnlySource, OpenReadError> {
         let path = self.relative_path(component);
-        let source = self.index.directory().open_read(&path)?;
+        let source = self.directory.open_read(&path)?;
         Ok(source)
     }
 
@@ -98,7 +174,7 @@ impl Segment {
         component: SegmentComponent,
     ) -> result::Result<WritePtr, OpenWriteError> {
         let path = self.relative_path(component);
-        let write = self.index.directory_mut().open_write(&path)?;
+        let write = self.directory.open_write(&path)?;
         Ok(write)
     }
 }
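The `SegmentDirectory` split above lets a segment live in its own `RAMDirectory` until it is explicitly moved into the index directory. A rough lifecycle sketch; `new_segment_unpersisted` and `persist` are crate-private in this diff, so this compiles only inside tantivy itself and is illustrative only:

```rust
use tantivy::Index;

// Illustrative sketch of what the indexer would do internally, assuming the
// crate-private APIs introduced in this diff.
fn build_then_persist(index: &Index) -> tantivy::Result<()> {
    // Backed by a fresh RAMDirectory, dissociated from the index directory.
    let mut segment = index.new_segment_unpersisted();
    // ... a segment writer would serialize postings, store, fast fields here ...
    // Copy the segment's files into the index's managed directory.
    segment.persist(index.directory().clone())?;
    // A second call is a no-op: the segment is already `Persisted`.
    segment.persist(index.directory().clone())?;
    Ok(())
}
```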
src/core/segment_reader.rs

@@ -57,6 +57,68 @@ pub struct SegmentReader {
 }
 
 impl SegmentReader {
+    /// Open a new segment for reading.
+    pub fn open(segment: &Segment) -> Result<SegmentReader> {
+        let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
+        let termdict_composite = CompositeFile::open(&termdict_source)?;
+
+        let store_source = segment.open_read(SegmentComponent::STORE)?;
+
+        fail_point!("SegmentReader::open#middle");
+
+        let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
+        let postings_composite = CompositeFile::open(&postings_source)?;
+
+        let positions_composite = {
+            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
+                CompositeFile::open(&source)?
+            } else {
+                CompositeFile::empty()
+            }
+        };
+
+        let positions_idx_composite = {
+            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
+                CompositeFile::open(&source)?
+            } else {
+                CompositeFile::empty()
+            }
+        };
+
+        let schema = segment.schema();
+
+        let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
+        let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
+        let fast_field_readers =
+            Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
+
+        let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
+        let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
+
+        let delete_bitset_opt = if segment.meta().has_deletes() {
+            let delete_data = segment.open_read(SegmentComponent::DELETE)?;
+            Some(DeleteBitSet::open(delete_data))
+        } else {
+            None
+        };
+
+        Ok(SegmentReader {
+            inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
+            max_doc: segment.meta().max_doc(),
+            num_docs: segment.meta().num_docs(),
+            termdict_composite,
+            postings_composite,
+            fast_fields_readers: fast_field_readers,
+            fieldnorms_composite,
+            segment_id: segment.id(),
+            store_source,
+            delete_bitset_opt,
+            positions_composite,
+            positions_idx_composite,
+            schema,
+        })
+    }
+
     /// Returns the highest document id ever attributed in
     /// this segment + 1.
     /// Today, `tantivy` does not handle deletes, so it happens
@@ -144,68 +206,6 @@ impl SegmentReader {
         StoreReader::from_source(self.store_source.clone())
     }
 
-    /// Open a new segment for reading.
-    pub fn open(segment: &Segment) -> Result<SegmentReader> {
-        let termdict_source = segment.open_read(SegmentComponent::TERMS)?;
-        let termdict_composite = CompositeFile::open(&termdict_source)?;
-
-        let store_source = segment.open_read(SegmentComponent::STORE)?;
-
-        fail_point!("SegmentReader::open#middle");
-
-        let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
-        let postings_composite = CompositeFile::open(&postings_source)?;
-
-        let positions_composite = {
-            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONS) {
-                CompositeFile::open(&source)?
-            } else {
-                CompositeFile::empty()
-            }
-        };
-
-        let positions_idx_composite = {
-            if let Ok(source) = segment.open_read(SegmentComponent::POSITIONSSKIP) {
-                CompositeFile::open(&source)?
-            } else {
-                CompositeFile::empty()
-            }
-        };
-
-        let schema = segment.schema();
-
-        let fast_fields_data = segment.open_read(SegmentComponent::FASTFIELDS)?;
-        let fast_fields_composite = CompositeFile::open(&fast_fields_data)?;
-        let fast_field_readers =
-            Arc::new(FastFieldReaders::load_all(&schema, &fast_fields_composite)?);
-
-        let fieldnorms_data = segment.open_read(SegmentComponent::FIELDNORMS)?;
-        let fieldnorms_composite = CompositeFile::open(&fieldnorms_data)?;
-
-        let delete_bitset_opt = if segment.meta().has_deletes() {
-            let delete_data = segment.open_read(SegmentComponent::DELETE)?;
-            Some(DeleteBitSet::open(delete_data))
-        } else {
-            None
-        };
-
-        Ok(SegmentReader {
-            inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
-            max_doc: segment.meta().max_doc(),
-            num_docs: segment.meta().num_docs(),
-            termdict_composite,
-            postings_composite,
-            fast_fields_readers: fast_field_readers,
-            fieldnorms_composite,
-            segment_id: segment.id(),
-            store_source,
-            delete_bitset_opt,
-            positions_composite,
-            positions_idx_composite,
-            schema,
-        })
-    }
-
     /// Returns a field reader associated to the field given in argument.
     /// If the field was not present in the index during indexing time,
     /// the InvertedIndexReader is empty.
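The relocated `SegmentReader::open` is the public entry point for reading a single segment. A minimal sketch, assuming an `Index` with at least one committed segment and the public API of this era:

```rust
use tantivy::{Index, SegmentReader};

fn inspect_segments(index: &Index) -> tantivy::Result<()> {
    for segment in index.searchable_segments()? {
        // `open` eagerly loads the composite files (terms, postings,
        // positions, ...), which is why it can fail per component.
        let reader = SegmentReader::open(&segment)?;
        println!(
            "segment {:?}: {} live docs out of {}",
            reader.segment_id(),
            reader.num_docs(),
            reader.max_doc()
        );
    }
    Ok(())
}
```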
src/directory/directory.rs

@@ -119,7 +119,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Specifically, subsequent writes or flushes should
     /// have no effect on the returned `ReadOnlySource` object.
     ///
-    /// You should only use this to read files create with [`open_write`]
+    /// You should only use this to read files create with [Directory::open_write].
     fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
 
     /// Removes a file
@@ -160,7 +160,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     ///
     /// This should only be used for small files.
     ///
-    /// You should only use this to read files create with [`atomic_write`]
+    /// You should only use this to read files create with [Directory::atomic_write].
     fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
 
     /// Atomically replace the content of a file with data.
@@ -197,7 +197,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Registers a callback that will be called whenever a change on the `meta.json`
     /// using the `atomic_write` API is detected.
     ///
-    /// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other
+    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
     /// hand, undefined.
     ///
     /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
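The doc fixes above concern the pairing of `atomic_write`/`atomic_read` and the `watch` semantics. A small sketch of that contract using a `RAMDirectory`; the exact return type of `watch` (plain `WatchHandle` vs a `Result`) shifted between versions, so the handle is simply bound and the signature is an assumption:

```rust
use std::path::Path;
use tantivy::directory::{Directory, RAMDirectory};

fn main() {
    let mut directory = RAMDirectory::create();
    // The callback fires on `atomic_write` changes to watched files; pairing
    // `watch` with files written through `open_write` is undefined, per the
    // doc comment above.
    let _watch_handle = directory.watch(Box::new(|| println!("meta.json changed")));
    directory
        .atomic_write(Path::new("meta.json"), b"{}")
        .expect("atomic_write failed");
    let bytes = directory
        .atomic_read(Path::new("meta.json"))
        .expect("atomic_read failed");
    assert_eq!(bytes, b"{}".to_vec());
}
```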
src/directory/error.rs

@@ -1,3 +1,4 @@
+use crate::Version;
 use std::error::Error as StdError;
 use std::fmt;
 use std::io;
@@ -156,6 +157,65 @@ impl StdError for OpenWriteError {
     }
 }
 
+/// Type of index incompatibility between the library and the index found on disk
+/// Used to catch and provide a hint to solve this incompatibility issue
+pub enum Incompatibility {
+    /// This library cannot decompress the index found on disk
+    CompressionMismatch {
+        /// Compression algorithm used by the current version of tantivy
+        library_compression_format: String,
+        /// Compression algorithm that was used to serialise the index
+        index_compression_format: String,
+    },
+    /// The index format found on disk isn't supported by this version of the library
+    IndexMismatch {
+        /// Version used by the library
+        library_version: Version,
+        /// Version the index was built with
+        index_version: Version,
+    },
+}
+
+impl fmt::Debug for Incompatibility {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        match self {
+            Incompatibility::CompressionMismatch {
+                library_compression_format,
+                index_compression_format,
+            } => {
+                let err = format!(
+                    "Library was compiled with {:?} compression, index was compressed with {:?}",
+                    library_compression_format, index_compression_format
+                );
+                let advice = format!(
+                    "Change the feature flag to {:?} and rebuild the library",
+                    index_compression_format
+                );
+                write!(f, "{}. {}", err, advice)?;
+            }
+            Incompatibility::IndexMismatch {
+                library_version,
+                index_version,
+            } => {
+                let err = format!(
+                    "Library version: {}, index version: {}",
+                    library_version.index_format_version, index_version.index_format_version
+                );
+                // TODO make a more useful error message
+                // include the version range that supports this index_format_version
+                let advice = format!(
+                    "Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
+                     and rebuild your project.",
+                    index_version.index_format_version, index_version.major, index_version.minor
+                );
+                write!(f, "{}. {}", err, advice)?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
 /// Error that may occur when accessing a file read
 #[derive(Debug)]
 pub enum OpenReadError {
@@ -164,6 +224,8 @@ pub enum OpenReadError {
     /// Any kind of IO error that happens when
     /// interacting with the underlying IO device.
     IOError(IOError),
+    /// This library doesn't support the index version found on disk
+    IncompatibleIndex(Incompatibility),
 }
 
 impl From<IOError> for OpenReadError {
@@ -183,19 +245,9 @@ impl fmt::Display for OpenReadError {
                 "an io error occurred while opening a file for reading: '{}'",
                 err
             ),
+            OpenReadError::IncompatibleIndex(ref footer) => {
+                write!(f, "Incompatible index format: {:?}", footer)
+            }
         }
     }
 }
 
-impl StdError for OpenReadError {
-    fn description(&self) -> &str {
-        "error occurred while opening a file for reading"
-    }
-
-    fn cause(&self) -> Option<&dyn StdError> {
-        match *self {
-            OpenReadError::FileDoesNotExist(_) => None,
-            OpenReadError::IOError(ref err) => Some(err),
-        }
-    }
-}
@@ -216,6 +268,12 @@ impl From<IOError> for DeleteError {
     }
 }
 
+impl From<Incompatibility> for OpenReadError {
+    fn from(incompatibility: Incompatibility) -> Self {
+        OpenReadError::IncompatibleIndex(incompatibility)
+    }
+}
+
 impl fmt::Display for DeleteError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match *self {
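`Incompatibility` carries enough context to tell the user whether to rebuild with a different compression feature or to switch tantivy versions. A sketch of branching on the two variants at a call site; how you obtain one (e.g. the `Footer::is_compatible` check added in footer.rs below) is internal to the crate:

```rust
use tantivy::directory::error::Incompatibility;

// Turn the two mismatch variants added above into a user-facing hint.
fn advise(incompat: &Incompatibility) -> String {
    match incompat {
        Incompatibility::CompressionMismatch {
            library_compression_format,
            index_compression_format,
        } => format!(
            "index was written with {:?} compression but this build uses {:?}; \
             rebuild tantivy with the matching feature flag",
            index_compression_format, library_compression_format
        ),
        // The `Debug` impl above already embeds version advice.
        mismatch @ Incompatibility::IndexMismatch { .. } => format!("{:?}", mismatch),
    }
}
```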
@@ -1,181 +1,175 @@
|
||||
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
|
||||
use crate::directory::error::Incompatibility;
|
||||
use crate::directory::read_only_source::ReadOnlySource;
|
||||
use crate::directory::{AntiCallToken, TerminatingWrite};
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use crate::Version;
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use crc32fast::Hasher;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
const COMMON_FOOTER_SIZE: usize = 4 * 5;
|
||||
|
||||
type CrcHashU32 = u32;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct Footer {
|
||||
pub tantivy_version: (u32, u32, u32),
|
||||
pub version: Version,
|
||||
pub meta: String,
|
||||
pub versioned_footer: VersionedFooter,
|
||||
}
|
||||
|
||||
/// Serialises the footer to a byte-array
|
||||
/// - versioned_footer_len : 4 bytes
|
||||
///- versioned_footer: variable bytes
|
||||
/// - meta_len: 4 bytes
|
||||
/// - meta: variable bytes
|
||||
/// - version_len: 4 bytes
|
||||
/// - version json: variable bytes
|
||||
impl BinarySerializable for Footer {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
BinarySerializable::serialize(&self.versioned_footer, writer)?;
|
||||
BinarySerializable::serialize(&self.meta, writer)?;
|
||||
let version_string =
|
||||
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
|
||||
BinarySerializable::serialize(&version_string, writer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let versioned_footer = VersionedFooter::deserialize(reader)?;
|
||||
let meta = String::deserialize(reader)?;
|
||||
let version_json = String::deserialize(reader)?;
|
||||
let version = serde_json::from_str(&version_json)?;
|
||||
Ok(Footer {
|
||||
version,
|
||||
meta,
|
||||
versioned_footer,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Footer {
|
||||
pub fn new(versioned_footer: VersionedFooter) -> Self {
|
||||
let tantivy_version = (
|
||||
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
|
||||
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
|
||||
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
|
||||
);
|
||||
let version = crate::VERSION.clone();
|
||||
let meta = version.to_string();
|
||||
Footer {
|
||||
tantivy_version,
|
||||
meta: format!(
|
||||
"tantivy v{}.{}.{}, index_format v{}",
|
||||
tantivy_version.0,
|
||||
tantivy_version.1,
|
||||
tantivy_version.2,
|
||||
versioned_footer.version()
|
||||
),
|
||||
version,
|
||||
meta,
|
||||
versioned_footer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialises the footer to a byte-array
|
||||
/// [ versioned_footer | meta | common_footer ]
|
||||
/// [ 0..8 | 8..32 | 32..52 ]
|
||||
pub fn to_bytes(&self) -> Vec<u8> {
|
||||
let mut res = self.versioned_footer.to_bytes();
|
||||
res.extend_from_slice(self.meta.as_bytes());
|
||||
let len = res.len();
|
||||
res.resize(len + COMMON_FOOTER_SIZE, 0);
|
||||
let mut common_footer = &mut res[len..];
|
||||
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
|
||||
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
|
||||
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
|
||||
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
|
||||
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
|
||||
res
|
||||
}
|
||||
|
||||
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
|
||||
let len = data.len();
|
||||
if len < COMMON_FOOTER_SIZE + 4 {
|
||||
// 4 bytes for index version, stored in versioned footer
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
|
||||
if len < size as usize {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
format!(
|
||||
"The footer len is {}, while the entire file len is {}. \
|
||||
Your index is either corrupted or was built using a tantivy version\
|
||||
anterior to 0.11.",
|
||||
size, len
|
||||
),
|
||||
));
|
||||
}
|
||||
let footer = &data[len - size as usize..];
|
||||
let meta_len = LittleEndian::read_u32(&footer[size - COMMON_FOOTER_SIZE..]) as usize;
|
||||
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
|
||||
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
|
||||
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
|
||||
Ok(Footer {
|
||||
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
|
||||
meta: String::from_utf8_lossy(
|
||||
&footer[size - meta_len - COMMON_FOOTER_SIZE..size - COMMON_FOOTER_SIZE],
|
||||
)
|
||||
.into_owned(),
|
||||
versioned_footer: VersionedFooter::from_bytes(
|
||||
&footer[..size - meta_len - COMMON_FOOTER_SIZE],
|
||||
)?,
|
||||
})
|
||||
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
|
||||
let mut counting_write = CountingWriter::wrap(&mut write);
|
||||
self.serialize(&mut counting_write)?;
|
||||
let written_len = counting_write.written_bytes();
|
||||
write.write_u32::<LittleEndian>(written_len as u32)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
||||
let footer = Footer::from_bytes(source.as_slice())?;
|
||||
let reader = source.slice_to(source.as_slice().len() - footer.size());
|
||||
Ok((footer, reader))
|
||||
if source.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
format!(
|
||||
"File corrupted. The file is smaller than 4 bytes (len={}).",
|
||||
source.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
|
||||
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
|
||||
let body_len = body_footer.len() - footer_len;
|
||||
let (body, footer_data) = body_footer.split(body_len);
|
||||
let mut cursor = footer_data.as_slice();
|
||||
let footer = Footer::deserialize(&mut cursor)?;
|
||||
Ok((footer, body))
|
||||
}
|
||||
|
||||
pub fn size(&self) -> usize {
|
||||
self.versioned_footer.size() as usize + self.meta.len() + COMMON_FOOTER_SIZE
|
||||
/// Confirms that the index will be read correctly by this version of tantivy
|
||||
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
|
||||
pub fn is_compatible(&self) -> Result<(), Incompatibility> {
|
||||
let library_version = crate::version();
|
||||
match &self.versioned_footer {
|
||||
VersionedFooter::V1 {
|
||||
crc32: _crc,
|
||||
store_compression: compression,
|
||||
} => {
|
||||
if &library_version.store_compression != compression {
|
||||
return Err(Incompatibility::CompressionMismatch {
|
||||
library_compression_format: library_version.store_compression.to_string(),
|
||||
index_compression_format: compression.to_string(),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
|
||||
library_version: library_version.clone(),
|
||||
index_version: self.version.clone(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Footer that includes a crc32 hash that enables us to checksum files in the index
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum VersionedFooter {
|
||||
UnknownVersion { version: u32, size: u32 },
|
||||
V0(CrcHashU32), // crc
|
||||
UnknownVersion,
|
||||
V1 {
|
||||
crc32: CrcHashU32,
|
||||
store_compression: String,
|
||||
},
|
||||
}
|
||||
|
||||
impl BinarySerializable for VersionedFooter {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
let mut buf = Vec::new();
|
||||
match self {
|
||||
VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression: compression,
|
||||
} => {
|
||||
// Serializes a valid `VersionedFooter` or panics if the version is unknown
|
||||
// [ version | crc_hash | compression_mode ]
|
||||
// [ 0..4 | 4..8 | variable ]
|
||||
BinarySerializable::serialize(&1u32, &mut buf)?;
|
||||
BinarySerializable::serialize(crc32, &mut buf)?;
|
||||
BinarySerializable::serialize(compression, &mut buf)?;
|
||||
}
|
||||
VersionedFooter::UnknownVersion => {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Cannot serialize an unknown versioned footer ",
|
||||
));
|
||||
}
|
||||
}
|
||||
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
|
||||
writer.write_all(&buf[..])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let len = VInt::deserialize(reader)?.0 as usize;
|
||||
let mut buf = vec![0u8; len];
|
||||
reader.read_exact(&mut buf[..])?;
|
||||
let mut cursor = &buf[..];
|
||||
let version = u32::deserialize(&mut cursor)?;
|
||||
if version == 1 {
|
||||
let crc32 = u32::deserialize(&mut cursor)?;
|
||||
let compression = String::deserialize(&mut cursor)?;
|
||||
Ok(VersionedFooter::V1 {
|
||||
crc32,
|
||||
store_compression: compression,
|
||||
})
|
||||
} else {
|
||||
Ok(VersionedFooter::UnknownVersion)
|
||||
}
|
||||
}
|
||||
}
|

impl VersionedFooter {
    /// Serializes a valid `VersionedFooter` or panics if the version is unknown
    /// [ version | crc_hash ]
    /// [ 0..4    | 4..8     ]
    pub fn to_bytes(&self) -> Vec<u8> {
        match self {
            VersionedFooter::V0(crc) => {
                let mut buf = [0u8; 8];
                LittleEndian::write_u32(&mut buf[0..4], 0);
                LittleEndian::write_u32(&mut buf[4..8], *crc);
                buf.to_vec()
            }
            VersionedFooter::UnknownVersion { .. } => {
                panic!("Unsupported index should never get serialized");
            }
        }
    }

    pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
        if footer.len() < 4 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Footer should be at least 4 bytes long.",
            ));
        }
        let version = LittleEndian::read_u32(footer);
        match version {
            // the first 4 bytes should be zeroed out, thus returning a `0`
            0 => {
                if footer.len() != 8 {
                    return Err(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        format!(
                            "File corrupted. The versioned footer len is {}, while it should be 8",
                            footer.len()
                        ),
                    ));
                }
                Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
            }
            version => Ok(VersionedFooter::UnknownVersion {
                version,
                size: footer.len() as u32,
            }),
        }
    }

    pub fn size(&self) -> u32 {
        match self {
            VersionedFooter::V0(_) => 8,
            VersionedFooter::UnknownVersion { size, .. } => *size,
        }
    }

    pub fn version(&self) -> u32 {
        match self {
            VersionedFooter::V0(_) => 0,
            VersionedFooter::UnknownVersion { version, .. } => *version,
        }
    }

    pub fn crc(&self) -> Option<CrcHashU32> {
        match self {
            VersionedFooter::V0(crc) => Some(*crc),
            VersionedFooter::V1 { crc32, .. } => Some(*crc32),
            VersionedFooter::UnknownVersion { .. } => None,
        }
    }

@@ -211,10 +205,13 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {

impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        let crc = self.hasher.take().unwrap().finalize();
        let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
        let crc32 = self.hasher.take().unwrap().finalize();
        let footer = Footer::new(VersionedFooter::V1 {
            crc32,
            store_compression: crate::store::COMPRESSION.to_string(),
        });
        let mut writer = self.writer.take().unwrap();
        writer.write_all(&footer)?;
        footer.append_footer(&mut writer)?;
        writer.terminate()
    }
}
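
`FooterProxy` hashes every byte on its way to the wrapped writer, so the checksum is ready when `terminate` appends the footer. The pattern in isolation (a standalone sketch, not the diff's code; only `crc32fast` calls that actually exist are used):

    use crc32fast::Hasher;
    use std::io::{self, Write};

    // Minimal hashing proxy: update the CRC as bytes stream through.
    struct HashingWriter<W: Write> {
        inner: W,
        hasher: Hasher,
    }

    impl<W: Write> Write for HashingWriter<W> {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            let written = self.inner.write(buf)?;
            self.hasher.update(&buf[..written]); // hash exactly what was accepted
            Ok(written)
        }
        fn flush(&mut self) -> io::Result<()> {
            self.inner.flush()
        }
    }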

@@ -222,56 +219,121 @@ impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {

#[cfg(test)]
mod tests {

    use super::CrcHashU32;
    use super::FooterProxy;
    use crate::common::BinarySerializable;
    use crate::directory::footer::{Footer, VersionedFooter};
    use crate::directory::TerminatingWrite;
    use byteorder::{ByteOrder, LittleEndian};
    use regex::Regex;

    #[test]
    fn test_versioned_footer() {
        let mut vec = Vec::new();
        let footer_proxy = FooterProxy::new(&mut vec);
        assert!(footer_proxy.terminate().is_ok());
        assert_eq!(vec.len(), 167);
        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
        if let VersionedFooter::V1 {
            crc32: _,
            store_compression,
        } = footer.versioned_footer
        {
            assert_eq!(store_compression, crate::store::COMPRESSION);
        } else {
            panic!("Versioned footer should be V1.");
        }
        assert_eq!(&footer.version, crate::version());
    }

    #[test]
    fn test_serialize_deserialize_footer() {
        let crc = 123456;
        let footer = Footer::new(VersionedFooter::V0(crc));
        let footer_bytes = footer.to_bytes();
        assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
        let mut buffer = Vec::new();
        let crc32 = 123456u32;
        let footer: Footer = Footer::new(VersionedFooter::V1 {
            crc32,
            store_compression: "lz4".to_string(),
        });
        footer.serialize(&mut buffer).unwrap();
        let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
        assert_eq!(footer_deser, footer);
    }

    #[test]
    fn footer_length() {
        // test to make sure the ascii art in the doc-strings is correct
        let crc = 1111111 as u32;
        let versioned_footer = VersionedFooter::V0(crc);
        assert_eq!(versioned_footer.size(), 8);
        let crc32 = 1111111u32;
        let versioned_footer = VersionedFooter::V1 {
            crc32,
            store_compression: "lz4".to_string(),
        };
        let mut buf = Vec::new();
        versioned_footer.serialize(&mut buf).unwrap();
        assert_eq!(buf.len(), 13);
        let footer = Footer::new(versioned_footer);
        let regex_ptn = Regex::new(
            "tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
        )
        .unwrap();
        assert!(regex_ptn.find(&footer.meta).is_some());
        assert!(regex_ptn.is_match(&footer.meta));
    }

    #[test]
    fn versioned_footer_from_bytes() {
        use byteorder::{ByteOrder, LittleEndian};
        let v_footer_bytes = vec![0, 0, 0, 0, 12, 35, 89, 18];
        let versioned_footer = VersionedFooter::from_bytes(&v_footer_bytes).unwrap();
        let expected_versioned_footer =
            VersionedFooter::V0(LittleEndian::read_u32(&[12, 35, 89, 18]));
        assert_eq!(versioned_footer, expected_versioned_footer);

        assert_eq!(versioned_footer.to_bytes(), v_footer_bytes);
    }

    #[should_panic(expected = "Unsupported index should never get serialized")]
    #[test]
    fn versioned_footer_panic() {
        use byteorder::{ByteOrder, LittleEndian};
        let v_footer_bytes = vec![1; 8];
        let versioned_footer = VersionedFooter::from_bytes(&v_footer_bytes).unwrap();
        let expected_version = LittleEndian::read_u32(&[1, 1, 1, 1]);
        let expected_versioned_footer = VersionedFooter::UnknownVersion {
            version: expected_version,
            size: v_footer_bytes.len() as u32,
        let v_footer_bytes = vec![
            // versioned footer length
            12 | 128,
            // index format version
            1,
            0,
            0,
            0,
            // crc 32
            12,
            35,
            89,
            18,
            // compression format
            3 | 128,
            b'l',
            b'z',
            b'4',
        ];
        let mut cursor = &v_footer_bytes[..];
        let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
        assert!(cursor.is_empty());
        let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
        let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
            crc32: expected_crc,
            store_compression: "lz4".to_string(),
        };
        assert_eq!(versioned_footer, expected_versioned_footer);
        let mut buffer = Vec::new();
        assert!(versioned_footer.serialize(&mut buffer).is_ok());
        assert_eq!(&v_footer_bytes[..], &buffer[..]);
    }

        versioned_footer.to_bytes();
    #[test]
    fn versioned_footer_panic() {
        let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
        let mut b = &v_footer_bytes[..];
        let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
        assert!(b.is_empty());
        let expected_versioned_footer = VersionedFooter::UnknownVersion;
        assert_eq!(versioned_footer, expected_versioned_footer);
        let mut buf = Vec::new();
        assert!(versioned_footer.serialize(&mut buf).is_err());
    }

    #[test]
    #[cfg(not(feature = "lz4"))]
    fn compression_mismatch() {
        let crc32 = 1111111u32;
        let versioned_footer = VersionedFooter::V1 {
            crc32,
            store_compression: "lz4".to_string(),
        };
        let footer = Footer::new(versioned_footer);
        let res = footer.is_compatible();
        assert!(res.is_err());
    }
}

@@ -9,7 +9,7 @@ use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption;
use crate::Directory;
use crate::Result;

use crc32fast::Hasher;
use serde_json;
use std::collections::HashSet;

@@ -65,7 +65,7 @@ fn save_managed_paths(

impl ManagedDirectory {
    /// Wraps a directory as managed directory.
    pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
    pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> {
        match directory.atomic_read(&MANAGED_FILEPATH) {
            Ok(data) => {
                let managed_files_json = String::from_utf8_lossy(&data);

@@ -88,6 +88,11 @@ impl ManagedDirectory {
                meta_informations: Arc::default(),
            }),
            Err(OpenReadError::IOError(e)) => Err(From::from(e)),
            Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
                // For the moment, this should never happen: `meta.json`
                // does not have any footer, so no incompatibility can be detected.
                Err(crate::TantivyError::IncompatibleIndex(incompatibility))
            }
        }
    }

@@ -261,8 +266,9 @@ impl ManagedDirectory {

impl Directory for ManagedDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
        let read_only_source = self.directory.open_read(path)?;
        let (_footer, reader) = Footer::extract_footer(read_only_source)
        let (footer, reader) = Footer::extract_footer(read_only_source)
            .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
        footer.is_compatible()?;
        Ok(reader)
    }

@@ -409,6 +415,8 @@ mod tests_mmap_specific {
        write.write_all(&[3u8, 4u8, 5u8]).unwrap();
        write.terminate().unwrap();

        let read_source = managed_directory.open_read(test_path2).unwrap();
        assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
        assert!(managed_directory.list_damaged().unwrap().is_empty());

        let mut corrupted_path = tempdir_path.clone();

@@ -131,14 +131,13 @@ impl MmapCache {
        }
        self.cache.remove(full_path);
        self.counters.miss += 1;
        Ok(if let Some(mmap) = open_mmap(full_path)? {
        let mmap_opt = open_mmap(full_path)?;
        Ok(mmap_opt.map(|mmap| {
            let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
            let mmap_weak = Arc::downgrade(&mmap_arc);
            self.cache.insert(full_path.to_owned(), mmap_weak);
            Some(mmap_arc)
        } else {
            None
        })
            mmap_arc
        }))
    }
}

@@ -23,11 +23,9 @@ pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
use std::io::{self, BufWriter, Write};
use std::path::PathBuf;

/// Outcome of the garbage collection
pub struct GarbageCollectionResult {
    /// List of files that were deleted in this cycle

@@ -48,6 +46,9 @@ pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory;

/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(());

/// Trait used to indicate when no more writes need to be done on a writer

@@ -78,6 +79,13 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
    }
}

#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
    fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
        self.flush()
    }
}

/// Write object for Directory.
///
/// `WritePtr` are required to implement both Write

@@ -144,6 +144,16 @@ impl RAMDirectory {
    pub fn total_mem_usage(&self) -> usize {
        self.fs.read().unwrap().total_mem_usage()
    }

    pub fn persist(&self, dest: &mut dyn Directory) -> crate::Result<()> {
        let wlock = self.fs.write().unwrap();
        for (path, source) in wlock.fs.iter() {
            let mut dest_wrt = dest.open_write(path)?;
            dest_wrt.write_all(source.as_slice())?;
            dest_wrt.terminate()?;
        }
        Ok(())
    }
}
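
A quick usage sketch for the new `persist` (the destination directory and the path are illustrative assumptions):

    // Build an index fully in RAM first, then copy the raw files to disk.
    fn flush_to_disk(ram: &RAMDirectory) -> crate::Result<()> {
        let mut on_disk = MmapDirectory::open("/tmp/my_index")?; // path is made up
        ram.persist(&mut on_disk)?;
        Ok(())
    }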

impl Directory for RAMDirectory {

@@ -191,7 +201,7 @@ impl Directory for RAMDirectory {
        // Reserve the path to prevent calls to .write() from succeeding.
        self.fs.write().unwrap().write(path_buf.clone(), &[]);

        let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
        let mut vec_writer = VecWriter::new(path_buf, self.clone());
        vec_writer.write_all(data)?;
        vec_writer.flush()?;
        if path == Path::new(&*META_FILEPATH) {

@@ -70,6 +70,12 @@ impl ReadOnlySource {
        (left, right)
    }

    /// Splits into 2 `ReadOnlySource`s, at the offset `end - right_len`.
    pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
        let left_len = self.len() - right_len;
        self.split(left_len)
    }
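
Concretely (a sketch; `ReadOnlySource::from(Vec<u8>)` is assumed, as used elsewhere in the crate):

    // An 8-byte source split with right_len = 3 yields bytes 0..5 and 5..8.
    // This is how a fixed-size trailer, like the footer above, is peeled off.
    let source = ReadOnlySource::from(vec![0u8, 1, 2, 3, 4, 5, 6, 7]);
    let (body, tail) = source.split_from_end(3);
    assert_eq!(body.len(), 5);
    assert_eq!(tail.as_slice(), &[5u8, 6, 7]);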

    /// Creates a ReadOnlySource that is just a
    /// view over a slice of the data.
    ///

@@ -1,4 +1,6 @@
use super::*;
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write;
use std::mem;
use std::path::{Path, PathBuf};

@@ -7,17 +9,109 @@ use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;
use std::time::Duration;

#[test]
fn test_ram_directory() {
    let mut ram_directory = RAMDirectory::create();
    test_directory(&mut ram_directory);
#[cfg(feature = "mmap")]
mod mmap_directory_tests {
    use crate::directory::MmapDirectory;

    type DirectoryImpl = MmapDirectory;

    fn make_directory() -> DirectoryImpl {
        MmapDirectory::create_from_tempdir().unwrap()
    }

    #[test]
    fn test_simple() {
        let mut directory = make_directory();
        super::test_simple(&mut directory);
    }

    #[test]
    fn test_write_create_the_file() {
        let mut directory = make_directory();
        super::test_write_create_the_file(&mut directory);
    }

    #[test]
    fn test_rewrite_forbidden() {
        let mut directory = make_directory();
        super::test_rewrite_forbidden(&mut directory);
    }

    #[test]
    fn test_directory_delete() {
        let mut directory = make_directory();
        super::test_directory_delete(&mut directory);
    }

    #[test]
    fn test_lock_non_blocking() {
        let mut directory = make_directory();
        super::test_lock_non_blocking(&mut directory);
    }

    #[test]
    fn test_lock_blocking() {
        let mut directory = make_directory();
        super::test_lock_blocking(&mut directory);
    }

    #[test]
    fn test_watch() {
        let mut directory = make_directory();
        super::test_watch(&mut directory);
    }
}

#[test]
#[cfg(feature = "mmap")]
fn test_mmap_directory() {
    let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
    test_directory(&mut mmap_directory);
mod ram_directory_tests {
    use crate::directory::RAMDirectory;

    type DirectoryImpl = RAMDirectory;

    fn make_directory() -> DirectoryImpl {
        RAMDirectory::default()
    }

    #[test]
    fn test_simple() {
        let mut directory = make_directory();
        super::test_simple(&mut directory);
    }

    #[test]
    fn test_write_create_the_file() {
        let mut directory = make_directory();
        super::test_write_create_the_file(&mut directory);
    }

    #[test]
    fn test_rewrite_forbidden() {
        let mut directory = make_directory();
        super::test_rewrite_forbidden(&mut directory);
    }

    #[test]
    fn test_directory_delete() {
        let mut directory = make_directory();
        super::test_directory_delete(&mut directory);
    }

    #[test]
    fn test_lock_non_blocking() {
        let mut directory = make_directory();
        super::test_lock_non_blocking(&mut directory);
    }

    #[test]
    fn test_lock_blocking() {
        let mut directory = make_directory();
        super::test_lock_blocking(&mut directory);
    }

    #[test]
    fn test_watch() {
        let mut directory = make_directory();
        super::test_watch(&mut directory);
    }
}

#[test]

@@ -97,16 +191,6 @@ fn test_directory_delete(directory: &mut dyn Directory) {
    assert!(directory.delete(&test_path).is_err());
}

fn test_directory(directory: &mut dyn Directory) {
    test_simple(directory);
    test_rewrite_forbidden(directory);
    test_write_create_the_file(directory);
    test_directory_delete(directory);
    test_lock_non_blocking(directory);
    test_lock_blocking(directory);
    test_watch(directory);
}

fn test_watch(directory: &mut dyn Directory) {
    let num_progress: Arc<AtomicUsize> = Default::default();
    let counter: Arc<AtomicUsize> = Default::default();

@@ -175,9 +259,11 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
    assert!(lock_a_res.is_ok());
    let in_thread = Arc::new(AtomicBool::default());
    let in_thread_clone = in_thread.clone();
    let (sender, receiver) = oneshot::channel();
    std::thread::spawn(move || {
        //< lock_a_res is sent to the thread.
        in_thread_clone.store(true, SeqCst);
        let _just_sync = block_on(receiver);
        // explicitly dropping lock_a_res. It would have been sufficient to just force it
        // to be part of the move, but the intent seems clearer that way.
        drop(lock_a_res);

@@ -190,12 +276,18 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
    });
    assert!(lock_a_res.is_err());
}
{
    let lock_a_res = directory.acquire_lock(&Lock {
    let directory_clone = directory.box_clone();
    let (sender2, receiver2) = oneshot::channel();
    let join_handle = std::thread::spawn(move || {
        assert!(sender2.send(()).is_ok());
        let lock_a_res = directory_clone.acquire_lock(&Lock {
            filepath: PathBuf::from("a.lock"),
            is_blocking: true,
        });
        assert!(in_thread.load(SeqCst));
        assert!(lock_a_res.is_ok());
    }
    });
    assert!(block_on(receiver2).is_ok());
    assert!(sender.send(()).is_ok());
    assert!(join_handle.join().is_ok());
}

@@ -24,13 +24,20 @@ pub struct WatchCallbackList {
#[derive(Clone)]
pub struct WatchHandle(Arc<WatchCallback>);

impl WatchHandle {
    /// Creates a `WatchHandle`.
    pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
        WatchHandle(watch_callback)
    }
}

impl WatchCallbackList {
    /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
    pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
        let watch_callback_arc = Arc::new(watch_callback);
        let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
        self.router.write().unwrap().push(watch_callback_weak);
        WatchHandle(watch_callback_arc)
        WatchHandle::new(watch_callback_arc)
    }
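
The lifetime contract is the interesting part: the list stores only a `Weak`, so a callback lives exactly as long as its `WatchHandle`. A standalone sketch of that mechanic (the `Arc<Box<dyn Fn()>>` stands in for `Arc<WatchCallback>`):

    use std::sync::Arc;

    fn main() {
        let callback: Arc<Box<dyn Fn() + Send + Sync>> =
            Arc::new(Box::new(|| println!("meta.json changed")));
        let weak = Arc::downgrade(&callback); // what the router keeps
        let handle = callback; // what the subscriber keeps
        assert!(weak.upgrade().is_some()); // still reachable: callback will fire
        drop(handle);
        assert!(weak.upgrade().is_none()); // handle gone: router silently skips it
    }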

    fn list_callback(&self) -> Vec<Arc<WatchCallback>> {

@@ -2,8 +2,8 @@

use std::io;

use crate::directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError;
use crate::query;
use crate::schema;

@@ -80,6 +80,9 @@ pub enum TantivyError {
    /// System error. (e.g.: We failed spawning a new thread)
    #[fail(display = "System error.'{}'", _0)]
    SystemError(String),
    /// Index incompatible with current version of tantivy
    #[fail(display = "{:?}", _0)]
    IncompatibleIndex(Incompatibility),
}

impl From<DataCorruption> for TantivyError {

@@ -129,6 +132,9 @@ impl From<OpenReadError> for TantivyError {
        match error {
            OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
            OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
            OpenReadError::IncompatibleIndex(incompatibility) => {
                TantivyError::IncompatibleIndex(incompatibility)
            }
        }
    }
}

@@ -1,9 +1,8 @@
use crate::common::HasLen;
use crate::common::{BitSet, HasLen};
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::space_usage::ByteCount;
use crate::DocId;
use bit_set::BitSet;
use std::io;
use std::io::Write;

@@ -17,7 +16,7 @@ pub fn write_delete_bitset(
) -> io::Result<()> {
    let mut byte = 0u8;
    let mut shift = 0u8;
    for doc in 0..(max_doc as usize) {
    for doc in 0..max_doc {
        if delete_bitset.contains(doc) {
            byte |= 1 << shift;
        }

@@ -32,7 +31,7 @@ pub fn write_delete_bitset(
    if max_doc % 8 > 0 {
        writer.write_all(&[byte])?;
    }
    writer.flush()
    Ok(())
}
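
The layout is one bit per `DocId`, least-significant bit first, with a final partial byte flushed at the end. A self-contained sketch of the same packing (hypothetical helper, not the diff's code):

    // pack_bits(&[1, 9], 10) == [0b0000_0010, 0b0000_0010]:
    // doc 1 sets bit 1 of byte 0, doc 9 sets bit 1 of byte 1.
    fn pack_bits(deleted_docs: &[u32], max_doc: u32) -> Vec<u8> {
        let mut bytes = vec![0u8; (max_doc as usize + 7) / 8];
        for &doc in deleted_docs {
            bytes[(doc / 8) as usize] |= 1 << (doc % 8);
        }
        bytes
    }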

/// Set of deleted `DocId`s.

@@ -86,7 +85,6 @@ impl HasLen for DeleteBitSet {
mod tests {
    use super::*;
    use crate::directory::*;
    use bit_set::BitSet;
    use std::path::PathBuf;

    fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {

@@ -95,27 +93,26 @@ mod tests {
        {
            let mut writer = directory.open_write(&*test_path).unwrap();
            write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
            writer.terminate().unwrap();
        }
        {
            let source = directory.open_read(&test_path).unwrap();
            let delete_bitset = DeleteBitSet::open(source);
            for doc in 0..max_doc as usize {
                assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
            }
            assert_eq!(delete_bitset.len(), bitset.len());
        let source = directory.open_read(&test_path).unwrap();
        let delete_bitset = DeleteBitSet::open(source);
        for doc in 0..max_doc {
            assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
        }
        assert_eq!(delete_bitset.len(), bitset.len());
    }

    #[test]
    fn test_delete_bitset() {
        {
            let mut bitset = BitSet::with_capacity(10);
            let mut bitset = BitSet::with_max_value(10);
            bitset.insert(1);
            bitset.insert(9);
            test_delete_bitset_helper(&bitset, 10);
        }
        {
            let mut bitset = BitSet::with_capacity(8);
            let mut bitset = BitSet::with_max_value(8);
            bitset.insert(1);
            bitset.insert(2);
            bitset.insert(3);

@@ -33,6 +33,7 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common;
use crate::schema::Cardinality;
use crate::schema::FieldType;

@@ -49,7 +50,7 @@ mod serializer;
mod writer;

/// Trait for types that are allowed for fast fields: (u64, i64 and f64).
pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
    /// Converts a value from u64
    ///
    /// Internally all fast field values are encoded as u64.

@@ -69,6 +70,12 @@ pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
    /// Cast value to `u64`.
    /// The value is just reinterpreted in memory.
    fn as_u64(&self) -> u64;

    /// Build a default value. This default value is never used, so the value does not
    /// really matter.
    fn make_zero() -> Self {
        Self::from_u64(0i64.to_u64())
    }
}

impl FastValue for u64 {

@@ -135,11 +142,34 @@ impl FastValue for f64 {
    }
}

impl FastValue for crate::DateTime {
    fn from_u64(timestamp_u64: u64) -> Self {
        let timestamp_i64 = i64::from_u64(timestamp_u64);
        crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
    }

    fn to_u64(&self) -> u64 {
        self.timestamp().to_u64()
    }

    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
        match *field_type {
            FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
            _ => None,
        }
    }

    fn as_u64(&self) -> u64 {
        self.timestamp().as_u64()
    }
}
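
Dates thus ride on the same machinery as `i64` fast fields: the value is reduced to a seconds-precision timestamp and pushed through the order-preserving i64-to-u64 mapping. A sketch of that mapping (assumption: `common::i64_to_u64` is the usual sign-bit flip; the real helper lives in `crate::common`):

    // Flipping the sign bit maps i64::MIN..=i64::MAX onto 0..=u64::MAX
    // monotonically, so u64 comparisons match the original i64 order.
    fn i64_to_u64(val: i64) -> u64 {
        (val as u64) ^ (1u64 << 63)
    }

    fn u64_to_i64(val: u64) -> i64 {
        (val ^ (1u64 << 63)) as i64
    }

    fn main() {
        assert!(i64_to_u64(-1) < i64_to_u64(0));
        assert_eq!(u64_to_i64(i64_to_u64(-42)), -42);
    }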

fn value_to_u64(value: &Value) -> u64 {
    match *value {
        Value::U64(ref val) => *val,
        Value::I64(ref val) => common::i64_to_u64(*val),
        Value::F64(ref val) => common::f64_to_u64(*val),
        Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
        _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
    }
}

@@ -151,10 +181,12 @@ mod tests {
    use crate::common::CompositeFile;
    use crate::directory::{Directory, RAMDirectory, WritePtr};
    use crate::fastfield::FastFieldReader;
    use crate::schema::Document;
    use crate::merge_policy::NoMergePolicy;
    use crate::schema::Field;
    use crate::schema::Schema;
    use crate::schema::FAST;
    use crate::schema::{Document, IntOptions};
    use crate::{Index, SegmentId, SegmentReader};
    use once_cell::sync::Lazy;
    use rand::prelude::SliceRandom;
    use rand::rngs::StdRng;

@@ -178,6 +210,12 @@ mod tests {
        assert_eq!(test_fastfield.get(2), 300);
    }

    #[test]
    pub fn test_fastfield_i64_u64() {
        let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
        assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
    }

    #[test]
    fn test_intfastfield_small() {
        let path = Path::new("test");

@@ -429,6 +467,93 @@ mod tests {
        }
    }
}

    #[test]
    fn test_merge_missing_date_fast_field() {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", FAST);
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(date_field => crate::chrono::prelude::Utc::now()));
        index_writer.commit().unwrap();
        index_writer.add_document(doc!());
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let segment_ids: Vec<SegmentId> = reader
            .searcher()
            .segment_readers()
            .iter()
            .map(SegmentReader::segment_id)
            .collect();
        assert_eq!(segment_ids.len(), 2);
        let merge_future = index_writer.merge(&segment_ids[..]);
        let merge_res = futures::executor::block_on(merge_future);
        assert!(merge_res.is_ok());
        assert!(reader.reload().is_ok());
        assert_eq!(reader.searcher().segment_readers().len(), 1);
    }

    #[test]
    fn test_default_datetime() {
        assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
    }

    #[test]
    fn test_datefastfield() {
        use crate::fastfield::FastValue;
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", FAST);
        let multi_date_field = schema_builder.add_date_field(
            "multi_date",
            IntOptions::default().set_fast(Cardinality::MultiValues),
        );
        let schema = schema_builder.build();
        let index = Index::create_in_ram(schema);
        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
        index_writer.set_merge_policy(Box::new(NoMergePolicy));
        index_writer.add_document(doc!(
            date_field => crate::DateTime::from_u64(1i64.to_u64()),
            multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
            multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
        ));
        index_writer.add_document(doc!(
            date_field => crate::DateTime::from_u64(4i64.to_u64())
        ));
        index_writer.add_document(doc!(
            multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
            multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
        ));
        index_writer.commit().unwrap();
        let reader = index.reader().unwrap();
        let searcher = reader.searcher();
        assert_eq!(searcher.segment_readers().len(), 1);
        let segment_reader = searcher.segment_reader(0);
        let fast_fields = segment_reader.fast_fields();
        let date_fast_field = fast_fields.date(date_field).unwrap();
        let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
        let mut dates = vec![];
        {
            assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
            dates_fast_field.get_vals(0u32, &mut dates);
            assert_eq!(dates.len(), 2);
            assert_eq!(dates[0].timestamp(), 2i64);
            assert_eq!(dates[1].timestamp(), 3i64);
        }
        {
            assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
            dates_fast_field.get_vals(1u32, &mut dates);
            assert!(dates.is_empty());
        }
        {
            assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
            dates_fast_field.get_vals(2u32, &mut dates);
            assert_eq!(dates.len(), 2);
            assert_eq!(dates[0].timestamp(), 5i64);
            assert_eq!(dates[1].timestamp(), 6i64);
        }
    }
}

#[cfg(all(test, feature = "unstable"))]

@@ -45,7 +45,7 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
    pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
        let (start, stop) = self.range(doc);
        let len = (stop - start) as usize;
        vals.resize(len, Item::default());
        vals.resize(len, Item::make_zero());
        self.vals_reader.get_range_u64(start, &mut vals[..]);
    }

@@ -15,9 +15,11 @@ pub struct FastFieldReaders {
    fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
    fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
    fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
    fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
    fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
    fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
    fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
    fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
    fast_bytes: HashMap<Field, BytesFastFieldReader>,
    fast_fields_composite: CompositeFile,
}

@@ -26,6 +28,7 @@ enum FastType {
    I64,
    U64,
    F64,
    Date,
}

fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {

@@ -39,6 +42,9 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
        FieldType::F64(options) => options
            .get_fastfield_cardinality()
            .map(|cardinality| (FastType::F64, cardinality)),
        FieldType::Date(options) => options
            .get_fastfield_cardinality()
            .map(|cardinality| (FastType::Date, cardinality)),
        FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
        _ => None,
    }

@@ -53,9 +59,11 @@ impl FastFieldReaders {
            fast_field_i64: Default::default(),
            fast_field_u64: Default::default(),
            fast_field_f64: Default::default(),
            fast_field_date: Default::default(),
            fast_field_i64s: Default::default(),
            fast_field_u64s: Default::default(),
            fast_field_f64s: Default::default(),
            fast_field_dates: Default::default(),
            fast_bytes: Default::default(),
            fast_fields_composite: fast_fields_composite.clone(),
        };

@@ -95,6 +103,12 @@ impl FastFieldReaders {
                        FastFieldReader::open(fast_field_data.clone()),
                    );
                }
                FastType::Date => {
                    fast_field_readers.fast_field_date.insert(
                        field,
                        FastFieldReader::open(fast_field_data.clone()),
                    );
                }
            }
        } else {
            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));

@@ -130,6 +144,14 @@ impl FastFieldReaders {
                        .fast_field_f64s
                        .insert(field, multivalued_int_fast_field);
                }
                FastType::Date => {
                    let vals_reader = FastFieldReader::open(fast_field_data);
                    let multivalued_int_fast_field =
                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
                    fast_field_readers
                        .fast_field_dates
                        .insert(field, multivalued_int_fast_field);
                }
            }
        } else {
            return Err(From::from(FastFieldNotAvailableError::new(field_entry)));

@@ -156,8 +178,6 @@ impl FastFieldReaders {
    /// If the field is an i64 fast field, return the associated u64 reader. Values are
    /// mapped from i64 to u64 using the (unique) monotonic mapping.
    ///
    /// TODO should it also be lenient with f64?
    ///
    /// This method is useful when merging segment readers.
    pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
        if let Some(u64_ff_reader) = self.u64(field) {

@@ -166,6 +186,12 @@ impl FastFieldReaders {
        if let Some(i64_ff_reader) = self.i64(field) {
            return Some(i64_ff_reader.into_u64_reader());
        }
        if let Some(f64_ff_reader) = self.f64(field) {
            return Some(f64_ff_reader.into_u64_reader());
        }
        if let Some(date_ff_reader) = self.date(field) {
            return Some(date_ff_reader.into_u64_reader());
        }
        None
    }

@@ -176,6 +202,13 @@ impl FastFieldReaders {
        self.fast_field_i64.get(&field).cloned()
    }

    /// Returns the date fast field reader associated to `field`.
    ///
    /// If `field` is not a date fast field, this method returns `None`.
    pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
        self.fast_field_date.get(&field).cloned()
    }

    /// Returns the `f64` fast field reader associated to `field`.
    ///
    /// If `field` is not a f64 fast field, this method returns `None`.

@@ -202,6 +235,9 @@ impl FastFieldReaders {
        if let Some(i64s_ff_reader) = self.i64s(field) {
            return Some(i64s_ff_reader.into_u64s_reader());
        }
        if let Some(f64s_ff_reader) = self.f64s(field) {
            return Some(f64s_ff_reader.into_u64s_reader());
        }
        None
    }

@@ -219,6 +255,13 @@ impl FastFieldReaders {
        self.fast_field_f64s.get(&field).cloned()
    }

    /// Returns a `crate::DateTime` multi-valued fast field reader associated to `field`.
    ///
    /// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
    pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
        self.fast_field_dates.get(&field).cloned()
    }

    /// Returns the `bytes` fast field reader associated to `field`.
    ///
    /// If `field` is not a bytes fast field, returns `None`.

@@ -4,7 +4,7 @@ use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap;

@@ -17,6 +17,14 @@ pub struct FastFieldsWriter {
    bytes_value_writers: Vec<BytesFastFieldWriter>,
}

fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
    match *field_entry.field_type() {
        FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
        FieldType::F64(_) => common::f64_to_u64(0.0f64),
        _ => 0u64,
    }
}

impl FastFieldsWriter {
    /// Create all `FastFieldWriter` required by the schema.
    pub fn from_schema(schema: &Schema) -> FastFieldsWriter {

@@ -25,18 +33,15 @@ impl FastFieldsWriter {
        let mut bytes_value_writers = Vec::new();

        for (field, field_entry) in schema.fields() {
            let default_value = match *field_entry.field_type() {
                FieldType::I64(_) => common::i64_to_u64(0i64),
                FieldType::F64(_) => common::f64_to_u64(0.0f64),
                _ => 0u64,
            };
            match *field_entry.field_type() {
                FieldType::I64(ref int_options)
                | FieldType::U64(ref int_options)
                | FieldType::F64(ref int_options) => {
                | FieldType::F64(ref int_options)
                | FieldType::Date(ref int_options) => {
                    match int_options.get_fastfield_cardinality() {
                        Some(Cardinality::SingleValue) => {
                            let mut fast_field_writer = IntFastFieldWriter::new(field);
                            let default_value = fast_field_default_value(field_entry);
                            fast_field_writer.set_val_if_missing(default_value);
                            single_value_writers.push(fast_field_writer);
                        }

@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
use crate::Opstamp;
use std::mem;
use std::ops::DerefMut;
use std::sync::{Arc, RwLock};
use std::sync::{Arc, RwLock, Weak};

// The DeleteQueue is conceptually similar to a multiple
// consumer single producer broadcast channel.

@@ -14,14 +14,15 @@ use std::sync::{Arc, RwLock};
//
// New consumers can be created in two ways
// - calling `delete_queue.cursor()` returns a cursor, that
//   will include all future delete operations (and no past operations).
//   will include all future delete operations (and some or none
//   of the past operations... The client is in charge of checking the opstamps.).
// - cloning an existing cursor returns a new cursor, that
//   is at the exact same position, and can now advance independently
//   from the original cursor.
#[derive(Default)]
struct InnerDeleteQueue {
    writer: Vec<DeleteOperation>,
    last_block: Option<Arc<Block>>,
    last_block: Weak<Block>,
}

#[derive(Clone)]

@@ -32,21 +33,31 @@ pub struct DeleteQueue {

impl DeleteQueue {
    // Creates a new delete queue.
    pub fn new() -> DeleteQueue {
        let delete_queue = DeleteQueue {
        DeleteQueue {
            inner: Arc::default(),
        };

        let next_block = NextBlock::from(delete_queue.clone());

        {
            let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
            delete_queue_wlock.last_block = Some(Arc::new(Block {
                operations: Arc::default(),
                next: next_block,
            }));
        }
    }

        delete_queue
    fn get_last_block(&self) -> Arc<Block> {
        {
            // try to get the last block by simply acquiring the read lock.
            let rlock = self.inner.read().unwrap();
            if let Some(block) = rlock.last_block.upgrade() {
                return block;
            }
        }
        // It failed. Let's double check after acquiring the write lock, as someone could have
        // called `get_last_block` right after we released the rlock.
        let mut wlock = self.inner.write().unwrap();
        if let Some(block) = wlock.last_block.upgrade() {
            return block;
        }
        let block = Arc::new(Block {
            operations: Arc::default(),
            next: NextBlock::from(self.clone()),
        });
        wlock.last_block = Arc::downgrade(&block);
        block
    }
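
This is the classic double-checked pattern over an `RwLock<Weak<_>>`: probe cheaply under the read lock, then re-check under the write lock because another writer may have repopulated the slot in between. In isolation (a standalone sketch, not tantivy code):

    use std::sync::{Arc, RwLock, Weak};

    fn get_or_create(slot: &RwLock<Weak<String>>) -> Arc<String> {
        if let Some(existing) = slot.read().unwrap().upgrade() {
            return existing; // fast path: read lock only
        }
        let mut slot_w = slot.write().unwrap();
        if let Some(existing) = slot_w.upgrade() {
            return existing; // another thread re-created it while we waited
        }
        let fresh = Arc::new(String::from("fresh block"));
        *slot_w = Arc::downgrade(&fresh); // the slot holds a Weak: no leak once unused
        fresh
    }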

    // Creates a new cursor that makes it possible to

@@ -54,17 +65,7 @@ impl DeleteQueue {
    //
    // Past delete operations are not accessible.
    pub fn cursor(&self) -> DeleteCursor {
        let last_block = self
            .inner
            .read()
            .expect("Read lock poisoned when opening delete queue cursor")
            .last_block
            .clone()
            .expect(
                "Failed to unwrap last_block. This should never happen
                as the Option<> is only here to make
                initialization possible",
            );
        let last_block = self.get_last_block();
        let operations_len = last_block.operations.len();
        DeleteCursor {
            block: last_block,

@@ -100,23 +101,19 @@ impl DeleteQueue {
            .write()
            .expect("Failed to acquire write lock on delete queue writer");

        let delete_operations;
        {
            let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
            if writer.is_empty() {
                return None;
            }
            delete_operations = mem::replace(writer, vec![]);
        if self_wlock.writer.is_empty() {
            return None;
        }

        let next_block = NextBlock::from(self.clone());
        {
            self_wlock.last_block = Some(Arc::new(Block {
                operations: Arc::new(delete_operations),
                next: next_block,
            }));
        }
        self_wlock.last_block.clone()
        let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);

        let new_block = Arc::new(Block {
            operations: Arc::new(delete_operations.into_boxed_slice()),
            next: NextBlock::from(self.clone()),
        });

        self_wlock.last_block = Arc::downgrade(&new_block);
        Some(new_block)
    }
}

@@ -170,7 +167,7 @@ impl NextBlock {
}

struct Block {
    operations: Arc<Vec<DeleteOperation>>,
    operations: Arc<Box<[DeleteOperation]>>,
    next: NextBlock,
}

@@ -1,37 +1,40 @@
use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater;
use super::PreparedCommit;
use crate::common::BitSet;
use crate::core::Index;
use crate::core::Segment;
use crate::core::SegmentComponent;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::core::SegmentReader;
use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::directory::{TerminatingWrite, WatchCallbackList};
use crate::docset::DocSet;
use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset;
use crate::indexer::delete_queue::{DeleteCursor, DeleteQueue};
use crate::indexer::doc_opstamp_mapping::DocToOpstampMapping;
use crate::indexer::operation::DeleteOperation;
use crate::indexer::segment_manager::SegmentRegisters;
use crate::indexer::segment_register::SegmentRegister;
use crate::indexer::stamper::Stamper;
use crate::indexer::MergePolicy;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentWriter;
use crate::reader::NRTReader;
use crate::schema::Document;
use crate::schema::IndexRecordOption;
use crate::schema::Term;
use crate::Opstamp;
use bit_set::BitSet;
use crate::tokenizer::TokenizerManager;
use crate::{IndexReader, Opstamp};
use crossbeam::channel;
use futures::executor::block_on;
use futures::future::Future;
use smallvec::smallvec;
use smallvec::SmallVec;
use smallvec::{smallvec, SmallVec};
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use std::sync::{Arc, RwLock};
use std::thread;
use std::thread::JoinHandle;

@@ -68,6 +71,8 @@ pub struct IndexWriter {
    // lifetime of the lock with that of the IndexWriter.
    _directory_lock: Option<DirectoryLock>,

    segment_registers: Arc<RwLock<SegmentRegisters>>,

    index: Index,

    heap_size_in_bytes_per_thread: usize,

@@ -87,6 +92,8 @@ pub struct IndexWriter {

    stamper: Stamper,
    committed_opstamp: Opstamp,

    on_commit: WatchCallbackList,
}

fn compute_deleted_bitset(

@@ -115,7 +122,7 @@ fn compute_deleted_bitset(
    while docset.advance() {
        let deleted_doc = docset.doc();
        if deleted_doc < limit_doc {
            delete_bitset.insert(deleted_doc as usize);
            delete_bitset.insert(deleted_doc);
            might_have_changed = true;
        }
    }

@@ -126,52 +133,78 @@ fn compute_deleted_bitset(
    Ok(might_have_changed)
}

/// Advance delete for the given segment up
/// to the target opstamp.
/// Advance delete for the given segment up to the target opstamp.
///
/// Note that there is no guarantee that the resulting `segment_entry` delete_opstamp
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, the `segment_entry` is not updated.
pub(crate) fn advance_deletes(
    mut segment: Segment,
    segment_entry: &mut SegmentEntry,
    target_opstamp: Opstamp,
) -> crate::Result<()> {
    {
        if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
            // We are already up-to-date here.
            return Ok(());
        }
    if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
        // We are already up-to-date here.
        return Ok(());
    }

    let segment_reader = SegmentReader::open(&segment)?;
    let delete_bitset_opt = segment_entry.take_delete_bitset();

    let max_doc = segment_reader.max_doc();
    let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
        Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
        None => BitSet::with_capacity(max_doc as usize),
    };
    // We avoid directly advancing the `SegmentEntry` delete cursor, because
    // we do not want to end up in an invalid state if the delete bitset
    // serialization fails.
    let mut delete_cursor = segment_entry.delete_cursor();

    let delete_cursor = segment_entry.delete_cursor();
    compute_deleted_bitset(
        &mut delete_bitset,
        &segment_reader,
        delete_cursor,
        &DocToOpstampMapping::None,
        target_opstamp,
    )?;
    if delete_bitset_opt.is_none() && delete_cursor.get().is_none() {
        // There has been no `DeleteOperation` between the segment status and `target_opstamp`.
        return Ok(());
    }

    // TODO optimize
    // We open our current serialized segment to compute the new deleted bitset.
    let segment = segment_entry.segment().clone();
    let segment_reader = SegmentReader::open(&segment)?;

    let max_doc = segment_reader.max_doc();
    let mut delete_bitset: BitSet =
        delete_bitset_opt.unwrap_or_else(|| BitSet::with_max_value(max_doc));

    let num_deleted_docs_before = segment.meta().num_deleted_docs();

    compute_deleted_bitset(
        &mut delete_bitset,
        &segment_reader,
        &mut delete_cursor,
        &DocToOpstampMapping::None,
        target_opstamp,
    )?;

    // TODO optimize... We are simply manipulating bitsets here.
    // We should be able to compute the union much faster.
    if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
        for doc in 0u32..max_doc {
            if segment_reader.is_deleted(doc) {
                delete_bitset.insert(doc as usize);
            if seg_delete_bitset.is_deleted(doc) {
                delete_bitset.insert(doc);
            }
        }

    let num_deleted_docs = delete_bitset.len();
    if num_deleted_docs > 0 {
        segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
        let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
        write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
        delete_file.terminate()?;
    }
    }
    segment_entry.set_meta(segment.meta().clone());

    let num_deleted_docs = delete_bitset.len() as u32;

    if num_deleted_docs > num_deleted_docs_before {
        // We need to write a new delete file.
        let mut delete_file = segment
            .with_delete_meta(num_deleted_docs, target_opstamp)
            .open_write(SegmentComponent::DELETE)?;
        write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
        delete_file.terminate()?;
        segment_entry.reset_delete_meta(num_deleted_docs as u32, target_opstamp);
    }

    // Regardless of whether we ended up having to write a new file or not,
    // we advance the `delete_cursor`. This is an optimisation: we want to avoid
    // checking whether a given deleted term matches any of our docs more than once.
    segment_entry.set_delete_cursor(delete_cursor);

    Ok(())
}

@@ -180,11 +213,12 @@ fn index_documents(
    segment: Segment,
    grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
    segment_updater: &mut SegmentUpdater,
    tokenizers: &TokenizerManager,
    mut delete_cursor: DeleteCursor,
) -> crate::Result<bool> {
    let mut segment_writer =
        SegmentWriter::for_segment(memory_budget, segment.clone(), tokenizers)?;
    let schema = segment.schema();

    let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
    for document_group in grouped_document_iterator {
        for doc in document_group {
            segment_writer.add_document(doc, &schema)?;

@@ -222,11 +256,7 @@ fn index_documents(
        last_docstamp,
    )?;

    let segment_entry = SegmentEntry::new(
        segment_with_max_doc.meta().clone(),
        delete_cursor,
        delete_bitset_opt,
    );
    let segment_entry = SegmentEntry::new(segment_with_max_doc, delete_cursor, delete_bitset_opt);
    block_on(segment_updater.schedule_add_segment(segment_entry))?;
    Ok(true)
}

@@ -236,7 +266,7 @@ fn apply_deletes(
    mut delete_cursor: &mut DeleteCursor,
    doc_opstamps: &[Opstamp],
    last_docstamp: Opstamp,
) -> crate::Result<Option<BitSet<u32>>> {
) -> crate::Result<Option<BitSet>> {
    if delete_cursor.get().is_none() {
        // if there are no delete operations in the queue, no need
        // to even open the segment.

@@ -246,7 +276,7 @@ fn apply_deletes(
    let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);

    let max_doc = segment.meta().max_doc();
    let mut deleted_bitset = BitSet::with_capacity(max_doc as usize);
    let mut deleted_bitset = BitSet::with_max_value(max_doc);
    let may_have_deletes = compute_deleted_bitset(
        &mut deleted_bitset,
        &segment_reader,

@@ -298,16 +328,24 @@ impl IndexWriter {

        let delete_queue = DeleteQueue::new();

        let current_opstamp = index.load_metas()?.opstamp;
        let meta = index.load_metas()?;

        let stamper = Stamper::new(current_opstamp);
        let stamper = Stamper::new(meta.opstamp);

        let commited_segments = SegmentRegister::new(
            index.directory(),
            &index.schema(),
            meta.segments,
            &delete_queue.cursor(),
        );
        let segment_registers = Arc::new(RwLock::new(SegmentRegisters::new(commited_segments)));

        let segment_updater =
            SegmentUpdater::create(index.clone(), stamper.clone(), &delete_queue.cursor())?;
            SegmentUpdater::create(segment_registers.clone(), index.clone(), stamper.clone())?;

        let mut index_writer = IndexWriter {
            _directory_lock: Some(directory_lock),

            segment_registers,
            heap_size_in_bytes_per_thread,
            index: index.clone(),

@@ -321,21 +359,28 @@ impl IndexWriter {

            delete_queue,

            committed_opstamp: current_opstamp,
            committed_opstamp: meta.opstamp,
            stamper,

            worker_id: 0,

            on_commit: Default::default(),
        };
        index_writer.start_workers()?;
        Ok(index_writer)
    }

    fn drop_sender(&mut self) {
        let (sender, _receiver) = channel::bounded(1);
        mem::replace(&mut self.operation_sender, sender);
    }

    /// If there are some merging threads, blocks until they all finish their work and
    /// then drops the `IndexWriter`.
    pub fn wait_merging_threads(mut self) -> crate::Result<()> {
        // this will stop the indexing thread,
        // dropping the last reference to the segment_updater.
        drop(self.operation_sender);
        self.drop_sender();

        let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
        for join_handle in former_workers_handles {

@@ -346,7 +391,6 @@ impl IndexWriter {
                TantivyError::ErrorInThread("Error in indexing worker thread.".into())
            })?;
        }
        drop(self.workers_join_handle);

        let result = self
            .segment_updater

@@ -360,13 +404,6 @@ impl IndexWriter {
        result
    }

    #[doc(hidden)]
    pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
        let delete_cursor = self.delete_queue.cursor();
        let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
        block_on(self.segment_updater.schedule_add_segment(segment_entry))
    }

    /// Creates a new segment.
    ///
    /// This method is useful only for users trying to do complex

@@ -415,12 +452,13 @@ impl IndexWriter {
            // was dropped.
            return Ok(());
        }
        let segment = index.new_segment();
        let segment = index.new_segment_unpersisted();
        index_documents(
            mem_budget,
            segment,
            &mut document_iterator,
            &mut segment_updater,
            index.tokenizers(),
            delete_cursor.clone(),
        )?;
    }

@@ -447,6 +485,21 @@ impl IndexWriter {
        Ok(())
    }

    // TODO move me
    pub(crate) fn trigger_commit(&self) -> impl Future<Output = ()> {
        self.on_commit.broadcast()
    }

    pub fn reader(&self, num_searchers: usize) -> crate::Result<IndexReader> {
        let nrt_reader = NRTReader::create(
            num_searchers,
            self.index.clone(),
            self.segment_registers.clone(),
            &self.on_commit,
        )?;
        Ok(IndexReader::NRT(nrt_reader))
    }

    /// Detects and removes the files that are not used by the index anymore.
    pub fn garbage_collect_files(
        &self,

@@ -501,9 +554,13 @@ impl IndexWriter {
    /// Merges a given list of segments
    ///
    /// `segment_ids` is required to be non-empty.
    pub async fn merge(&mut self, segment_ids: &[SegmentId]) -> crate::Result<SegmentMeta> {
    pub fn merge(
        &mut self,
        segment_ids: &[SegmentId],
    ) -> impl Future<Output = crate::Result<SegmentMeta>> {
        let merge_operation = self.segment_updater.make_merge_operation(segment_ids);
        self.segment_updater.start_merge(merge_operation)?.await
        let segment_updater = self.segment_updater.clone();
        async move { segment_updater.start_merge(merge_operation)?.await }
    }
|
||||
|
||||
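The new signature hands back a future that captures a clone of the segment updater instead of borrowing the writer for the whole merge. A minimal sketch of driving it, mirroring the `test_update_via_delete_insert` test further down (assumes an existing `index_writer` and `reader`):

```rust
use futures::executor::block_on;

// Collect the ids of the currently searchable segments.
let segment_ids: Vec<SegmentId> = reader
    .searcher()
    .segment_readers()
    .iter()
    .map(|segment_reader| segment_reader.segment_id())
    .collect();

// Creating the future releases the mutable borrow immediately;
// `block_on` then drives the merge to completion.
let merged_meta: SegmentMeta = block_on(index_writer.merge(&segment_ids))?;
```
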
/// Closes the current document channel send.
@@ -586,7 +643,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit`
/// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
pub fn prepare_commit(&mut self, soft_commit: bool) -> crate::Result<PreparedCommit> {
// Here, because we join all of the worker threads,
// all of the segment updates for this commit have been
// sent.
@@ -614,7 +671,7 @@ impl IndexWriter {
}

let commit_opstamp = self.stamper.stamp();
let prepared_commit = PreparedCommit::new(self, commit_opstamp);
let prepared_commit = PreparedCommit::new(self, commit_opstamp, soft_commit);
info!("Prepared commit {}", commit_opstamp);
Ok(prepared_commit)
}
@@ -634,7 +691,11 @@ impl IndexWriter {
/// that made it in the commit.
///
pub fn commit(&mut self) -> crate::Result<Opstamp> {
self.prepare_commit()?.commit()
self.prepare_commit(false)?.commit()
}

pub fn soft_commit(&mut self) -> crate::Result<Opstamp> {
self.prepare_commit(true)?.commit()
}

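A soft commit makes new segments visible to the writer's NRT reader without persisting them to the directory; only a regular commit updates `meta.json`. A minimal sketch, mirroring the `test_index_writer_reader_soft_commit` test below (assumes a `STRING` field `idfield`):

```rust
index_writer.add_document(doc!(idfield => "myid"));
index_writer.soft_commit()?;

// The NRT reader handed out by the writer sees the soft-committed doc...
let nrt_reader = index_writer.reader(2)?;
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);

// ...while a reader built from meta.json does not, until a hard commit.
let meta_reader = index.reader_builder().try_into()?;
assert_eq!(meta_reader.searcher().num_docs(), 0u64);

index_writer.commit()?;
meta_reader.reload()?;
assert_eq!(meta_reader.searcher().num_docs(), 1u64);
```
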
pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
@@ -747,16 +808,25 @@ impl IndexWriter {
}
}

impl Drop for IndexWriter {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();
for work in self.workers_join_handle.drain(..) {
let _ = work.join();
}
}
}

#[cfg(test)]
mod tests {

use super::super::operation::UserOperation;
use crate::collector::TopDocs;
use crate::directory::error::LockError;
use crate::error::*;
use crate::indexer::NoMergePolicy;
use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption};
use crate::schema::{self, IndexRecordOption, STRING};
use crate::Index;
use crate::ReloadPolicy;
use crate::Term;
@@ -982,7 +1052,8 @@ mod tests {
index_writer.add_document(doc!(text_field => "a"));
}
{
let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
let mut prepared_commit =
index_writer.prepare_commit(false).expect("commit failed");
prepared_commit.set_payload("first commit");
prepared_commit.commit().expect("commit failed");
}
@@ -1015,7 +1086,8 @@ mod tests {
index_writer.add_document(doc!(text_field => "a"));
}
{
let mut prepared_commit = index_writer.prepare_commit().expect("commit failed");
let mut prepared_commit =
index_writer.prepare_commit(false).expect("commit failed");
prepared_commit.set_payload("first commit");
prepared_commit.abort().expect("commit failed");
}
@@ -1181,4 +1253,52 @@ mod tests {
assert!(clear_again.is_ok());
assert!(commit_again.is_ok());
}

#[test]
fn test_index_doc_missing_field() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.commit().is_ok());
}

#[test]
fn test_index_writer_reader() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.commit().is_ok());
let reader = index_writer.reader(2).unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 1u64);
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.commit().is_ok());
assert_eq!(reader.searcher().num_docs(), 2u64);
assert_eq!(searcher.num_docs(), 1u64);
}

#[test]
fn test_index_writer_reader_soft_commit() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
assert!(index_writer.soft_commit().is_ok());
let nrt_reader = index_writer.reader(2).unwrap();
let normal_reader = index.reader_builder().try_into().unwrap();
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
assert_eq!(normal_reader.searcher().num_docs(), 0u64);
assert!(index_writer.commit().is_ok());
assert!(normal_reader.reload().is_ok());
assert_eq!(nrt_reader.searcher().num_docs(), 1u64);
assert_eq!(normal_reader.searcher().num_docs(), 1u64);
}
}

@@ -1492,4 +1492,46 @@ mod tests {
assert_eq!(&vals, &[20]);
}
}

#[test]
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();

let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);

let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);

let index = Index::create_in_ram(builder.build());

let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
policy.set_min_merge_size(2);
writer.set_merge_policy(Box::new(policy));

for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);

doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);

writer.add_document(doc);

if i % 5 == 0 {
writer.commit()?;
}
}

writer.commit()?;
writer.wait_merging_threads()?;

// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());

Ok(())
}
}

@@ -23,6 +23,7 @@ pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
pub use self::prepared_commit::PreparedCommit;
pub use self::segment_entry::SegmentEntry;
pub use self::segment_manager::SegmentManager;
pub(crate) use self::segment_manager::SegmentRegisters;
pub use self::segment_serializer::SegmentSerializer;
pub use self::segment_writer::SegmentWriter;

@@ -33,6 +34,7 @@ pub type DefaultMergePolicy = LogMergePolicy;
mod tests {
use crate::schema::{self, Schema};
use crate::{Index, Term};

#[test]
fn test_advance_delete_bug() {
let mut schema_builder = Schema::builder();

@@ -8,14 +8,20 @@ pub struct PreparedCommit<'a> {
index_writer: &'a mut IndexWriter,
payload: Option<String>,
opstamp: Opstamp,
soft_commit: bool,
}

impl<'a> PreparedCommit<'a> {
pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: Opstamp) -> PreparedCommit<'_> {
pub(crate) fn new(
index_writer: &'a mut IndexWriter,
opstamp: Opstamp,
soft_commit: bool,
) -> PreparedCommit {
PreparedCommit {
index_writer,
payload: None,
opstamp,
soft_commit,
}
}

@@ -33,11 +39,12 @@ impl<'a> PreparedCommit<'a> {

pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp);
let _ = block_on(
self.index_writer
.segment_updater()
.schedule_commit(self.opstamp, self.payload),
);
block_on(self.index_writer.segment_updater().schedule_commit(
self.opstamp,
self.payload,
self.soft_commit,
))?;
block_on(self.index_writer.trigger_commit());
Ok(self.opstamp)
}
}

@@ -1,7 +1,9 @@
use crate::common::BitSet;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::ManagedDirectory;
use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet;
use crate::{Opstamp, Segment};
use std::fmt;

/// A segment entry describes the state of
@@ -19,55 +21,81 @@ use std::fmt;
/// in the .del file or in the `delete_bitset`.
#[derive(Clone)]
pub struct SegmentEntry {
meta: SegmentMeta,
segment: Segment,
delete_bitset: Option<BitSet>,
delete_cursor: DeleteCursor,
}

impl SegmentEntry {
/// Create a new `SegmentEntry`
pub fn new(
segment_meta: SegmentMeta,
pub(crate) fn new(
segment: Segment,
delete_cursor: DeleteCursor,
delete_bitset: Option<BitSet>,
) -> SegmentEntry {
SegmentEntry {
meta: segment_meta,
segment,
delete_bitset,
delete_cursor,
}
}

/// Return a reference to the segment entry deleted bitset.
pub fn persist(&mut self, dest_directory: ManagedDirectory) -> crate::Result<()> {
// TODO take into account delete bitset?
self.segment.persist(dest_directory)?;
Ok(())
}

pub fn segment(&self) -> &Segment {
&self.segment
}

/// `Takes` (as in Option::take) the delete bitset of
/// a segment entry.
///
/// `DocId`s in this bitset are flagged as deleted.
pub fn delete_bitset(&self) -> Option<&BitSet> {
self.delete_bitset.as_ref()
pub fn take_delete_bitset(&mut self) -> Option<BitSet> {
self.delete_bitset.take()
}

/// Set the `SegmentMeta` for this segment.
pub fn set_meta(&mut self, segment_meta: SegmentMeta) {
self.meta = segment_meta;
/// Reset the delete information in this segment.
///
/// The `SegmentEntry` segment's `SegmentMeta` gets updated, and
/// any delete bitset is dropped and set to None.
pub fn reset_delete_meta(&mut self, num_deleted_docs: u32, target_opstamp: Opstamp) {
self.segment = self
.segment
.clone()
.with_delete_meta(num_deleted_docs, target_opstamp);
self.delete_bitset = None;
}

pub fn set_delete_cursor(&mut self, delete_cursor: DeleteCursor) {
self.delete_cursor = delete_cursor;
}
/// Return a reference to the segment_entry's delete cursor
pub fn delete_cursor(&mut self) -> &mut DeleteCursor {
&mut self.delete_cursor
pub fn delete_cursor(&mut self) -> DeleteCursor {
self.delete_cursor.clone()
}

/// Returns the segment id.
pub fn segment_id(&self) -> SegmentId {
self.meta.id()
self.segment.id()
}

/// Accessor to the `SegmentMeta`
pub fn meta(&self) -> &SegmentMeta {
&self.meta
self.segment.meta()
}
}

impl fmt::Debug for SegmentEntry {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "SegmentEntry({:?})", self.meta)
let num_deletes = self.delete_bitset.as_ref().map(|bitset| bitset.len());
write!(
formatter,
"SegmentEntry(seg={:?}, ndel={:?})",
self.segment, num_deletes
)
}
}

@@ -1,17 +1,14 @@
use super::segment_register::SegmentRegister;
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::SegmentEntry;
use crate::Result as TantivyResult;
use crate::Segment;
use std::collections::hash_set::HashSet;
use std::fmt::{self, Debug, Formatter};
use std::sync::RwLock;
use std::sync::{Arc, RwLock};
use std::sync::{RwLockReadGuard, RwLockWriteGuard};

#[derive(Default)]
struct SegmentRegisters {
pub(crate) struct SegmentRegisters {
uncommitted: SegmentRegister,
committed: SegmentRegister,
}
@@ -23,6 +20,17 @@ pub(crate) enum SegmentsStatus {
}

impl SegmentRegisters {
pub fn new(committed: SegmentRegister) -> SegmentRegisters {
SegmentRegisters {
uncommitted: Default::default(),
committed,
}
}

pub fn committed_segment(&self) -> Vec<Segment> {
self.committed.segments()
}

/// Check if all the segments are committed or uncommitted.
///
/// If some segment is missing or segments are in a different state (this should not happen
@@ -45,18 +53,7 @@ impl SegmentRegisters {
/// changes (merges especially)
#[derive(Default)]
pub struct SegmentManager {
registers: RwLock<SegmentRegisters>,
}

impl Debug for SegmentManager {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
let lock = self.read();
write!(
f,
"{{ uncommitted: {:?}, committed: {:?} }}",
lock.uncommitted, lock.committed
)
}
registers: Arc<RwLock<SegmentRegisters>>,
}

pub fn get_mergeable_segments(
@@ -75,16 +72,8 @@ pub fn get_mergeable_segments(
}

impl SegmentManager {
pub fn from_segments(
segment_metas: Vec<SegmentMeta>,
delete_cursor: &DeleteCursor,
) -> SegmentManager {
SegmentManager {
registers: RwLock::new(SegmentRegisters {
uncommitted: SegmentRegister::default(),
committed: SegmentRegister::new(segment_metas, delete_cursor),
}),
}
pub(crate) fn new(registers: Arc<RwLock<SegmentRegisters>>) -> SegmentManager {
SegmentManager { registers }
}

/// Returns all of the segment entries (committed or uncommitted)
@@ -145,7 +134,7 @@ impl SegmentManager {
/// Returns an error if some segments are missing, or if
/// the `segment_ids` are not either all committed or all
/// uncommitted.
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> crate::Result<Vec<SegmentEntry>> {
let registers_lock = self.read();
let mut segment_entries = vec![];
if registers_lock.uncommitted.contains_all(segment_ids) {
@@ -166,7 +155,7 @@ impl SegmentManager {
let error_msg = "Merge operation sent for segments that are not \
all uncommitted or committed."
.to_string();
return Err(TantivyError::InvalidArgument(error_msg));
return Err(crate::Error::InvalidArgument(error_msg));
}
Ok(segment_entries)
}

@@ -1,7 +1,10 @@
use crate::core::SegmentId;
use crate::core::SegmentMeta;
use crate::directory::ManagedDirectory;
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::segment_entry::SegmentEntry;
use crate::schema::Schema;
use crate::Segment;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::{self, Debug, Formatter};
@@ -46,6 +49,13 @@ impl SegmentRegister {
.collect()
}

pub fn segments(&self) -> Vec<Segment> {
self.segment_states
.values()
.map(|segment_entry| segment_entry.segment().clone())
.collect()
}

pub fn segment_entries(&self) -> Vec<SegmentEntry> {
self.segment_states.values().cloned().collect()
}
@@ -79,11 +89,17 @@ impl SegmentRegister {
self.segment_states.get(segment_id).cloned()
}

pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister {
pub fn new(
directory: &ManagedDirectory,
schema: &Schema,
segment_metas: Vec<SegmentMeta>,
delete_cursor: &DeleteCursor,
) -> SegmentRegister {
let mut segment_states = HashMap::new();
for segment_meta in segment_metas {
let segment_id = segment_meta.id();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None);
for meta in segment_metas {
let segment_id = meta.id();
let segment = Segment::new_persisted(meta, directory.clone(), schema.clone());
let segment_entry = SegmentEntry::new(segment, delete_cursor.clone(), None);
segment_states.insert(segment_id, segment_entry);
}
SegmentRegister { segment_states }
@@ -95,6 +111,7 @@ mod tests {
use super::*;
use crate::core::{SegmentId, SegmentMetaInventory};
use crate::indexer::delete_queue::*;
use crate::schema::Schema;

fn segment_ids(segment_register: &SegmentRegister) -> Vec<SegmentId> {
segment_register
@@ -108,6 +125,7 @@ mod tests {
fn test_segment_register() {
let inventory = SegmentMetaInventory::default();
let delete_queue = DeleteQueue::new();
let schema = Schema::builder().build();

let mut segment_register = SegmentRegister::default();
let segment_id_a = SegmentId::generate_random();
@@ -115,21 +133,24 @@ mod tests {
let segment_id_merged = SegmentId::generate_random();

{
let segment_meta = inventory.new_segment_meta(segment_id_a, 0u32);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
let meta = inventory.new_segment_meta(segment_id_a, 0u32);
let segment = Segment::new_volatile(meta, schema.clone());
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry);
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
{
let segment_meta = inventory.new_segment_meta(segment_id_b, 0u32);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None);
let segment = Segment::new_volatile(segment_meta, schema.clone());
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry);
}
segment_register.remove_segment(&segment_id_a);
segment_register.remove_segment(&segment_id_b);
{
let segment_meta_merged = inventory.new_segment_meta(segment_id_merged, 0u32);
let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None);
let segment = Segment::new_volatile(segment_meta_merged, schema);
let segment_entry = SegmentEntry::new(segment, delete_queue.cursor(), None);
segment_register.add_segment_entry(segment_entry);
}
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);

@@ -7,11 +7,10 @@ use crate::core::SegmentMeta;
use crate::core::SerializableSegment;
use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::segment_manager::{SegmentRegisters, SegmentsStatus};
use crate::indexer::stamper::Stamper;
use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer;
@@ -117,8 +116,7 @@ fn merge(

// First we apply all of the deletes to the merged segment, up to the target opstamp.
for segment_entry in &mut segment_entries {
let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
advance_deletes(segment_entry, target_opstamp)?;
}

let delete_cursor = segment_entries[0].delete_cursor().clone();
@@ -134,11 +132,13 @@ fn merge(
// ... we just serialize this index merger in our new segment to merge the two segments.
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;

let num_docs = merger.write(segment_serializer)?;
let max_doc = merger.write(segment_serializer)?;

let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);

Ok(SegmentEntry::new(segment_meta.clone(), delete_cursor, None))
Ok(SegmentEntry::new(
merged_segment.with_max_doc(max_doc),
delete_cursor,
None,
))
}

pub(crate) struct InnerSegmentUpdater {
@@ -162,12 +162,11 @@ pub(crate) struct InnerSegmentUpdater {

impl SegmentUpdater {
pub fn create(
segment_registers: Arc<RwLock<SegmentRegisters>>,
index: Index,
stamper: Stamper,
delete_cursor: &DeleteCursor,
) -> crate::Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let segment_manager = SegmentManager::new(segment_registers);
let pool = ThreadPoolBuilder::new()
.name_prefix("segment_updater")
.pool_size(1)
@@ -210,9 +209,15 @@ impl SegmentUpdater {
f: F,
) -> impl Future<Output = crate::Result<T>> {
let (sender, receiver) = oneshot::channel();
self.pool.spawn_ok(async move {
let _ = sender.send(f.await);
});
if self.is_alive() {
self.pool.spawn_ok(async move {
let _ = sender.send(f.await);
});
} else {
let _ = sender.send(Err(crate::TantivyError::SystemError(
"Segment updater killed".to_string(),
)));
}
receiver.unwrap_or_else(|_| {
let err_msg =
"A segment_updater future did not succeed. This should never happen.".to_string();
@@ -224,6 +229,7 @@ impl SegmentUpdater {
&self,
segment_entry: SegmentEntry,
) -> impl Future<Output = crate::Result<()>> {
// TODO temporary: serializing the segment at this point.
let segment_updater = self.clone();
self.schedule_future(async move {
segment_updater.segment_manager.add_segment(segment_entry);
@@ -252,8 +258,7 @@ impl SegmentUpdater {
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
let mut segment_entries = self.segment_manager.segment_entries();
for segment_entry in &mut segment_entries {
let segment = self.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?;
advance_deletes(segment_entry, target_opstamp)?;
}
Ok(segment_entries)
}
@@ -321,16 +326,23 @@ impl SegmentUpdater {
&self,
opstamp: Opstamp,
payload: Option<String>,
soft_commit: bool,
) -> impl Future<Output = crate::Result<()>> {
let segment_updater: SegmentUpdater = self.clone();
let directory = self.index.directory().clone();
self.schedule_future(async move {
if segment_updater.is_alive() {
let segment_entries = segment_updater.purge_deletes(opstamp)?;
segment_updater.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp, payload)?;
let _ = garbage_collect_files(segment_updater.clone()).await;
segment_updater.consider_merge_options().await;
let mut segment_entries = segment_updater.purge_deletes(opstamp)?;
if !soft_commit {
for segment_entry in &mut segment_entries {
segment_entry.persist(directory.clone())?;
}
}
segment_updater.segment_manager.commit(segment_entries);
if !soft_commit {
segment_updater.save_metas(opstamp, payload)?;
}
let _ = garbage_collect_files(segment_updater.clone()).await;
segment_updater.consider_merge_options().await;
Ok(())
})
}
@@ -466,17 +478,14 @@ impl SegmentUpdater {
let end_merge_future = self.schedule_future(async move {
info!("End merge {:?}", after_merge_segment_entry.meta());
{
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
let mut delete_cursor = after_merge_segment_entry.delete_cursor();
if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.load_metas().opstamp;
if delete_operation.opstamp < committed_opstamp {
let index = &segment_updater.index;
let segment = index.segment(after_merge_segment_entry.meta().clone());
if let Err(e) = advance_deletes(
segment,
&mut after_merge_segment_entry,
committed_opstamp,
) {
let _index = &segment_updater.index;
if let Err(e) =
advance_deletes(&mut after_merge_segment_entry, committed_opstamp)
{
error!(
"Merge of {:?} was cancelled (advancing deletes failed): {:?}",
merge_operation.segment_ids(),
@@ -526,11 +535,7 @@ impl SegmentUpdater {
/// Obsolete files will eventually be cleaned up
/// by the directory garbage collector.
pub fn wait_merging_thread(&self) -> crate::Result<()> {
for merge_operation in self.merge_operations.changes_iter() {
if merge_operation.is_empty() {
break;
}
}
self.merge_operations.wait_until_empty();
Ok(())
}
}

@@ -11,9 +11,9 @@ use crate::schema::Schema;
use crate::schema::Term;
use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::PreTokenizedStream;
use crate::tokenizer::{BoxedTokenizer, TokenizerManager};
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::DocId;
use crate::Opstamp;
@@ -66,11 +66,12 @@ impl SegmentWriter {
pub fn for_segment(
memory_budget: usize,
mut segment: Segment,
schema: &Schema,
tokenizers: &TokenizerManager,
) -> Result<SegmentWriter> {
let schema = segment.schema();
let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let multifield_postings = MultiFieldPostingsWriter::new(&schema, table_num_bits);
let tokenizers = schema
.fields()
.map(
@@ -79,7 +80,7 @@ impl SegmentWriter {
.get_indexing_options()
.and_then(|text_index_option| {
let tokenizer_name = &text_index_option.tokenizer();
segment.index().tokenizers().get(tokenizer_name)
tokenizers.get(tokenizer_name)
}),
_ => None,
},
@@ -88,9 +89,9 @@ impl SegmentWriter {
Ok(SegmentWriter {
max_doc: 0,
multifield_postings,
fieldnorms_writer: FieldNormsWriter::for_schema(schema),
fieldnorms_writer: FieldNormsWriter::for_schema(&schema),
segment_serializer,
fast_field_writers: FastFieldsWriter::from_schema(schema),
fast_field_writers: FastFieldsWriter::from_schema(&schema),
doc_opstamps: Vec::with_capacity(1_000),
tokenizers,
})
@@ -249,6 +250,7 @@ impl SegmentWriter {
}
}
doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
doc.prepare_for_store();
let doc_writer = self.segment_serializer.get_store_writer();
doc_writer.store(&doc)?;
self.max_doc += 1;
src/lib.rs (137 changed lines; Executable file → Normal file)
@@ -160,7 +160,6 @@ pub use self::snippet::{Snippet, SnippetGenerator};

mod docset;
pub use self::docset::{DocSet, SkipResult};

pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -170,11 +169,58 @@ pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings;
pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term};
use std::fmt;

/// Expose the current version of tantivy, as well
/// as whether it was compiled with the simd compression.
pub fn version() -> &'static str {
env!("CARGO_PKG_VERSION")
use once_cell::sync::Lazy;

/// Index format version.
const INDEX_FORMAT_VERSION: u32 = 1;

/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
major: u32,
minor: u32,
patch: u32,
index_format_version: u32,
store_compression: String,
}

impl fmt::Debug for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}

static VERSION: Lazy<Version> = Lazy::new(|| Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
index_format_version: INDEX_FORMAT_VERSION,
store_compression: crate::store::COMPRESSION.to_string(),
});

impl ToString for Version {
fn to_string(&self) -> String {
format!(
"tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
self.major, self.minor, self.patch, self.index_format_version, self.store_compression
)
}
}

static VERSION_STRING: Lazy<String> = Lazy::new(|| VERSION.to_string());

/// Expose the current version of tantivy as found in Cargo.toml during compilation.
/// eg. "0.11.0" as well as the compression scheme used in the docstore.
pub fn version() -> &'static Version {
&VERSION
}

/// Exposes the complete version of tantivy as found in Cargo.toml during compilation as a string.
/// eg. "tantivy v0.11.0, index_format v1, store_compression: lz4".
pub fn version_string() -> &'static str {
VERSION_STRING.as_str()
}

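For reference, a sketch of how the reworked API reads; `version()` now yields a structured value and `version_string()` its cached rendering:

```rust
// Structured access:
let version: &tantivy::Version = tantivy::version();

// `Version` implements `ToString`, producing something like
// "tantivy v0.11.3, index_format v1, store_compression: lz4".
println!("{}", version.to_string());

// `version_string()` returns the same rendering as a &'static str.
assert_eq!(tantivy::version_string(), tantivy::version().to_string());
```
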
/// Defines tantivy's merging strategy
@@ -287,6 +333,18 @@ mod tests {
sample_with_seed(n, ratio, 4)
}

#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
let version = super::version().to_string();
assert!(regex_ptn.find(&version).is_some());
}

#[test]
#[cfg(feature = "mmap")]
fn test_indexing() {
@@ -882,4 +940,73 @@ mod tests {
assert_eq!(fast_field_reader.get(0), 4f64)
}
}

// motivated by #729
#[test]
fn test_update_via_delete_insert() {
use crate::collector::Count;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
use crate::SegmentId;
use futures::executor::block_on;

const DOC_COUNT: u64 = 2u64;

let mut schema_builder = SchemaBuilder::default();
let id = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();

let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader().unwrap();

let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));

for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id));
}
index_writer.commit().unwrap();

index_reader.reload().unwrap();
let searcher = index_reader.searcher();

assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);

// update the elements by deleting and re-adding
for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let doc = doc!(id => doc_id);
index_writer.add_document(doc);
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
// The number of documents should be stable.
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}

index_reader.reload().unwrap();
let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.into_iter()
.map(|reader| reader.segment_id())
.collect();
block_on(index_writer.merge(&segment_ids)).unwrap();

index_reader.reload().unwrap();
let searcher = index_reader.searcher();

assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
}

@@ -35,9 +35,9 @@
/// let likes = schema_builder.add_u64_field("num_u64", FAST);
/// let schema = schema_builder.build();
/// let doc = doc!(
/// title => "Life Aquatic",
/// author => "Wes Anderson",
/// likes => 4u64
/// title => "Life Aquatic",
/// author => "Wes Anderson",
/// likes => 4u64
/// );
/// # }
/// ```

@@ -36,11 +36,10 @@ struct Positions {

impl Positions {
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let skip_len = skip_source.len();
let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let body_split = body.len() - u64::SIZE_IN_BYTES * (num_long_skips as usize);
let (skip_source, long_skip_source) = body.split(body_split);
let (skip_source, long_skip_source) =
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
Positions {
bit_packer: BitPacker4x::new(),
skip_source,
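`split_from_end(n)` reads as intended: split off the last `n` bytes instead of computing the split offset by hand. A standalone sketch of the equivalence on a plain byte slice (assuming the `ReadOnlySource` method behaves the same way):

```rust
// Equivalent to the old `split(len - n)` spelled out by hand.
fn split_from_end(data: &[u8], n: usize) -> (&[u8], &[u8]) {
    data.split_at(data.len() - n)
}

fn main() {
    let data = [1u8, 2, 3, 4, 5];
    let (body, footer) = split_from_end(&data, 2);
    assert_eq!(body, &[1, 2, 3]);
    assert_eq!(footer, &[4, 5]);
}
```
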
@@ -220,7 +220,7 @@ pub mod tests {

{
let mut segment_writer =
SegmentWriter::for_segment(3_000_000, segment.clone(), &schema).unwrap();
SegmentWriter::for_segment(3_000_000, segment.clone(), index.tokenizers()).unwrap();
{
let mut doc = Document::default();
// checking that position works if the field has two values

@@ -54,21 +54,21 @@
match self.excluding_state {
State::ExcludeOne(excluded_doc) => {
if doc == excluded_doc {
false
} else if excluded_doc > doc {
true
} else {
match self.excluding_docset.skip_next(doc) {
SkipResult::OverStep => {
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
true
}
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
return false;
}
if excluded_doc > doc {
return true;
}
match self.excluding_docset.skip_next(doc) {
SkipResult::OverStep => {
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
true
}
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
}
}
State::Finished => true,

@@ -33,7 +33,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
@@ -59,7 +58,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// let searcher = reader.searcher();
///
/// {
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true);
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
@@ -69,6 +67,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
///
/// Ok(())
/// }
/// # assert!(example().is_ok());
/// ```
#[derive(Debug, Clone)]
pub struct FuzzyTermQuery {

@@ -4,6 +4,7 @@ use crate::postings::Postings;
use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer};
use crate::DocId;
use std::cmp::Ordering;

struct PostingsWithOffset<TPostings> {
offset: u32,
@@ -59,12 +60,16 @@ fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i];
let right_val = right[right_i];
if left_val < right_val {
left_i += 1;
} else if right_val < left_val {
right_i += 1;
} else {
return true;
match left_val.cmp(&right_val) {
Ordering::Less => {
left_i += 1;
}
Ordering::Equal => {
return true;
}
Ordering::Greater => {
right_i += 1;
}
}
}
false
@@ -77,14 +82,18 @@ fn intersection_count(left: &[u32], right: &[u32]) -> usize {
while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i];
let right_val = right[right_i];
if left_val < right_val {
left_i += 1;
} else if right_val < left_val {
right_i += 1;
} else {
count += 1;
left_i += 1;
right_i += 1;
match left_val.cmp(&right_val) {
Ordering::Less => {
left_i += 1;
}
Ordering::Equal => {
count += 1;
left_i += 1;
right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
}
}
count
@@ -103,15 +112,19 @@ fn intersection(left: &mut [u32], right: &[u32]) -> usize {
while left_i < left_len && right_i < right_len {
let left_val = left[left_i];
let right_val = right[right_i];
if left_val < right_val {
left_i += 1;
} else if right_val < left_val {
right_i += 1;
} else {
left[count] = left_val;
count += 1;
left_i += 1;
right_i += 1;
match left_val.cmp(&right_val) {
Ordering::Less => {
left_i += 1;
}
Ordering::Equal => {
left[count] = left_val;
count += 1;
left_i += 1;
right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
}
}
count

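These three hunks mechanically replace `if`/`else if` comparison chains with a `match` on `Ordering` (the shape clippy's `comparison_chain` lint suggests); behavior is unchanged. A self-contained sketch of the shared two-pointer walk:

```rust
use std::cmp::Ordering;

/// Counts the values present in both sorted, deduplicated slices.
fn sorted_intersection_count(left: &[u32], right: &[u32]) -> usize {
    let (mut left_i, mut right_i, mut count) = (0, 0, 0);
    while left_i < left.len() && right_i < right.len() {
        match left[left_i].cmp(&right[right_i]) {
            Ordering::Less => left_i += 1,
            Ordering::Greater => right_i += 1,
            Ordering::Equal => {
                count += 1;
                left_i += 1;
                right_i += 1;
            }
        }
    }
    count
}

fn main() {
    // 3 and 5 appear in both slices.
    assert_eq!(sorted_intersection_count(&[1, 3, 5, 7], &[3, 4, 5]), 2);
}
```
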
@@ -8,7 +8,7 @@ use crate::query::PhraseQuery;
use crate::query::Query;
use crate::query::RangeQuery;
use crate::query::TermQuery;
use crate::schema::IndexRecordOption;
use crate::schema::{Facet, IndexRecordOption};
use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager;
@@ -319,7 +319,10 @@ impl QueryParser {
))
}
}
FieldType::HierarchicalFacet => Ok(vec![(0, Term::from_field_text(field, phrase))]),
FieldType::HierarchicalFacet => {
let facet = Facet::from_text(phrase);
Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
}
FieldType::Bytes => {
let field_name = self.schema.get_field_name(field).to_string();
Err(QueryParserError::FieldNotIndexed(field_name))
@@ -554,6 +557,7 @@ mod test {
schema_builder.add_text_field("with_stop_words", text_options);
schema_builder.add_date_field("date", INDEXED);
schema_builder.add_f64_field("float", INDEXED);
schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let default_fields = vec![title, text];
let tokenizer_manager = TokenizerManager::default();
@@ -588,9 +592,13 @@ mod test {
}

#[test]
pub fn test_parse_query_simple() {
pub fn test_parse_query_facet() {
let query_parser = make_query_parser();
assert!(query_parser.parse_query("toto").is_ok());
let query = query_parser.parse_query("facet:/root/branch/leaf").unwrap();
assert_eq!(
format!("{:?}", query),
"TermQuery(Term(field=11,bytes=[114, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102]))"
);
}

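The expected bytes in the test make the encoding visible: `Facet::from_text` turns the `/`-separated path into segments joined by `\0` bytes ("root", 0, "branch", 0, "leaf"), and `encoded_str()` is what gets handed to `Term::from_field_text`. A small sketch of that separator scheme (an illustration inferred from the test bytes, not the `Facet` implementation itself):

```rust
fn main() {
    // "/root/branch/leaf" -> "root\0branch\0leaf"
    let encoded: String = "/root/branch/leaf"
        .split('/')
        .filter(|segment| !segment.is_empty())
        .collect::<Vec<_>>()
        .join("\0");
    // Matches the byte sequence asserted in test_parse_query_facet.
    assert_eq!(
        encoded.as_bytes(),
        &[114, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102][..]
    );
}
```
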
#[test]

@@ -38,41 +38,33 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example
///
/// ```rust
/// # use tantivy::collector::Count;
/// # use tantivy::query::RangeQuery;
/// # use tantivy::schema::{Schema, INDEXED};
/// # use tantivy::{doc, Index, Result};
/// #
/// # fn run() -> Result<()> {
/// # let mut schema_builder = Schema::builder();
/// # let year_field = schema_builder.add_u64_field("year", INDEXED);
/// # let schema = schema_builder.build();
/// #
/// # let index = Index::create_in_ram(schema);
/// # {
/// # let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
/// # for year in 1950u64..2017u64 {
/// # let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// # for _ in 0..num_docs_within_year {
/// # index_writer.add_document(doc!(year_field => year));
/// # }
/// # }
/// # index_writer.commit().unwrap();
/// # }
/// # let reader = index.reader()?;
/// use tantivy::collector::Count;
/// use tantivy::query::RangeQuery;
/// use tantivy::schema::{Schema, INDEXED};
/// use tantivy::{doc, Index};
/// # fn test() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let year_field = schema_builder.add_u64_field("year", INDEXED);
/// let schema = schema_builder.build();
///
/// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
/// for year in 1950u64..2017u64 {
/// let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// for _ in 0..num_docs_within_year {
/// index_writer.add_document(doc!(year_field => year));
/// }
/// }
/// index_writer.commit()?;
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
///
/// # assert_eq!(num_60s_books, 2285);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # run().unwrap()
/// assert_eq!(num_60s_books, 2285);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ```
#[derive(Clone, Debug)]
pub struct RangeQuery {

@@ -15,40 +15,40 @@ use tantivy_fst::Regex;
/// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term};
/// use tantivy::{doc, Index, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// # fn test() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ```
#[derive(Debug, Clone)]
pub struct RegexQuery {

@@ -112,6 +112,7 @@ mod tests {
let term_a = Term::from_field_text(text_field, "a");
let term_query = TermQuery::new(term_a, IndexRecordOption::Basic);
let reader = index.reader().unwrap();
assert_eq!(reader.searcher().segment_readers().len(), 1);
assert_eq!(term_query.count(&*reader.searcher()).unwrap(), 1);
}

@@ -23,42 +23,39 @@ use std::fmt;
/// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{doc, Index, Result, Term};
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit()?;
/// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// assert_eq!(count, 2);
///
/// Ok(())
/// use tantivy::{doc, Index, Term};
/// # fn test() -> tantivy::Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit()?;
/// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
/// assert_eq!(count, 2);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ```
#[derive(Clone)]
pub struct TermQuery {

src/reader/index_writer_reader.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
use crate::directory::{WatchCallbackList, WatchHandle};
use crate::indexer::SegmentRegisters;
use crate::reader::pool::Pool;
use crate::{Index, LeasedItem, Searcher, Segment, SegmentReader};
use std::iter::repeat_with;
use std::sync::{Arc, RwLock, Weak};

struct InnerNRTReader {
num_searchers: usize,
index: Index,
searcher_pool: Pool<Searcher>,
segment_registers: Arc<RwLock<SegmentRegisters>>,
}

impl InnerNRTReader {
fn load_segment_readers(&self) -> crate::Result<Vec<SegmentReader>> {
let segments: Vec<Segment> = {
let rlock = self.segment_registers.read().unwrap();
rlock.committed_segment()
};
segments
.iter()
.map(SegmentReader::open)
.collect::<crate::Result<Vec<SegmentReader>>>()
}

pub fn reload(&self) -> crate::Result<()> {
let segment_readers: Vec<SegmentReader> = self.load_segment_readers()?;
let schema = self.index.schema();
let searchers = repeat_with(|| {
Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
})
.take(self.num_searchers)
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}

pub fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}

#[derive(Clone)]
pub struct NRTReader {
inner: Arc<InnerNRTReader>,
watch_handle: WatchHandle,
}

impl NRTReader {
pub fn reload(&self) -> crate::Result<()> {
self.inner.reload()
}

pub fn searcher(&self) -> LeasedItem<Searcher> {
self.inner.searcher()
}

pub(crate) fn create(
num_searchers: usize,
index: Index,
segment_registers: Arc<RwLock<SegmentRegisters>>,
watch_callback_list: &WatchCallbackList,
) -> crate::Result<Self> {
let inner_reader: Arc<InnerNRTReader> = Arc::new(InnerNRTReader {
num_searchers,
index,
searcher_pool: Pool::new(),
segment_registers,
});
let inner_reader_weak: Weak<InnerNRTReader> = Arc::downgrade(&inner_reader);
let watch_handle = watch_callback_list.subscribe(Box::new(move || {
if let Some(nrt_reader_arc) = inner_reader_weak.upgrade() {
let _ = nrt_reader_arc.reload();
}
}));
inner_reader.reload()?;
Ok(NRTReader {
inner: inner_reader,
watch_handle,
})
}
}
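Note the `Weak` in the commit callback: the watch list stores only a downgraded pointer, so it never keeps the reader alive on its own, and dropping the last `NRTReader` clone actually frees the pool. A standalone sketch of the pattern:

```rust
use std::sync::{Arc, Weak};

fn main() {
    let state = Arc::new(String::from("segment registers"));
    let weak: Weak<String> = Arc::downgrade(&state);

    // The callback only touches the state while some owner still exists.
    let callback = move || {
        if let Some(state) = weak.upgrade() {
            println!("still alive: {}", state);
        } // else: silently a no-op, exactly like the reload callback above
    };

    callback(); // prints, because `state` is still owned
    drop(state);
    callback(); // upgrade() now returns None; nothing happens
}
```
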
src/reader/meta_file_reader.rs (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
use super::pool::Pool;
|
||||
use crate::core::Segment;
|
||||
use crate::directory::Directory;
|
||||
use crate::directory::WatchHandle;
|
||||
use crate::directory::META_LOCK;
|
||||
use crate::Searcher;
|
||||
use crate::SegmentReader;
|
||||
use crate::{Index, LeasedItem};
|
||||
use crate::{IndexReader, Result};
|
||||
use std::iter::repeat_with;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Defines when a new version of the index should be reloaded.
|
||||
///
|
||||
/// Regardless of whether you search and index in the same process, tantivy does not necessarily
|
||||
/// reflects the change that are commited to your index. `ReloadPolicy` precisely helps you define
|
||||
/// when you want your index to be reloaded.
|
||||
#[derive(Clone, Copy)]
|
||||
pub enum ReloadPolicy {
|
||||
/// The index is entirely reloaded manually.
|
||||
/// All updates of the index should be manual.
|
||||
///
|
||||
/// No change is reflected automatically. You are required to call `.load_seacher()` manually.
|
||||
Manual,
|
||||
/// The index is reloaded within milliseconds after a new commit is available.
|
||||
/// This is made possible by watching changes in the `meta.json` file.
|
||||
OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
|
||||
}

/// `IndexReader` builder
///
/// It makes it possible to set the following values.
///
/// - `num_searchers` (by default, the number of detected CPU threads):
///
///   When `num_searchers` queries are requested at the same time, the `num_searchers + 1`-th
///   query will block until one of the searchers in use is released.
/// - `reload_policy` (by default `ReloadPolicy::Manual`):
///
///   See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
#[derive(Clone)]
pub struct IndexReaderBuilder {
    num_searchers: usize,
    reload_policy: ReloadPolicy,
    index: Index,
}

impl IndexReaderBuilder {
    pub(crate) fn new(index: Index) -> IndexReaderBuilder {
        IndexReaderBuilder {
            num_searchers: num_cpus::get(),
            reload_policy: ReloadPolicy::Manual,
            index,
        }
    }

    /// Builds the reader.
    ///
    /// Building the reader is a non-trivial operation that requires
    /// opening the different segment readers. It may take hundreds of
    /// milliseconds and it may return an error.
    /// TODO(pmasurel) Use the `TryInto` trait once it is available in stable.
    pub fn try_into(self) -> Result<IndexReader> {
        let inner_reader = MetaFileIndexReaderInner {
            index: self.index,
            num_searchers: self.num_searchers,
            searcher_pool: Pool::new(),
        };
        inner_reader.reload()?;
        let inner_reader_arc = Arc::new(inner_reader);
        let watch_handle_opt: Option<WatchHandle>;
        match self.reload_policy {
            ReloadPolicy::Manual => {
                // No need to set anything...
                watch_handle_opt = None;
            }
            ReloadPolicy::OnCommit => {
                let inner_reader_arc_clone = inner_reader_arc.clone();
                let callback = move || {
                    if let Err(err) = inner_reader_arc_clone.reload() {
                        error!(
                            "Error while loading searcher after commit was detected. {:?}",
                            err
                        );
                    }
                };
                let watch_handle = inner_reader_arc
                    .index
                    .directory()
                    .watch(Box::new(callback))?;
                watch_handle_opt = Some(watch_handle);
            }
        }
        Ok(IndexReader::from(MetaFileIndexReader {
            inner: inner_reader_arc,
            watch_handle_opt,
        }))
    }

    /// Sets the reload_policy.
    ///
    /// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
    pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
        self.reload_policy = reload_policy;
        self
    }

    /// Sets the number of `Searcher`s in the searcher pool.
    pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
        self.num_searchers = num_searchers;
        self
    }
}

struct MetaFileIndexReaderInner {
    num_searchers: usize,
    searcher_pool: Pool<Searcher>,
    index: Index,
}

impl MetaFileIndexReaderInner {
    fn load_segment_readers(&self) -> crate::Result<Vec<SegmentReader>> {
        // We keep the lock until we have effectively finished opening
        // the `SegmentReader`s, because it prevents a different process
        // from garbage collecting these files while we open them.
        //
        // Once opened, on Linux & macOS, the mmap will remain valid even
        // after the file has been deleted.
        // On Windows, the file deletion will fail.
        let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
        let searchable_segments = self.searchable_segments()?;
        searchable_segments
            .iter()
            .map(SegmentReader::open)
            .collect::<Result<_>>()
    }

    fn reload(&self) -> crate::Result<()> {
        let segment_readers: Vec<SegmentReader> = self.load_segment_readers()?;
        let schema = self.index.schema();
        let searchers = repeat_with(|| {
            Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone())
        })
        .take(self.num_searchers)
        .collect();
        self.searcher_pool.publish_new_generation(searchers);
        Ok(())
    }

    /// Returns the list of segments that are searchable
    fn searchable_segments(&self) -> crate::Result<Vec<Segment>> {
        self.index.searchable_segments()
    }

    fn searcher(&self) -> LeasedItem<Searcher> {
        self.searcher_pool.acquire()
    }
}
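`reload` builds `num_searchers` fresh `Searcher`s over the newly opened segment readers and swaps them in atomically via `publish_new_generation`; leases handed out before the swap keep pointing at the old generation until they are dropped. The real `Pool` in `reader/pool.rs` leases items exclusively and blocks when all of them are out; the generation-swap idea alone can be sketched with `std` (illustrative, not the actual implementation):

```rust
use std::sync::{Arc, Mutex};

struct Pool<T> {
    generation: Mutex<Arc<Vec<Arc<T>>>>,
    cursor: Mutex<usize>,
}

impl<T> Pool<T> {
    fn new() -> Pool<T> {
        Pool {
            generation: Mutex::new(Arc::new(Vec::new())),
            cursor: Mutex::new(0),
        }
    }

    /// Swaps in a whole new generation. Clones handed out from the previous
    /// generation stay valid until their last holder drops them.
    fn publish_new_generation(&self, items: Vec<T>) {
        let items: Vec<Arc<T>> = items.into_iter().map(Arc::new).collect();
        *self.generation.lock().unwrap() = Arc::new(items);
    }

    /// Hands out items from the current generation, round-robin.
    fn acquire(&self) -> Arc<T> {
        let generation = self.generation.lock().unwrap().clone();
        let mut cursor = self.cursor.lock().unwrap();
        *cursor = (*cursor + 1) % generation.len();
        generation[*cursor].clone()
    }
}

fn main() {
    let pool = Pool::new();
    pool.publish_new_generation(vec!["searcher-gen-1"]);
    let leased = pool.acquire();
    pool.publish_new_generation(vec!["searcher-gen-2"]);
    assert_eq!(*leased, "searcher-gen-1"); // the old lease is still usable
    assert_eq!(*pool.acquire(), "searcher-gen-2");
}
```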

/// `IndexReader` is your entry point to read and search the index.
///
/// It controls when a new version of the index should be loaded and lends
/// you instances of `Searcher` for the last loaded version.
///
/// `Clone` does not clone the underlying pool of searchers. `IndexReader`
/// just wraps an `Arc`.
#[derive(Clone)]
pub struct MetaFileIndexReader {
    inner: Arc<MetaFileIndexReaderInner>,
    watch_handle_opt: Option<WatchHandle>,
}

impl MetaFileIndexReader {
    pub fn reload(&self) -> crate::Result<()> {
        self.inner.reload()
    }
    pub fn searcher(&self) -> LeasedItem<Searcher> {
        self.inner.searcher()
    }
}

@@ -1,172 +1,20 @@
mod index_writer_reader;
mod meta_file_reader;
mod pool;

use self::meta_file_reader::MetaFileIndexReader;
pub use self::meta_file_reader::{IndexReaderBuilder, ReloadPolicy};
pub use self::pool::LeasedItem;
use self::pool::Pool;
use crate::core::Segment;
use crate::directory::Directory;
use crate::directory::WatchHandle;
use crate::directory::META_LOCK;
use crate::Index;
use crate::Result;
pub(crate) use crate::reader::index_writer_reader::NRTReader;
use crate::Searcher;
use crate::SegmentReader;
use std::sync::Arc;

/// Defines when a new version of the index should be reloaded.
///
/// Regardless of whether you search and index in the same process, tantivy does not necessarily
/// reflect the changes that are committed to your index. `ReloadPolicy` helps you define precisely
/// when you want your index to be reloaded.
#[derive(Clone, Copy)]
pub enum ReloadPolicy {
    /// The index is entirely reloaded manually.
    /// All updates of the index should be manual.
    ///
    /// No change is reflected automatically. You are required to call `.reload()` manually.
    Manual,
    /// The index is reloaded within milliseconds after a new commit is available.
    /// This is made possible by watching changes in the `meta.json` file.
    OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
}

/// `IndexReader` builder
///
/// It makes it possible to set the following values.
///
/// - `num_searchers` (by default, the number of detected CPU threads):
///
///   When `num_searchers` queries are requested at the same time, the `num_searchers + 1`-th
///   query will block until one of the searchers in use is released.
/// - `reload_policy` (by default `ReloadPolicy::OnCommit`):
///
///   See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
#[derive(Clone)]
pub struct IndexReaderBuilder {
    num_searchers: usize,
    reload_policy: ReloadPolicy,
    index: Index,
}

impl IndexReaderBuilder {
    pub(crate) fn new(index: Index) -> IndexReaderBuilder {
        IndexReaderBuilder {
            num_searchers: num_cpus::get(),
            reload_policy: ReloadPolicy::OnCommit,
            index,
        }
    }

    /// Builds the reader.
    ///
    /// Building the reader is a non-trivial operation that requires
    /// opening the different segment readers. It may take hundreds of
    /// milliseconds and it may return an error.
    /// TODO(pmasurel) Use the `TryInto` trait once it is available in stable.
    pub fn try_into(self) -> Result<IndexReader> {
        let inner_reader = InnerIndexReader {
            index: self.index,
            num_searchers: self.num_searchers,
            searcher_pool: Pool::new(),
        };
        inner_reader.reload()?;
        let inner_reader_arc = Arc::new(inner_reader);
        let watch_handle_opt: Option<WatchHandle>;
        match self.reload_policy {
            ReloadPolicy::Manual => {
                // No need to set anything...
                watch_handle_opt = None;
            }
            ReloadPolicy::OnCommit => {
                let inner_reader_arc_clone = inner_reader_arc.clone();
                let callback = move || {
                    if let Err(err) = inner_reader_arc_clone.reload() {
                        error!(
                            "Error while loading searcher after commit was detected. {:?}",
                            err
                        );
                    }
                };
                let watch_handle = inner_reader_arc
                    .index
                    .directory()
                    .watch(Box::new(callback))?;
                watch_handle_opt = Some(watch_handle);
            }
        }
        Ok(IndexReader {
            inner: inner_reader_arc,
            watch_handle_opt,
        })
    }

    /// Sets the reload_policy.
    ///
    /// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
    pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
        self.reload_policy = reload_policy;
        self
    }

    /// Sets the number of `Searcher`s in the searcher pool.
    pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
        self.num_searchers = num_searchers;
        self
    }
}

struct InnerIndexReader {
    num_searchers: usize,
    searcher_pool: Pool<Searcher>,
    index: Index,
}

impl InnerIndexReader {
    fn reload(&self) -> Result<()> {
        let segment_readers: Vec<SegmentReader> = {
            let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
            let searchable_segments = self.searchable_segments()?;
            searchable_segments
                .iter()
                .map(SegmentReader::open)
                .collect::<Result<_>>()?
        };
        let schema = self.index.schema();
        let searchers = (0..self.num_searchers)
            .map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
            .collect();
        self.searcher_pool.publish_new_generation(searchers);
        Ok(())
    }

    /// Returns the list of segments that are searchable
    fn searchable_segments(&self) -> Result<Vec<Segment>> {
        self.index.searchable_segments()
    }

    fn searcher(&self) -> LeasedItem<Searcher> {
        self.searcher_pool.acquire()
    }
}

/// `IndexReader` is your entry point to read and search the index.
///
/// It controls when a new version of the index should be loaded and lends
/// you instances of `Searcher` for the last loaded version.
///
/// `Clone` does not clone the underlying pool of searchers. `IndexReader`
/// just wraps an `Arc`.
#[derive(Clone)]
pub struct IndexReader {
    inner: Arc<InnerIndexReader>,
    watch_handle_opt: Option<WatchHandle>,
pub enum IndexReader {
    FromMetaFile(MetaFileIndexReader),
    NRT(NRTReader),
}

impl IndexReader {
    #[cfg(test)]
    pub(crate) fn index(&self) -> Index {
        self.inner.index.clone()
    }

    /// Update searchers so that they reflect the state of the last
    /// `.commit()`.
    ///
@@ -176,8 +24,11 @@ impl IndexReader {
    ///
    /// This automatic reload can take tens of milliseconds to kick in however, and in unit tests
    /// it can be nice to deterministically force the reload of searchers.
    pub fn reload(&self) -> Result<()> {
        self.inner.reload()
    pub fn reload(&self) -> crate::Result<()> {
        match self {
            IndexReader::FromMetaFile(meta_file_reader) => meta_file_reader.reload(),
            IndexReader::NRT(nrt_reader) => nrt_reader.reload(),
        }
    }
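The last paragraph of the doc-comment above is the important one for tests: rather than sleeping and hoping the watch thread has fired, commit and then call `reload()` yourself. A sketch of the deterministic pattern (field names and the writer heap size are arbitrary):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::{Document, Index, ReloadPolicy};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .try_into()?;

    let mut index_writer = index.writer_with_num_threads(1, 10_000_000)?;
    let mut doc = Document::default();
    doc.add_text(title, "hello");
    index_writer.add_document(doc);
    index_writer.commit()?;

    assert_eq!(reader.searcher().num_docs(), 0); // not reloaded yet
    reader.reload()?; // deterministic: no watch-thread race
    assert_eq!(reader.searcher().num_docs(), 1);
    Ok(())
}
```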

    /// Returns a searcher
@@ -191,6 +42,21 @@ impl IndexReader {
    /// The same searcher must be used for a given query, as it ensures
    /// the use of a consistent segment set.
    pub fn searcher(&self) -> LeasedItem<Searcher> {
        self.inner.searcher()
        match self {
            IndexReader::FromMetaFile(meta_file_reader) => meta_file_reader.searcher(),
            IndexReader::NRT(nrt_reader) => nrt_reader.searcher(),
        }
    }
}

impl From<MetaFileIndexReader> for IndexReader {
    fn from(meta_file_reader: MetaFileIndexReader) -> Self {
        IndexReader::FromMetaFile(meta_file_reader)
    }
}

impl From<NRTReader> for IndexReader {
    fn from(nrt_reader: NRTReader) -> Self {
        IndexReader::NRT(nrt_reader)
    }
}

@@ -155,6 +155,21 @@ impl Document {
            .find(|field_value| field_value.field() == field)
            .map(FieldValue::value)
    }

    /// Prepares the `Document` for being stored in the document store.
    ///
    /// This method transforms `PreTokenizedString` values into `String`
    /// values.
    pub fn prepare_for_store(&mut self) {
        for field_value in &mut self.field_values {
            if let Value::PreTokStr(pre_tokenized_text) = field_value.value() {
                *field_value = FieldValue::new(
                    field_value.field(),
                    Value::Str(pre_tokenized_text.text.clone()), //< TODO somehow remove .clone()
                );
            }
        }
    }
}

impl BinarySerializable for Document {
@@ -180,6 +195,7 @@ impl BinarySerializable for Document {
mod tests {

    use crate::schema::*;
    use crate::tokenizer::{PreTokenizedString, Token};

    #[test]
    fn test_doc() {
@@ -189,4 +205,38 @@ mod tests {
        doc.add_text(text_field, "My title");
        assert_eq!(doc.field_values().len(), 1);
    }

    #[test]
    fn test_prepare_for_store() {
        let mut schema_builder = Schema::builder();
        let text_field = schema_builder.add_text_field("title", TEXT);
        let mut doc = Document::default();

        let pre_tokenized_text = PreTokenizedString {
            text: String::from("A"),
            tokens: vec![Token {
                offset_from: 0,
                offset_to: 1,
                position: 0,
                text: String::from("A"),
                position_length: 1,
            }],
        };

        doc.add_pre_tokenized_text(text_field, &pre_tokenized_text);
        doc.add_text(text_field, "title");
        doc.prepare_for_store();

        assert_eq!(doc.field_values().len(), 2);

        match doc.field_values()[0].value() {
            Value::Str(ref text) => assert_eq!(text, "A"),
            _ => panic!("Incorrect variant of Value"),
        }

        match doc.field_values()[1].value() {
            Value::Str(ref text) => assert_eq!(text, "title"),
            _ => panic!("Incorrect variant of Value"),
        }
    }
}

@@ -6,6 +6,7 @@ use crate::schema::TextFieldIndexing;
use crate::schema::Value;
use crate::schema::{IntOptions, TextOptions};
use crate::tokenizer::PreTokenizedString;
use chrono::{FixedOffset, Utc};
use serde_json::Value as JsonValue;

/// Possible error that may occur while parsing a field value
@@ -124,13 +125,20 @@ impl FieldType {
    pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> {
        match *json {
            JsonValue::String(ref field_text) => match *self {
                FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
                FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {
                    Err(ValueParsingError::TypeError(format!(
                        "Expected an integer, got {:?}",
                        json
                    )))
                FieldType::Date(_) => {
                    let dt_with_fixed_tz: chrono::DateTime<FixedOffset> =
                        chrono::DateTime::parse_from_rfc3339(field_text).map_err(|err|
                            ValueParsingError::TypeError(format!(
                                "Failed to parse date from JSON. Expected rfc3339 format, got {}. {:?}",
                                field_text, err
                            ))
                        )?;
                    Ok(Value::Date(dt_with_fixed_tz.with_timezone(&Utc)))
                }
                FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
                FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) => Err(
                    ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
                ),
                FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
                FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
                    ValueParsingError::InvalidBase64(format!(
@@ -208,7 +216,35 @@ mod tests {
    use crate::schema::field_type::ValueParsingError;
    use crate::schema::TextOptions;
    use crate::schema::Value;
    use crate::schema::{Schema, INDEXED};
    use crate::tokenizer::{PreTokenizedString, Token};
    use crate::{DateTime, Document};
    use chrono::{NaiveDate, NaiveDateTime, NaiveTime, Utc};

    #[test]
    fn test_deserialize_json_date() {
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", INDEXED);
        let schema = schema_builder.build();
        let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
        let doc = schema.parse_document(doc_json).unwrap();
        let date = doc.get_first(date_field).unwrap();
        assert_eq!(format!("{:?}", date), "Date(2019-10-12T05:20:50.520Z)");
    }
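The assertion above encodes the timezone normalization performed by the new `FieldType::Date` branch: the input carries a `+02:00` offset, and `with_timezone(&Utc)` shifts it two hours back. The same two chrono calls in isolation (values taken from the test above):

```rust
use chrono::{FixedOffset, Utc};

fn main() {
    let parsed: chrono::DateTime<FixedOffset> =
        chrono::DateTime::parse_from_rfc3339("2019-10-12T07:20:50.52+02:00").unwrap();
    // Normalizing to UTC turns 07:20:50.52+02:00 into 05:20:50.520Z.
    let utc = parsed.with_timezone(&Utc);
    assert_eq!(format!("{:?}", utc), "2019-10-12T05:20:50.520Z");
}
```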

    #[test]
    fn test_serialize_json_date() {
        let mut doc = Document::new();
        let mut schema_builder = Schema::builder();
        let date_field = schema_builder.add_date_field("date", INDEXED);
        let schema = schema_builder.build();
        let naive_date = NaiveDate::from_ymd(1982, 9, 17);
        let naive_time = NaiveTime::from_hms(13, 20, 00);
        let date_time = DateTime::from_utc(NaiveDateTime::new(naive_date, naive_time), Utc);
        doc.add_date(date_field, &date_time);
        let doc_json = schema.to_json(&doc);
        assert_eq!(doc_json, r#"{"date":["1982-09-17T13:20:00+00:00"]}"#);
    }

    #[test]
    fn test_bytes_value_from_json() {

@@ -53,7 +53,7 @@ where
    fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output {
        SchemaFlagList {
            head: head.head,
            tail: self.clone(),
            tail: self,
        }
    }
}

@@ -44,7 +44,7 @@ We can split the problem of generating a search result page into two phases :
the search results page. (`doc_ids[] -> Document[]`)

In the first phase, the ability to search for documents by the given field is determined by the
[`TextIndexingOptions`](enum.TextIndexingOptions.html) of our
[`IndexRecordOption`](enum.IndexRecordOption.html) of our
[`TextOptions`](struct.TextOptions.html).

The effect of each possible setting is described more in detail

@@ -166,7 +166,7 @@ impl SchemaBuilder {
    }

    /// Adds a field entry to the schema being built.
    fn add_field(&mut self, field_entry: FieldEntry) -> Field {
    pub fn add_field(&mut self, field_entry: FieldEntry) -> Field {
        let field = Field::from_field_id(self.fields.len() as u32);
        let field_name = field_entry.name().to_string();
        self.fields.push(field_entry);
@@ -401,6 +401,7 @@ pub enum DocParsingError {
mod tests {

    use crate::schema::field_type::ValueParsingError;
    use crate::schema::int_options::Cardinality::SingleValue;
    use crate::schema::schema::DocParsingError::NotJSON;
    use crate::schema::*;
    use matches::{assert_matches, matches};
@@ -715,4 +716,94 @@ mod tests {
        assert_matches!(json_err, Err(NotJSON(_)));
    }
}

    #[test]
    pub fn test_schema_add_field() {
        let mut schema_builder = SchemaBuilder::default();
        let id_options = TextOptions::default().set_stored().set_indexing_options(
            TextFieldIndexing::default()
                .set_tokenizer("raw")
                .set_index_option(IndexRecordOption::Basic),
        );
        let timestamp_options = IntOptions::default()
            .set_stored()
            .set_indexed()
            .set_fast(SingleValue);
        schema_builder.add_text_field("_id", id_options);
        schema_builder.add_date_field("_timestamp", timestamp_options);

        let schema_content = r#"[
  {
    "name": "text",
    "type": "text",
    "options": {
      "indexing": {
        "record": "position",
        "tokenizer": "default"
      },
      "stored": false
    }
  },
  {
    "name": "popularity",
    "type": "i64",
    "options": {
      "indexed": false,
      "fast": "single",
      "stored": true
    }
  }
]"#;
        let tmp_schema: Schema =
            serde_json::from_str(&schema_content).expect("error while reading json");
        for (_field, field_entry) in tmp_schema.fields() {
            schema_builder.add_field(field_entry.clone());
        }

        let schema = schema_builder.build();
        let schema_json = serde_json::to_string_pretty(&schema).unwrap();
        let expected = r#"[
  {
    "name": "_id",
    "type": "text",
    "options": {
      "indexing": {
        "record": "basic",
        "tokenizer": "raw"
      },
      "stored": true
    }
  },
  {
    "name": "_timestamp",
    "type": "date",
    "options": {
      "indexed": true,
      "fast": "single",
      "stored": true
    }
  },
  {
    "name": "text",
    "type": "text",
    "options": {
      "indexing": {
        "record": "position",
        "tokenizer": "default"
      },
      "stored": false
    }
  },
  {
    "name": "popularity",
    "type": "i64",
    "options": {
      "indexed": false,
      "fast": "single",
      "stored": true
    }
  }
]"#;
        assert_eq!(schema_json, expected);
    }
}

@@ -75,7 +75,7 @@ impl Serialize for Value {
            Value::U64(u) => serializer.serialize_u64(u),
            Value::I64(u) => serializer.serialize_i64(u),
            Value::F64(u) => serializer.serialize_f64(u),
            Value::Date(ref date) => serializer.serialize_i64(date.timestamp()),
            Value::Date(ref date) => serializer.serialize_str(&date.to_rfc3339()),
            Value::Facet(ref facet) => facet.serialize(serializer),
            Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes),
        }
@@ -96,14 +96,14 @@ impl<'de> Deserialize<'de> for Value {
            formatter.write_str("a string or u32")
        }

        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
            Ok(Value::U64(v))
        }

        fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
            Ok(Value::I64(v))
        }

        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
            Ok(Value::U64(v))
        }

        fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> {
            Ok(Value::F64(v))
        }
@@ -209,8 +209,8 @@ impl From<f64> for Value {
    }
}

impl From<DateTime> for Value {
    fn from(date_time: DateTime) -> Value {
impl From<crate::DateTime> for Value {
    fn from(date_time: crate::DateTime) -> Value {
        Value::Date(date_time)
    }
}
@@ -362,3 +362,17 @@ mod binary_serialize {
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Value;
    use crate::DateTime;
    use std::str::FromStr;

    #[test]
    fn test_serialize_date() {
        let value = Value::Date(DateTime::from_str("1996-12-20T00:39:57+00:00").unwrap());
        let serialized_value_json = serde_json::to_string_pretty(&value).unwrap();
        assert_eq!(serialized_value_json, r#""1996-12-20T00:39:57+00:00""#);
    }
}

@@ -1,7 +1,10 @@
extern crate lz4;

use std::io::{self, Read, Write};

/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &'static str = "lz4";

pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
    compressed.clear();
    let mut encoder = lz4::EncoderBuilder::new().build(compressed)?;

@@ -2,6 +2,11 @@ use snap;

use std::io::{self, Read, Write};

/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &str = "snappy";

pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
    compressed.clear();
    let mut encoder = snap::Writer::new(compressed);

@@ -42,12 +42,16 @@ pub use self::writer::StoreWriter;
#[cfg(feature = "lz4")]
mod compression_lz4;
#[cfg(feature = "lz4")]
use self::compression_lz4::*;
pub use self::compression_lz4::COMPRESSION;
#[cfg(feature = "lz4")]
use self::compression_lz4::{compress, decompress};

#[cfg(not(feature = "lz4"))]
mod compression_snap;
#[cfg(not(feature = "lz4"))]
use self::compression_snap::*;
pub use self::compression_snap::COMPRESSION;
#[cfg(not(feature = "lz4"))]
use self::compression_snap::{compress, decompress};
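Both backends expose the same shape of API, which is what lets the `cfg` gating above work: one `COMPRESSION` name plus symmetric `compress`/`decompress` functions that fill a caller-provided buffer. A round-trip sketch against that interface (assuming `decompress` mirrors the `compress` signature shown above, which the re-exports imply; the identity "codec" stands in for lz4/snappy):

```rust
use std::io;

// Stand-ins with the same shape as the gated functions re-exported above.
fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
    compressed.clear();
    compressed.extend_from_slice(uncompressed); // identity "codec" for the sketch
    Ok(())
}

fn decompress(compressed: &[u8], decompressed: &mut Vec<u8>) -> io::Result<()> {
    decompressed.clear();
    decompressed.extend_from_slice(compressed);
    Ok(())
}

fn main() -> io::Result<()> {
    let payload = b"stored document bytes";
    let mut compressed = Vec::new();
    let mut roundtrip = Vec::new();
    compress(payload, &mut compressed)?;
    decompress(&compressed, &mut roundtrip)?;
    assert_eq!(&roundtrip[..], &payload[..]);
    Ok(())
}
```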

#[cfg(test)]
pub mod tests {

@@ -2,8 +2,6 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//!
//! let tokenizer = RawTokenizer
//!     .filter(AlphaNumOnlyFilter);
//!
@@ -20,7 +18,6 @@
//! assert!(stream.next().is_some());
//! // the "emoji" is dropped because it's not alphanumeric
//! assert!(stream.next().is_none());
//! # }
//! ```
use super::{Token, TokenFilter, TokenStream};

@@ -7,7 +7,6 @@
//! ```rust
//! use tantivy::schema::*;
//!
//! # fn main() {
//! let mut schema_builder = Schema::builder();
//!
//! let text_options = TextOptions::default()
@@ -31,7 +30,6 @@
//! schema_builder.add_text_field("uuid", id_options);
//!
//! let schema = schema_builder.build();
//! # }
//! ```
//!
//! By default, `tantivy` offers the following tokenizers:
@@ -66,12 +64,10 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//! let en_stem = SimpleTokenizer
//!     .filter(RemoveLongFilter::limit(40))
//!     .filter(LowerCaser)
//!     .filter(Stemmer::new(Language::English));
//! # }
//! ```
//!
//! Once your tokenizer is defined, you need to
@@ -81,13 +77,12 @@
//! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*;
//! # use tantivy::Index;
//! # fn main() {
//! # let custom_en_tokenizer = SimpleTokenizer;
//! #
//! let custom_en_tokenizer = SimpleTokenizer;
//! # let schema = Schema::builder().build();
//! let index = Index::create_in_ram(schema);
//! index.tokenizers()
//!      .register("custom_en", custom_en_tokenizer);
//! # }
//! ```
//!
//! If you built your schema programmatically, a complete example
@@ -102,7 +97,6 @@
//! use tantivy::tokenizer::*;
//! use tantivy::Index;
//!
//! # fn main() {
//! let mut schema_builder = Schema::builder();
//! let text_field_indexing = TextFieldIndexing::default()
//!     .set_tokenizer("custom_en")
@@ -121,8 +115,6 @@
//! index
//!     .tokenizers()
//!     .register("custom_en", custom_en_tokenizer);
//! // ...
//! # }
//! ```
//!
mod alphanum_only;

@@ -31,7 +31,7 @@ use super::{Token, TokenStream, Tokenizer};
///
/// ```rust
/// use tantivy::tokenizer::*;
/// # fn main() {
///
/// let tokenizer = NgramTokenizer::new(2, 3, false);
/// let mut stream = tokenizer.token_stream("hello");
/// {
@@ -77,7 +77,6 @@ use super::{Token, TokenStream, Tokenizer};
///     assert_eq!(token.offset_to, 5);
/// }
/// assert!(stream.next().is_none());
/// # }
/// ```
#[derive(Clone)]
pub struct NgramTokenizer {

@@ -2,8 +2,6 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//!
//! let tokenizer = SimpleTokenizer
//!     .filter(RemoveLongFilter::limit(5));
//!
@@ -12,7 +10,6 @@
//! // out of the token stream.
//! assert_eq!(stream.next().unwrap().text, "nice");
//! assert!(stream.next().is_none());
//! # }
//! ```
//!
use super::{Token, TokenFilter, TokenStream};

@@ -15,6 +15,7 @@ pub enum Language {
    Greek,
    Hungarian,
    Italian,
    Norwegian,
    Portuguese,
    Romanian,
    Russian,
@@ -38,6 +39,7 @@ impl Language {
            Greek => Algorithm::Greek,
            Hungarian => Algorithm::Hungarian,
            Italian => Algorithm::Italian,
            Norwegian => Algorithm::Norwegian,
            Portuguese => Algorithm::Portuguese,
            Romanian => Algorithm::Romanian,
            Russian => Algorithm::Russian,

@@ -2,7 +2,6 @@
//! ```rust
//! use tantivy::tokenizer::*;
//!
//! # fn main() {
//! let tokenizer = SimpleTokenizer
//!     .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
//!
@@ -10,7 +9,6 @@
//! assert_eq!(stream.next().unwrap().text, "fox");
//! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none());
//! # }
//! ```
use super::{Token, TokenFilter, TokenStream};
use fnv::FnvHasher;
@@ -46,7 +44,7 @@ impl StopWordFilter {
            "there", "these", "they", "this", "to", "was", "will", "with",
        ];

        StopWordFilter::remove(words.iter().map(|s| s.to_string()).collect())
        StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect())
    }
}

@@ -58,12 +58,10 @@ pub trait Tokenizer<'a>: Sized + Clone {
    /// ```rust
    /// use tantivy::tokenizer::*;
    ///
    /// # fn main() {
    /// let en_stem = SimpleTokenizer
    ///     .filter(RemoveLongFilter::limit(40))
    ///     .filter(LowerCaser)
    ///     .filter(Stemmer::default());
    /// # }
    /// ```
    ///
    fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
@@ -188,7 +186,6 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// ```
/// use tantivy::tokenizer::*;
///
/// # fn main() {
/// let tokenizer = SimpleTokenizer
///     .filter(RemoveLongFilter::limit(40))
///     .filter(LowerCaser);
@@ -207,7 +204,6 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
///     assert_eq!(token.offset_to, 12);
///     assert_eq!(token.position, 1);
/// }
/// # }
/// ```
///
pub trait TokenStream {
@@ -227,17 +223,15 @@ pub trait TokenStream {
    /// and `.token()`.
    ///
    /// ```
    /// # use tantivy::tokenizer::*;
    /// #
    /// # fn main() {
    /// # let tokenizer = SimpleTokenizer
    /// #     .filter(RemoveLongFilter::limit(40))
    /// #     .filter(LowerCaser);
    /// use tantivy::tokenizer::*;
    ///
    /// let tokenizer = SimpleTokenizer
    ///     .filter(RemoveLongFilter::limit(40))
    ///     .filter(LowerCaser);
    /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
    /// while let Some(token) = token_stream.next() {
    ///     println!("Token {:?}", token.text);
    /// }
    /// # }
    /// ```
    fn next(&mut self) -> Option<&Token> {
        if self.advance() {