Mirror of https://github.com/quickwit-oss/tantivy.git, synced 2026-01-06 09:12:55 +00:00

Compare commits: poll-meta...refactorin

3 commits: 45d219b6e5, 20aaaad04f, ba0b89da36
.github/FUNDING.yml (vendored), 12 changes:

@@ -1,12 +0,0 @@
-# These are supported funding model platforms
-
-github: fulmicoton
-patreon: # Replace with a single Patreon username
-open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
-tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay: # Replace with a single Liberapay username
-issuehunt: # Replace with a single IssueHunt username
-otechie: # Replace with a single Otechie username
-custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
CHANGELOG.md, 34 changes:

@@ -1,27 +1,3 @@
-Tantivy 0.12.0
-======================
-- Removing static dispatch in tokenizers for simplicity. (#762)
-
-## How to update?
-
-Crates relying on custom tokenizer, or registering tokenizer in the manager will require some
-minor changes. Check https://github.com/tantivy-search/tantivy/blob/master/examples/custom_tokenizer.rs
-to check for some code sample.
-
-Tantivy 0.11.3
-=======================
-- Fixed DateTime as a fast field (#735)
-
-Tantivy 0.11.2
-=======================
-- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
-- Exposing a constructor for `WatchHandle` (#731)
-
-Tantivy 0.11.1
-=====================
-- Bug fix #729
-
-
 Tantivy 0.11.0
 =====================
 
@@ -33,19 +9,13 @@ Tantivy 0.11.0
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
 - Add footer with some metadata to index files. #605 (@fdb-hiroshima)
-- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
-- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
-- Added handling of pre-tokenized text fields (#642), which will enable users to
-load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
-- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)
 
 ## How to update?
 
-- The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
 an error and handling the `Result` is required.
-- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
 
 Tantivy 0.10.2
 =====================
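The `Box<dyn BoxableTokenizer>` to `BoxedTokenizer` entry above is the classic newtype-over-trait-object migration. A minimal dependency-free sketch of that pattern follows; the `Tokenizer` trait and method names here are illustrative stand-ins, not tantivy's real `tantivy::tokenizer` API:

```rust
// Sketch of the Box<dyn Trait> -> concrete wrapper struct migration.
trait Tokenizer {
    fn tokenize(&self, text: &str) -> Vec<String>;
}

struct Whitespace;

impl Tokenizer for Whitespace {
    fn tokenize(&self, text: &str) -> Vec<String> {
        text.split_whitespace().map(str::to_string).collect()
    }
}

// Instead of passing `Box<dyn Tokenizer>` around, callers name one concrete
// type; the trait object becomes an implementation detail of the wrapper.
struct BoxedTokenizer(Box<dyn Tokenizer>);

impl BoxedTokenizer {
    fn new<T: Tokenizer + 'static>(tokenizer: T) -> Self {
        BoxedTokenizer(Box::new(tokenizer))
    }

    fn tokenize(&self, text: &str) -> Vec<String> {
        self.0.tokenize(text)
    }
}

fn main() {
    let tokenizer = BoxedTokenizer::new(Whitespace);
    assert_eq!(tokenizer.tokenize("old man"), vec!["old", "man"]);
}
```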
Cargo.toml, 30 changes:

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.11.3"
+version = "0.11.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,12 +13,12 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"
 
 [dependencies]
-base64 = "0.11.0"
+base64 = "0.10.0"
 byteorder = "1.0"
 crc32fast = "1.2.0"
 once_cell = "1.0"
 regex ={version = "1.3.0", default-features = false, features = ["std"]}
-tantivy-fst = "0.2"
+tantivy-fst = "0.1"
 memmap = {version = "0.7", optional=true}
 lz4 = {version="1.20", optional=true}
 snap = {version="0.2"}
@@ -33,25 +33,27 @@ fs2={version="0.4", optional=true}
 itertools = "0.8"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 notify = {version="4", optional=true}
-uuid = { version = "0.8", features = ["v4", "serde"] }
+bit-set = "0.5"
+uuid = { version = "0.7.2", features = ["v4", "serde"] }
 crossbeam = "0.7"
-futures = {version = "0.3", features=["thread-pool"] }
+futures = "0.1"
+futures-cpupool = "0.1"
 owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
-rust-stemmers = "1.2"
+rust-stemmers = "1.1"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { version="0.11", path="./query-grammar" }
+tantivy-query-grammar = { path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.4"
+census = "0.2"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
 fail = "0.3"
+scoped-pool = "1.0"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1.0"
-rayon = "1"
+smallvec = "0.6"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"
@@ -62,10 +64,6 @@ maplit = "1"
 matches = "0.1.8"
 time = "0.1.42"
 
-[dev-dependencies.fail]
-version = "0.3"
-features = ["failpoints"]
-
 [profile.release]
 opt-level = 3
 debug = false
@@ -89,6 +87,10 @@ members = ["query-grammar"]
 [badges]
 travis-ci = { repository = "tantivy-search/tantivy" }
 
+[dev-dependencies.fail]
+version = "0.3"
+features = ["failpoints"]
+
 # Following the "fail" crate best practises, we isolate
 # tests that define specific behavior in fail check points
 # in a different binary.
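The futures swap is the biggest semantic change in this dependency diff: futures 0.1 futures (the `+` side, paired with futures-cpupool) are driven with combinators and `.wait()`, while futures 0.3 futures (the `-` side) are `async` values polled by an executor, with the `thread-pool` feature adding `futures::executor::ThreadPool`. A small runnable sketch of the 0.3 style, assuming `futures = "0.3"` with default features:

```rust
use futures::executor::block_on; // futures = "0.3", default features

async fn add(a: i32, b: i32) -> i32 {
    a + b
}

fn main() {
    // A futures 0.3 future does nothing until an executor polls it;
    // block_on drives it to completion on the current thread.
    let sum = block_on(add(1, 2));
    assert_eq!(sum, 3);
}
```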
The faceted search example:

@@ -13,100 +13,63 @@
 // ---
 // Importing tantivy...
 use tantivy::collector::FacetCollector;
-use tantivy::query::{AllQuery, TermQuery};
+use tantivy::query::AllQuery;
 use tantivy::schema::*;
 use tantivy::{doc, Index};
+use tempfile::TempDir;
 
 fn main() -> tantivy::Result<()> {
-    // Let's create a temporary directory for the sake of this example
+    // Let's create a temporary directory for the
+    // sake of this example
+    let index_path = TempDir::new()?;
     let mut schema_builder = Schema::builder();
 
-    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
-    // this is our faceted field: its scientific classification
-    let classification = schema_builder.add_facet_field("classification");
+    schema_builder.add_text_field("name", TEXT | STORED);
+    // this is our faceted field
+    schema_builder.add_facet_field("tags");
 
     let schema = schema_builder.build();
-    let index = Index::create_in_ram(schema);
-
-    let mut index_writer = index.writer(30_000_000)?;
+    let index = Index::create_in_dir(&index_path, schema.clone())?;
+
+    let mut index_writer = index.writer(50_000_000)?;
+
+    let name = schema.get_field("name").unwrap();
+    let tags = schema.get_field("tags").unwrap();
 
     // For convenience, tantivy also comes with a macro to
     // reduce the boilerplate above.
     index_writer.add_document(doc!(
-        name => "Cat",
-        classification => Facet::from("/Felidae/Felinae/Felis")
+        name => "the ditch",
+        tags => Facet::from("/pools/north")
     ));
 
     index_writer.add_document(doc!(
-        name => "Canada lynx",
-        classification => Facet::from("/Felidae/Felinae/Lynx")
-    ));
-    index_writer.add_document(doc!(
-        name => "Cheetah",
-        classification => Facet::from("/Felidae/Felinae/Acinonyx")
-    ));
-    index_writer.add_document(doc!(
-        name => "Tiger",
-        classification => Facet::from("/Felidae/Pantherinae/Panthera")
-    ));
-    index_writer.add_document(doc!(
-        name => "Lion",
-        classification => Facet::from("/Felidae/Pantherinae/Panthera")
-    ));
-    index_writer.add_document(doc!(
-        name => "Jaguar",
-        classification => Facet::from("/Felidae/Pantherinae/Panthera")
-    ));
-    index_writer.add_document(doc!(
-        name => "Sunda clouded leopard",
-        classification => Facet::from("/Felidae/Pantherinae/Neofelis")
-    ));
-    index_writer.add_document(doc!(
-        name => "Fossa",
-        classification => Facet::from("/Eupleridae/Cryptoprocta")
+        name => "little stacey",
+        tags => Facet::from("/pools/south")
     ));
 
     index_writer.commit()?;
 
     let reader = index.reader()?;
 
     let searcher = reader.searcher();
-    {
-        let mut facet_collector = FacetCollector::for_field(classification);
-        facet_collector.add_facet("/Felidae");
-        let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
-        // This lists all of the facet counts, right below "/Felidae".
-        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
-        assert_eq!(
-            facets,
-            vec![
-                (&Facet::from("/Felidae/Felinae"), 3),
-                (&Facet::from("/Felidae/Pantherinae"), 4),
-            ]
-        );
-    }
-
-    // Facets are also searchable.
-    //
-    // For instance a common UI pattern is to allow the user someone to click on a facet link
-    // (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
-    //
-    // The search would then look as follows.
-
-    // Check the reference doc for different ways to create a `Facet` object.
-    {
-        let facet = Facet::from_text("/Felidae/Pantherinae");
-        let facet_term = Term::from_facet(classification, &facet);
-        let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic);
-        let mut facet_collector = FacetCollector::for_field(classification);
-        facet_collector.add_facet("/Felidae/Pantherinae");
-        let facet_counts = searcher.search(&facet_term_query, &facet_collector)?;
-        let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect();
-        assert_eq!(
-            facets,
-            vec![
-                (&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
-                (&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
-            ]
-        );
-    }
+    let mut facet_collector = FacetCollector::for_field(tags);
+    facet_collector.add_facet("/pools");
+    let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
+    // This lists all of the facet counts
+    let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
+    assert_eq!(
+        facets,
+        vec![
+            (&Facet::from("/pools/north"), 1),
+            (&Facet::from("/pools/south"), 1),
+        ]
+    );
 
     Ok(())
 }
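What `FacetCollector` computes for `add_facet("/Felidae")` is a per-document count grouped by the immediate child of that path. A dependency-free sketch of that aggregation over the felin paths from the `-` side of the example; `facet_counts` here is an illustrative helper, not tantivy's API:

```rust
use std::collections::BTreeMap;

/// Counts matching documents per immediate child of `root`,
/// mimicking what FacetCollector returns for add_facet(root).
fn facet_counts<'a>(root: &str, docs: &[&'a str]) -> BTreeMap<&'a str, u64> {
    let mut counts = BTreeMap::new();
    for path in docs {
        // Only paths strictly below `root` contribute.
        if let Some(rest) = path.strip_prefix(root).and_then(|r| r.strip_prefix('/')) {
            if let Some(seg) = rest.split('/').next().filter(|s| !s.is_empty()) {
                // Key by "<root>/<child>", one count per matching document.
                let child_end = root.len() + 1 + seg.len();
                *counts.entry(&path[..child_end]).or_insert(0) += 1;
            }
        }
    }
    counts
}

fn main() {
    let docs = [
        "/Felidae/Felinae/Felis",
        "/Felidae/Felinae/Lynx",
        "/Felidae/Felinae/Acinonyx",
        "/Felidae/Pantherinae/Panthera",
        "/Felidae/Pantherinae/Panthera",
        "/Felidae/Pantherinae/Panthera",
        "/Felidae/Pantherinae/Neofelis",
        "/Eupleridae/Cryptoprocta",
    ];
    let counts = facet_counts("/Felidae", &docs);
    // Matches the assertions in the deleted example.
    assert_eq!(counts["/Felidae/Felinae"], 3);
    assert_eq!(counts["/Felidae/Pantherinae"], 4);
}
```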
The pre-tokenized text example (file deleted):

@@ -1,140 +0,0 @@
-// # Pre-tokenized text example
-//
-// This example shows how to use pre-tokenized text. Sometimes yout might
-// want to index and search through text which is already split into
-// tokens by some external tool.
-//
-// In this example we will:
-// - use tantivy tokenizer to create tokens and load them directly into tantivy,
-// - import tokenized text straight from json,
-// - perform a search on documents with pre-tokenized text
-
-use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, Tokenizer};
-
-use tantivy::collector::{Count, TopDocs};
-use tantivy::query::TermQuery;
-use tantivy::schema::*;
-use tantivy::{doc, Index, ReloadPolicy};
-use tempfile::TempDir;
-
-fn pre_tokenize_text(text: &str) -> Vec<Token> {
-    let mut token_stream = SimpleTokenizer.token_stream(text);
-    let mut tokens = vec![];
-    while token_stream.advance() {
-        tokens.push(token_stream.token().clone());
-    }
-    tokens
-}
-
-fn main() -> tantivy::Result<()> {
-    let index_path = TempDir::new()?;
-
-    let mut schema_builder = Schema::builder();
-
-    schema_builder.add_text_field("title", TEXT | STORED);
-    schema_builder.add_text_field("body", TEXT);
-
-    let schema = schema_builder.build();
-
-    let index = Index::create_in_dir(&index_path, schema.clone())?;
-
-    let mut index_writer = index.writer(50_000_000)?;
-
-    // We can create a document manually, by setting the fields
-    // one by one in a Document object.
-    let title = schema.get_field("title").unwrap();
-    let body = schema.get_field("body").unwrap();
-
-    let title_text = "The Old Man and the Sea";
-    let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";
-
-    // Content of our first document
-    // We create `PreTokenizedString` which contains original text and vector of tokens
-    let title_tok = PreTokenizedString {
-        text: String::from(title_text),
-        tokens: pre_tokenize_text(title_text),
-    };
-
-    println!(
-        "Original text: \"{}\" and tokens: {:?}",
-        title_tok.text, title_tok.tokens
-    );
-
-    let body_tok = PreTokenizedString {
-        text: String::from(body_text),
-        tokens: pre_tokenize_text(body_text),
-    };
-
-    // Now lets create a document and add our `PreTokenizedString`
-    let old_man_doc = doc!(title => title_tok, body => body_tok);
-
-    // ... now let's just add it to the IndexWriter
-    index_writer.add_document(old_man_doc);
-
-    // Pretokenized text can also be fed as JSON
-    let short_man_json = r#"{
-        "title":[{
-            "text":"The Old Man",
-            "tokens":[
-                {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
-                {"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
-                {"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
-            ]
-        }]
-    }"#;
-
-    let short_man_doc = schema.parse_document(&short_man_json)?;
-
-    index_writer.add_document(short_man_doc);
-
-    // Let's commit changes
-    index_writer.commit()?;
-
-    // ... and now is the time to query our index
-
-    let reader = index
-        .reader_builder()
-        .reload_policy(ReloadPolicy::OnCommit)
-        .try_into()?;
-
-    let searcher = reader.searcher();
-
-    // We want to get documents with token "Man", we will use TermQuery to do it
-    // Using PreTokenizedString means the tokens are stored as is avoiding stemming
-    // and lowercasing, which preserves full words in their original form
-    let query = TermQuery::new(
-        Term::from_field_text(title, "Man"),
-        IndexRecordOption::Basic,
-    );
-
-    let (top_docs, count) = searcher
-        .search(&query, &(TopDocs::with_limit(2), Count))
-        .unwrap();
-
-    assert_eq!(count, 2);
-
-    // Now let's print out the results.
-    // Note that the tokens are not stored along with the original text
-    // in the document store
-    for (_score, doc_address) in top_docs {
-        let retrieved_doc = searcher.doc(doc_address)?;
-        println!("Document: {}", schema.to_json(&retrieved_doc));
-    }
-
-    // In contrary to the previous query, when we search for the "man" term we
-    // should get no results, as it's not one of the indexed tokens. SimpleTokenizer
-    // only splits text on whitespace / punctuation.
-
-    let query = TermQuery::new(
-        Term::from_field_text(title, "man"),
-        IndexRecordOption::Basic,
-    );
-
-    let (_top_docs, count) = searcher
-        .search(&query, &(TopDocs::with_limit(2), Count))
-        .unwrap();
-
-    assert_eq!(count, 0);
-
-    Ok(())
-}
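The deleted example's JSON shows the exact token shape tantivy expects for pre-tokenized fields: byte offsets, a position, the text, and a position length. A dependency-free sketch that produces the same fields for whitespace-separated words; this `Token` struct mirrors the JSON keys and is not tantivy's type:

```rust
/// Shape of one token in the pre-tokenized JSON above, e.g.
/// {"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1}
#[derive(Debug)]
struct Token {
    offset_from: usize,
    offset_to: usize,
    position: usize,
    text: String,
    position_length: usize,
}

/// A whitespace splitter that fills the same fields a simple tokenizer would.
fn tokenize(text: &str) -> Vec<Token> {
    let mut tokens = Vec::new();
    let mut position = 0;
    let mut offset = 0;
    for word in text.split_whitespace() {
        // Locate the word's byte offset, scanning from the previous match.
        let start = offset + text[offset..].find(word).unwrap();
        tokens.push(Token {
            offset_from: start,
            offset_to: start + word.len(),
            position,
            text: word.to_string(),
            position_length: 1,
        });
        offset = start + word.len();
        position += 1;
    }
    tokens
}

fn main() {
    // Yields offsets 0..3, 4..7 and 8..11, matching the JSON in the diff.
    for token in tokenize("The Old Man") {
        println!("{:?}", token);
    }
}
```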
The stop words example:

@@ -50,7 +50,7 @@ fn main() -> tantivy::Result<()> {
 
     // This tokenizer lowers all of the text (to help with stop word matching)
     // then removes all instances of `the` and `and` from the corpus
-    let tokenizer = TextAnalyzer::from(SimpleTokenizer)
+    let tokenizer = SimpleTokenizer
         .filter(LowerCaser)
         .filter(StopWordFilter::remove(vec![
             "the".to_string(),
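Functionally, the analyzer built above lowercases tokens and then drops stop words. The same pipeline can be sketched with plain iterators; this is illustrative only, since tantivy's real filters operate on token streams rather than split strings:

```rust
use std::collections::HashSet;

// Lowercase, then drop stop words: the same two-stage pipeline the example
// builds with .filter(LowerCaser).filter(StopWordFilter::remove(...)).
fn analyze(text: &str, stop_words: &HashSet<&str>) -> Vec<String> {
    text.split_whitespace()
        .map(|word| word.to_lowercase())
        .filter(|word| !stop_words.contains(word.as_str()))
        .collect()
}

fn main() {
    let stop: HashSet<&str> = ["the", "and"].into_iter().collect();
    assert_eq!(analyze("The Old Man and the Sea", &stop), ["old", "man", "sea"]);
}
```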
The query-grammar README (file deleted):

@@ -1,3 +0,0 @@
-# Tantivy Query Grammar
-
-This crate is used by tantivy to parse queries.
The Count collector:

@@ -13,29 +13,44 @@ use crate::SegmentReader;
 /// use tantivy::collector::Count;
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index};
+/// use tantivy::{doc, Index, Result};
 ///
-/// let mut schema_builder = Schema::builder();
-/// let title = schema_builder.add_text_field("title", TEXT);
-/// let schema = schema_builder.build();
-/// let index = Index::create_in_ram(schema);
+/// # fn main() { example().unwrap(); }
+/// fn example() -> Result<()> {
+///     let mut schema_builder = Schema::builder();
+///     let title = schema_builder.add_text_field("title", TEXT);
+///     let schema = schema_builder.build();
+///     let index = Index::create_in_ram(schema);
+///     {
+///         let mut index_writer = index.writer(3_000_000)?;
+///         index_writer.add_document(doc!(
+///             title => "The Name of the Wind",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of Muadib",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "A Dairy Cow",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of a Young Girl",
+///         ));
+///         index_writer.commit().unwrap();
+///     }
 ///
-/// let mut index_writer = index.writer(3_000_000).unwrap();
-/// index_writer.add_document(doc!(title => "The Name of the Wind"));
-/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
-/// index_writer.add_document(doc!(title => "A Dairy Cow"));
-/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
-/// assert!(index_writer.commit().is_ok());
+///     let reader = index.reader()?;
+///     let searcher = reader.searcher();
 ///
-/// let reader = index.reader().unwrap();
-/// let searcher = reader.searcher();
-///
-/// // Here comes the important part
-/// let query_parser = QueryParser::for_index(&index, vec![title]);
-/// let query = query_parser.parse_query("diary").unwrap();
-/// let count = searcher.search(&query, &Count).unwrap();
-///
-/// assert_eq!(count, 2);
+///     {
+///         let query_parser = QueryParser::for_index(&index, vec![title]);
+///         let query = query_parser.parse_query("diary")?;
+///         let count = searcher.search(&query, &Count).unwrap();
+///
+///         assert_eq!(count, 2);
+///     }
+///
+///     Ok(())
+/// }
 /// ```
 pub struct Count;
 
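The rewritten doc-test above leans on rustdoc's hidden-line convention: lines beginning with `# ` are compiled and run with the test but omitted from the rendered documentation, which lets the visible example use `?` inside a hidden `fn example() -> Result<()>` wrapper. A self-contained sketch of the same pattern; the function it documents is hypothetical:

````rust
/// Parses a number and adds one.
///
/// ```
/// # fn main() { example().unwrap(); }
/// # fn example() -> Result<(), std::num::ParseIntError> {
/// let n: i32 = "41".parse()?;
/// assert_eq!(n + 1, 42);
/// # Ok(())
/// # }
/// ```
pub fn parse_and_add_one(input: &str) -> Result<i32, std::num::ParseIntError> {
    Ok(input.parse::<i32>()? + 1)
}
````

Readers of the docs only see the two visible lines, while `cargo test` still type-checks and executes the whole wrapper.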
The FacetCollector:

@@ -86,6 +86,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 /// use tantivy::schema::{Facet, Schema, TEXT};
 /// use tantivy::{doc, Index, Result};
 ///
+/// # fn main() { example().unwrap(); }
 /// fn example() -> Result<()> {
 ///     let mut schema_builder = Schema::builder();
 ///
@@ -126,7 +127,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///     let searcher = reader.searcher();
 ///
 ///     {
 ///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/lang");
 ///         facet_collector.add_facet("/category");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -142,7 +143,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///     }
 ///
 ///     {
 ///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/category/fiction");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
 ///
@@ -157,8 +158,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///         ]);
 ///     }
 ///
 ///     {
 ///         let mut facet_collector = FacetCollector::for_field(facet);
 ///         facet_collector.add_facet("/category/fiction");
 ///         let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
 ///
@@ -171,7 +172,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
 ///
 ///     Ok(())
 /// }
-/// # assert!(example().is_ok());
 /// ```
 pub struct FacetCollector {
     field: Field,
@@ -452,11 +452,9 @@ impl FacetCounts {
 #[cfg(test)]
 mod tests {
     use super::{FacetCollector, FacetCounts};
-    use crate::collector::Count;
     use crate::core::Index;
-    use crate::query::{AllQuery, QueryParser, TermQuery};
-    use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema};
-    use crate::Term;
+    use crate::query::AllQuery;
+    use crate::schema::{Document, Facet, Field, Schema};
     use rand::distributions::Uniform;
     use rand::prelude::SliceRandom;
     use rand::{thread_rng, Rng};
@@ -546,56 +544,6 @@ mod tests {
         assert_eq!(facets[0].1, 1);
     }
 
-    #[test]
-    fn test_doc_search_by_facet() {
-        let mut schema_builder = Schema::builder();
-        let facet_field = schema_builder.add_facet_field("facet");
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.add_document(doc!(
-            facet_field => Facet::from_text(&"/A/A"),
-        ));
-        index_writer.add_document(doc!(
-            facet_field => Facet::from_text(&"/A/B"),
-        ));
-        index_writer.add_document(doc!(
-            facet_field => Facet::from_text(&"/A/C/A"),
-        ));
-        index_writer.add_document(doc!(
-            facet_field => Facet::from_text(&"/D/C/A"),
-        ));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        assert_eq!(searcher.num_docs(), 4);
-
-        let count_facet = |facet_str: &str| {
-            let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
-            searcher
-                .search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
-                .unwrap()
-        };
-
-        assert_eq!(count_facet("/"), 4);
-        assert_eq!(count_facet("/A"), 3);
-        assert_eq!(count_facet("/A/B"), 1);
-        assert_eq!(count_facet("/A/C"), 1);
-        assert_eq!(count_facet("/A/C/A"), 1);
-        assert_eq!(count_facet("/C/A"), 0);
-        {
-            let query_parser = QueryParser::for_index(&index, vec![]);
-            {
-                let query = query_parser.parse_query("facet:/A/B").unwrap();
-                assert_eq!(1, searcher.search(&query, &Count).unwrap());
-            }
-            {
-                let query = query_parser.parse_query("facet:/A").unwrap();
-                assert_eq!(3, searcher.search(&query, &Count).unwrap());
-            }
-        }
-    }
-
     #[test]
     fn test_non_used_facet_collector() {
         let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0));
The MultiCollector:

@@ -108,35 +108,49 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
 /// use tantivy::collector::{Count, TopDocs, MultiCollector};
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index};
+/// use tantivy::{doc, Index, Result};
 ///
-/// let mut schema_builder = Schema::builder();
-/// let title = schema_builder.add_text_field("title", TEXT);
-/// let schema = schema_builder.build();
-/// let index = Index::create_in_ram(schema);
+/// # fn main() { example().unwrap(); }
+/// fn example() -> Result<()> {
+///     let mut schema_builder = Schema::builder();
+///     let title = schema_builder.add_text_field("title", TEXT);
+///     let schema = schema_builder.build();
+///     let index = Index::create_in_ram(schema);
+///     {
+///         let mut index_writer = index.writer(3_000_000)?;
+///         index_writer.add_document(doc!(
+///             title => "The Name of the Wind",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of Muadib",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "A Dairy Cow",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of a Young Girl",
+///         ));
+///         index_writer.commit().unwrap();
+///     }
 ///
-/// let mut index_writer = index.writer(3_000_000).unwrap();
-/// index_writer.add_document(doc!(title => "The Name of the Wind"));
-/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
-/// index_writer.add_document(doc!(title => "A Dairy Cow"));
-/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
-/// assert!(index_writer.commit().is_ok());
+///     let reader = index.reader()?;
+///     let searcher = reader.searcher();
 ///
-/// let reader = index.reader().unwrap();
-/// let searcher = reader.searcher();
-///
-/// let mut collectors = MultiCollector::new();
-/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
-/// let count_handle = collectors.add_collector(Count);
-/// let query_parser = QueryParser::for_index(&index, vec![title]);
-/// let query = query_parser.parse_query("diary").unwrap();
-/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
-///
-/// let count = count_handle.extract(&mut multi_fruit);
-/// let top_docs = top_docs_handle.extract(&mut multi_fruit);
-///
-/// assert_eq!(count, 2);
-/// assert_eq!(top_docs.len(), 2);
+///     let mut collectors = MultiCollector::new();
+///     let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
+///     let count_handle = collectors.add_collector(Count);
+///     let query_parser = QueryParser::for_index(&index, vec![title]);
+///     let query = query_parser.parse_query("diary")?;
+///     let mut multi_fruit = searcher.search(&query, &collectors)?;
+///
+///     let count = count_handle.extract(&mut multi_fruit);
+///     let top_docs = top_docs_handle.extract(&mut multi_fruit);
+///
+///     # assert_eq!(count, 2);
+///     # assert_eq!(top_docs.len(), 2);
+///
+///     Ok(())
+/// }
 /// ```
 #[allow(clippy::type_complexity)]
 #[derive(Default)]
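The point of `MultiCollector` is that one query execution feeds several collectors. The same single-pass shape can be sketched without tantivy by folding a stream of (score, doc) hits into both a count and a top-k heap at once; the names here are illustrative, not tantivy's API:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// One pass over (score, doc) pairs feeding two "collectors" at once:
// a total count and a top-k, which is what MultiCollector coordinates.
fn count_and_top_k(hits: &[(u32, u32)], k: usize) -> (usize, Vec<(u32, u32)>) {
    let mut count = 0;
    let mut heap = BinaryHeap::new(); // min-heap of the best k, via Reverse
    for &hit in hits {
        count += 1;
        heap.push(Reverse(hit));
        if heap.len() > k {
            heap.pop(); // evict the current minimum
        }
    }
    let mut top: Vec<_> = heap.into_iter().map(|Reverse(hit)| hit).collect();
    top.sort_by(|a, b| b.cmp(a)); // best hits first
    (count, top)
}

fn main() {
    let hits = [(10, 0), (50, 1), (30, 2), (50, 3)];
    let (count, top) = count_and_top_k(&hits, 2);
    assert_eq!(count, 4);
    assert_eq!(top, vec![(50, 3), (50, 1)]);
}
```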
The TopCollector and ComparableDoc:

@@ -12,9 +12,6 @@ use std::collections::BinaryHeap;
 /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
 /// default Rust heap is a max heap, whereas a min heap is needed.
 ///
-/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
-/// address is used.
-///
 /// WARNING: equality is not what you would expect here.
 /// Two elements are equal if their feature is equal, and regardless of whether `doc`
 /// is equal. This should be perfectly fine for this usage, but let's make sure this
@@ -24,37 +21,29 @@ struct ComparableDoc<T, D> {
     doc: D,
 }
 
-impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
+impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         Some(self.cmp(other))
     }
 }
 
-impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
+impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
     #[inline]
     fn cmp(&self, other: &Self) -> Ordering {
-        // Reversed to make BinaryHeap work as a min-heap
-        let by_feature = other
+        other
             .feature
             .partial_cmp(&self.feature)
-            .unwrap_or(Ordering::Equal);
-
-        let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
-
-        // In case of a tie on the feature, we sort by ascending
-        // `DocAddress` in order to ensure a stable sorting of the
-        // documents.
-        by_feature.then_with(lazy_by_doc_address)
+            .unwrap_or_else(|| Ordering::Equal)
     }
 }
 
-impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
+impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
     fn eq(&self, other: &Self) -> bool {
         self.cmp(other) == Ordering::Equal
     }
 }
 
-impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
+impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
 
 pub(crate) struct TopCollector<T> {
     limit: usize,
@@ -225,94 +214,4 @@ mod tests {
         ]
     );
 }
-
-    #[test]
-    fn test_top_segment_collector_stable_ordering_for_equal_feature() {
-        // given that the documents are collected in ascending doc id order,
-        // when harvesting we have to guarantee stable sorting in case of a tie
-        // on the score
-        let doc_ids_collection = [4, 5, 6];
-        let score = 3.14;
-
-        let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
-        for id in &doc_ids_collection {
-            top_collector_limit_2.collect(*id, score);
-        }
-
-        let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
-        for id in &doc_ids_collection {
-            top_collector_limit_3.collect(*id, score);
-        }
-
-        assert_eq!(
-            top_collector_limit_2.harvest(),
-            top_collector_limit_3.harvest()[..2].to_vec(),
-        );
-    }
-}
-
-#[cfg(all(test, feature = "unstable"))]
-mod bench {
-    use super::TopSegmentCollector;
-    use test::Bencher;
-
-    #[bench]
-    fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
-        let mut top_collector = TopSegmentCollector::new(0, 400);
-
-        b.iter(|| {
-            for i in 0..100 {
-                top_collector.collect(i, 0.8);
-            }
-        });
-    }
-
-    #[bench]
-    fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
-        let mut top_collector = TopSegmentCollector::new(0, 100);
-
-        for i in 0..100 {
-            top_collector.collect(i, 0.8);
-        }
-
-        b.iter(|| {
-            for i in 0..100 {
-                top_collector.collect(i, 0.8);
-            }
-        });
-    }
-
-    #[bench]
-    fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
-        b.iter(|| {
-            let mut top_collector = TopSegmentCollector::new(0, 100);
-
-            for i in 0..100 {
-                top_collector.collect(i, 0.8);
-            }
-
-            // it would be nice to be able to do the setup N times but still
-            // measure only harvest(). We can't since harvest() consumes
-            // the top_collector.
-            top_collector.harvest()
-        });
-    }
-
-    #[bench]
-    fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
-        b.iter(|| {
-            let mut top_collector = TopSegmentCollector::new(0, 100);
-            let mut score = 1.0;
-
-            for i in 0..100 {
-                score += 1.0;
-                top_collector.collect(i, score);
-            }
-
-            // it would be nice to be able to do the setup N times but still
-            // measure only harvest(). We can't since harvest() consumes
-            // the top_collector.
-            top_collector.harvest()
-        });
-    }
 }
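The `-` side of the `Ord` change is the interesting one: the feature comparison is reversed so that std's max-heap behaves as a min-heap, and a tie on the feature falls back to the document id so harvesting is deterministic. A dependency-free sketch of that ordering, and of why the tie-break keeps top-k pagination stable:

```rust
use std::cmp::Ordering;
use std::collections::BinaryHeap;

// Reversing the feature comparison turns std's max-heap into the min-heap
// the collector needs; breaking ties by ascending doc id mirrors the
// stable-sorting behavior on the removed (-) side of the diff.
#[derive(Debug, PartialEq)]
struct ComparableDoc {
    feature: f32,
    doc: u32,
}

impl Eq for ComparableDoc {}

impl Ord for ComparableDoc {
    fn cmp(&self, other: &Self) -> Ordering {
        let by_feature = other
            .feature
            .partial_cmp(&self.feature)
            .unwrap_or(Ordering::Equal);
        // Tie on the feature: the largest doc id compares greatest, so the
        // heap evicts it first and the earliest doc ids survive.
        by_feature.then_with(|| self.doc.cmp(&other.doc))
    }
}

impl PartialOrd for ComparableDoc {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    for doc in 0..4u32 {
        heap.push(ComparableDoc { feature: 1.0, doc });
        if heap.len() > 2 {
            heap.pop(); // evicts the worst candidate
        }
    }
    let mut top: Vec<u32> = heap.into_iter().map(|entry| entry.doc).collect();
    top.sort();
    assert_eq!(top, vec![0, 1]); // stable: earliest doc ids win ties
}
```

Without the `then_with` tie-break, which docs survive among equal scores depends on heap internals, which is exactly what the removed `test_top_segment_collector_stable_ordering_for_equal_feature` test guarded against.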
The TopDocs collector:

@@ -6,7 +6,6 @@ use crate::collector::tweak_score_top_collector::TweakedScoreTopCollector;
 use crate::collector::{
     CustomScorer, CustomSegmentScorer, ScoreSegmentTweaker, ScoreTweaker, SegmentCollector,
 };
-use crate::fastfield::FastFieldReader;
 use crate::schema::Field;
 use crate::DocAddress;
 use crate::DocId;
@@ -16,43 +15,54 @@ use crate::SegmentLocalId;
 use crate::SegmentReader;
 use std::fmt;
 
-/// The `TopDocs` collector keeps track of the top `K` documents
+/// The Top Score Collector keeps track of the K documents
 /// sorted by their score.
 ///
 /// The implementation is based on a `BinaryHeap`.
 /// The theorical complexity for collecting the top `K` out of `n` documents
 /// is `O(n log K)`.
 ///
-/// This collector guarantees a stable sorting in case of a tie on the
-/// document score. As such, it is suitable to implement pagination.
-///
 /// ```rust
 /// use tantivy::collector::TopDocs;
 /// use tantivy::query::QueryParser;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, DocAddress, Index};
+/// use tantivy::{doc, DocAddress, Index, Result};
 ///
-/// let mut schema_builder = Schema::builder();
-/// let title = schema_builder.add_text_field("title", TEXT);
-/// let schema = schema_builder.build();
-/// let index = Index::create_in_ram(schema);
+/// # fn main() { example().unwrap(); }
+/// fn example() -> Result<()> {
+///     let mut schema_builder = Schema::builder();
+///     let title = schema_builder.add_text_field("title", TEXT);
+///     let schema = schema_builder.build();
+///     let index = Index::create_in_ram(schema);
+///     {
+///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+///         index_writer.add_document(doc!(
+///             title => "The Name of the Wind",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of Muadib",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "A Dairy Cow",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of a Young Girl",
+///         ));
+///         index_writer.commit().unwrap();
+///     }
 ///
-/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-/// index_writer.add_document(doc!(title => "The Name of the Wind"));
-/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
-/// index_writer.add_document(doc!(title => "A Dairy Cow"));
-/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
-/// assert!(index_writer.commit().is_ok());
+///     let reader = index.reader()?;
+///     let searcher = reader.searcher();
 ///
-/// let reader = index.reader().unwrap();
-/// let searcher = reader.searcher();
-///
-/// let query_parser = QueryParser::for_index(&index, vec![title]);
-/// let query = query_parser.parse_query("diary").unwrap();
-/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
-///
-/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
-/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
+///     let query_parser = QueryParser::for_index(&index, vec![title]);
+///     let query = query_parser.parse_query("diary")?;
+///     let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
+///
+///     assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
+///     assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
+///
+///     Ok(())
+/// }
 /// ```
 pub struct TopDocs(TopCollector<Score>);
 
@@ -62,34 +72,6 @@ impl fmt::Debug for TopDocs {
     }
 }
 
-struct ScorerByFastFieldReader {
-    ff_reader: FastFieldReader<u64>,
-}
-
-impl CustomSegmentScorer<u64> for ScorerByFastFieldReader {
-    fn score(&self, doc: DocId) -> u64 {
-        self.ff_reader.get_u64(u64::from(doc))
-    }
-}
-
-struct ScorerByField {
-    field: Field,
-}
-
-impl CustomScorer<u64> for ScorerByField {
-    type Child = ScorerByFastFieldReader;
-
-    fn segment_scorer(&self, segment_reader: &SegmentReader) -> crate::Result<Self::Child> {
-        let ff_reader = segment_reader
-            .fast_fields()
-            .u64(self.field)
-            .ok_or_else(|| {
-                crate::Error::SchemaError(format!("Field requested is not a i64/u64 fast field."))
-            })?;
-        Ok(ScorerByFastFieldReader { ff_reader })
-    }
-}
-
 impl TopDocs {
     /// Creates a top score collector, with a number of documents equal to "limit".
     ///
@@ -117,12 +99,15 @@ impl TopDocs {
 /// #
 /// # let index = Index::create_in_ram(schema);
 /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64));
+/// # index_writer.add_document(doc!(
+/// #     title => "The Name of the Wind",
+/// #     rating => 92u64,
+/// # ));
 /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
 /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
 /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
-/// # assert!(index_writer.commit().is_ok());
-/// # let reader = index.reader().unwrap();
+/// # index_writer.commit()?;
+/// # let reader = index.reader()?;
 /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
 /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
 /// # assert_eq!(top_docs,
@@ -172,7 +157,14 @@ impl TopDocs {
         self,
         field: Field,
     ) -> impl Collector<Fruit = Vec<(u64, DocAddress)>> {
-        self.custom_score(ScorerByField { field })
+        self.custom_score(move |segment_reader: &SegmentReader| {
+            let ff_reader = segment_reader
+                .fast_fields()
+                .u64(field)
+                .expect("Field requested is not a i64/u64 fast field.");
+            //TODO error message missmatch actual behavior for i64
+            move |doc: DocId| ff_reader.get(doc)
+        })
     }
 
     /// Ranks the documents using a custom score.
@@ -207,33 +199,27 @@ impl TopDocs {
 /// use tantivy::collector::TopDocs;
 /// use tantivy::schema::Field;
 ///
-/// fn create_schema() -> Schema {
-///     let mut schema_builder = Schema::builder();
-///     schema_builder.add_text_field("product_name", TEXT);
-///     schema_builder.add_u64_field("popularity", FAST);
-///     schema_builder.build()
-/// }
-///
-/// fn create_index() -> tantivy::Result<Index> {
-///     let schema = create_schema();
-///     let index = Index::create_in_ram(schema);
-///     let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-///     let product_name = index.schema().get_field("product_name").unwrap();
-///     let popularity: Field = index.schema().get_field("popularity").unwrap();
-///     index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
-///     index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
-///     index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
-///     index_writer.commit()?;
-///     Ok(index)
-/// }
-///
-/// let index = create_index().unwrap();
-/// let product_name = index.schema().get_field("product_name").unwrap();
+/// # fn create_schema() -> Schema {
+/// #     let mut schema_builder = Schema::builder();
+/// #     schema_builder.add_text_field("product_name", TEXT);
+/// #     schema_builder.add_u64_field("popularity", FAST);
+/// #     schema_builder.build()
+/// # }
+/// #
+/// # fn main() -> tantivy::Result<()> {
+/// # let schema = create_schema();
+/// # let index = Index::create_in_ram(schema);
+/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
+/// # let product_name = index.schema().get_field("product_name").unwrap();
+/// #
 /// let popularity: Field = index.schema().get_field("popularity").unwrap();
-///
-/// let user_query_str = "diary";
-/// let query_parser = QueryParser::for_index(&index, vec![product_name]);
-/// let query = query_parser.parse_query(user_query_str).unwrap();
+/// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
+/// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
+/// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
+/// # index_writer.commit()?;
+/// // ...
+/// # let user_query = "diary";
+/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
 ///
 /// // This is where we build our collector with our custom score.
 /// let top_docs_by_custom_score = TopDocs
@@ -260,12 +246,15 @@ impl TopDocs {
 ///         popularity_boost_score * original_score
 ///     }
 /// });
-/// let reader = index.reader().unwrap();
-/// let searcher = reader.searcher();
+/// # let reader = index.reader()?;
+/// # let searcher = reader.searcher();
 /// // ... and here are our documents. Note this is a simple vec.
 /// // The `Score` in the pair is our tweaked score.
 /// let resulting_docs: Vec<(Score, DocAddress)> =
-///      searcher.search(&query, &top_docs_by_custom_score).unwrap();
+///      searcher.search(&*query, &top_docs_by_custom_score)?;
+///
+/// # Ok(())
+/// # }
 /// ```
 ///
 /// # See also
@@ -439,13 +428,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
 mod tests {
     use super::TopDocs;
     use crate::collector::Collector;
-    use crate::query::{AllQuery, Query, QueryParser};
+    use crate::query::{Query, QueryParser};
     use crate::schema::{Field, Schema, FAST, STORED, TEXT};
     use crate::DocAddress;
     use crate::Index;
     use crate::IndexWriter;
     use crate::Score;
-    use itertools::Itertools;
 
     fn make_index() -> Index {
         let mut schema_builder = Schema::builder();
@@ -506,29 +494,6 @@ mod tests {
         );
     }
 
-    #[test]
-    fn test_top_collector_stable_sorting() {
-        let index = make_index();
-
-        // using AllQuery to get a constant score
-        let searcher = index.reader().unwrap().searcher();
-
-        let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
-
-        let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
-
-        // precondition for the test to be meaningful: we did get documents
-        // with the same score
-        assert!(page_1.iter().map(|result| result.0).all_equal());
-        assert!(page_2.iter().map(|result| result.0).all_equal());
-
-        // sanity check since we're relying on make_index()
-        assert_eq!(page_1.len(), 2);
-        assert_eq!(page_2.len(), 3);
-
-        assert_eq!(page_1, &page_2[..page_1.len()]);
-    }
-
     #[test]
     #[should_panic]
     fn test_top_0() {
@@ -594,6 +559,7 @@ mod tests {
 }
 
 #[test]
+#[should_panic(expected = "Field requested is not a i64/u64 fast field")]
 fn test_field_not_fast_field() {
     let mut schema_builder = Schema::builder();
     let title = schema_builder.add_text_field(TITLE, TEXT);
@@ -608,12 +574,7 @@ mod tests {
     let searcher = index.reader().unwrap().searcher();
     let segment = searcher.segment_reader(0);
     let top_collector = TopDocs::with_limit(4).order_by_u64_field(size);
-    let err = top_collector.for_segment(0, segment);
-    if let Err(crate::Error::SchemaError(msg)) = err {
-        assert_eq!(msg, "Field requested is not a i64/u64 fast field.");
-    } else {
-        assert!(false);
-    }
+    assert!(top_collector.for_segment(0, segment).is_ok());
 }
 
 fn index(
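`order_by_u64_field` on the `+` side inlines the two-level `CustomScorer` pattern as nested closures: an outer per-segment factory performs the expensive fast-field lookup once, and the inner closure it returns scores individual documents cheaply. A minimal dependency-free sketch of the same shape, with a plain slice standing in for the fast-field reader and all names illustrative:

```rust
// Per-segment factory: F builds a per-document scorer S, borrowing the
// segment data once. This mirrors CustomScorer/CustomSegmentScorer.
fn score_segment<'a, F, S>(segment: &'a [u64], factory: F) -> Vec<u64>
where
    F: Fn(&'a [u64]) -> S,
    S: Fn(usize) -> u64,
{
    let scorer = factory(segment); // one-time per-segment setup
    (0..segment.len()).map(|doc| scorer(doc)).collect()
}

fn main() {
    // "Fast field" values for one segment, indexed by doc id.
    let ratings: [u64; 4] = [92, 97, 63, 80];
    // The factory resolves the "reader"; the inner closure scores one doc.
    let scores = score_segment(&ratings, |seg| move |doc: usize| seg[doc]);
    assert_eq!(scores, vec![92, 97, 63, 80]);
}
```

The closure form removes the two named structs at the cost of panicking (`expect`) instead of returning `Err` when the field is not a fast field, which is why the test now carries `#[should_panic]`.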
The search Executor:

@@ -1,6 +1,6 @@
 use crate::Result;
 use crossbeam::channel;
-use rayon::{ThreadPool, ThreadPoolBuilder};
+use scoped_pool::{Pool, ThreadConfig};
 
 /// Search executor whether search request are single thread or multithread.
 ///
@@ -11,7 +11,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
 /// used by the client. Second, we may stop using rayon in the future.
 pub enum Executor {
     SingleThread,
-    ThreadPool(ThreadPool),
+    ThreadPool(Pool),
 }
 
 impl Executor {
@@ -21,12 +21,10 @@ impl Executor {
     }
 
     // Creates an Executor that dispatches the tasks in a thread pool.
-    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> {
-        let pool = ThreadPoolBuilder::new()
-            .num_threads(num_threads)
-            .thread_name(move |num| format!("{}{}", prefix, num))
-            .build()?;
-        Ok(Executor::ThreadPool(pool))
+    pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
+        let thread_config = ThreadConfig::new().prefix(prefix);
+        let pool = Pool::with_thread_config(num_threads, thread_config);
+        Executor::ThreadPool(pool)
     }
 
     // Perform a map in the thread pool.
@@ -50,9 +48,9 @@ impl Executor {
         let num_fruits = args_with_indices.len();
         let fruit_receiver = {
             let (fruit_sender, fruit_receiver) = channel::unbounded();
-            pool.scope(|scope| {
+            pool.scoped(|scope| {
                 for arg_with_idx in args_with_indices {
-                    scope.spawn(|_| {
+                    scope.execute(|| {
                         let (idx, arg) = arg_with_idx;
                         let fruit = f(arg);
                         if let Err(err) = fruit_sender.send((idx, fruit)) {
@@ -105,7 +103,6 @@ mod tests {
     #[should_panic] //< unfortunately the panic message is not propagated
     fn test_panic_propagates_multi_thread() {
         let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
-            .unwrap()
             .map(
                 |_| {
                     panic!("panic should propagate");
@@ -129,7 +126,6 @@ mod tests {
     #[test]
     fn test_map_multithread() {
         let result: Vec<usize> = Executor::multi_thread(3, "search-test")
-            .unwrap()
             .map(|i| Ok(i * 2), 0..10)
             .unwrap();
         assert_eq!(result.len(), 10);
|
|||||||
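The right-hand side of this diff fans each task out on a scoped pool and collects results over a crossbeam channel, tagging every result with its index so the caller can restore input order. A minimal sketch of that same fan-out/collect pattern, using `std::thread::scope` in place of the `scoped_pool` crate (the helper name and signature here are illustrative, not tantivy's API):

```rust
use crossbeam::channel;

// Run `f` over `args` in parallel and return the results in input order.
fn scoped_map<A, B, F>(f: F, args: Vec<A>) -> Vec<B>
where
    A: Send,
    B: Send,
    F: Fn(A) -> B + Sync,
{
    let n = args.len();
    let (sender, receiver) = channel::unbounded();
    std::thread::scope(|scope| {
        for (idx, arg) in args.into_iter().enumerate() {
            let sender = sender.clone();
            let f = &f;
            scope.spawn(move || {
                // Tag each result with its index so the caller can reorder.
                let _ = sender.send((idx, f(arg)));
            });
        }
    });
    // All scoped threads have finished here, so exactly `n` messages exist.
    let mut results: Vec<Option<B>> = (0..n).map(|_| None).collect();
    for (idx, fruit) in receiver.iter().take(n) {
        results[idx] = Some(fruit);
    }
    results.into_iter().map(Option::unwrap).collect()
}
```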
@@ -20,7 +20,8 @@ use crate::reader::IndexReaderBuilder;
 use crate::schema::Field;
 use crate::schema::FieldType;
 use crate::schema::Schema;
-use crate::tokenizer::{TextAnalyzer, TokenizerManager};
+use crate::tokenizer::BoxedTokenizer;
+use crate::tokenizer::TokenizerManager;
 use crate::IndexWriter;
 use crate::Result;
 use num_cpus;
@@ -72,16 +73,15 @@ impl Index {
 
     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
-    pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> {
-        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?);
-        Ok(())
+    pub fn set_multithread_executor(&mut self, num_threads: usize) {
+        self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
     }
 
     /// Replace the default single thread search executor pool
     /// by a thread pool with a given number of threads.
-    pub fn set_default_multithread_executor(&mut self) -> Result<()> {
+    pub fn set_default_multithread_executor(&mut self) {
         let default_num_threads = num_cpus::get();
-        self.set_multithread_executor(default_num_threads)
+        self.set_multithread_executor(default_num_threads);
     }
 
     /// Creates a new index using the `RAMDirectory`.
@@ -103,21 +103,23 @@ impl Index {
         if Index::exists(&mmap_directory) {
             return Err(TantivyError::IndexAlreadyExists);
         }
 
         Index::create(mmap_directory, schema)
     }
 
     /// Opens or creates a new index in the provided directory
     pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
-        if !Index::exists(&dir) {
-            return Index::create(dir, schema);
-        }
-        let index = Index::open(dir)?;
-        if index.schema() == schema {
-            Ok(index)
+        if Index::exists(&dir) {
+            let index = Index::open(dir)?;
+            if index.schema() == schema {
+                Ok(index)
+            } else {
+                Err(TantivyError::SchemaError(
+                    "An index exists but the schema does not match.".to_string(),
+                ))
+            }
         } else {
-            Err(TantivyError::SchemaError(
-                "An index exists but the schema does not match.".to_string(),
-            ))
+            Index::create(dir, schema)
         }
     }
 
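From the caller's perspective the reworked control flow above keeps the same contract: open the index when one already exists with an identical schema, create it otherwise, and fail with a `SchemaError` on a mismatch. A hedged usage sketch (the directory choice and field name are illustrative):

```rust
use tantivy::directory::RAMDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn demo() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let directory = RAMDirectory::create();
    // First call: no index exists yet, so one is created.
    let _index = Index::open_or_create(directory.clone(), schema.clone())?;
    // Second call with the same schema: the existing index is opened.
    let _index = Index::open_or_create(directory, schema)?;
    // A call with a different schema would return a SchemaError instead.
    Ok(())
}
```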
@@ -172,11 +174,11 @@ impl Index {
     }
 
     /// Helper to access the tokenizer associated to a specific field.
-    pub fn tokenizer_for_field(&self, field: Field) -> Result<TextAnalyzer> {
+    pub fn tokenizer_for_field(&self, field: Field) -> Result<BoxedTokenizer> {
         let field_entry = self.schema.get_field_entry(field);
         let field_type = field_entry.field_type();
         let tokenizer_manager: &TokenizerManager = self.tokenizers();
-        let tokenizer_name_opt: Option<TextAnalyzer> = match field_type {
+        let tokenizer_name_opt: Option<BoxedTokenizer> = match field_type {
             FieldType::Str(text_options) => text_options
                 .get_indexing_options()
                 .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
@@ -385,9 +387,12 @@ mod tests {
     use crate::directory::RAMDirectory;
     use crate::schema::Field;
     use crate::schema::{Schema, INDEXED, TEXT};
+    use crate::Index;
     use crate::IndexReader;
+    use crate::IndexWriter;
     use crate::ReloadPolicy;
-    use crate::{Directory, Index};
+    use std::thread;
+    use std::time::Duration;
 
     #[test]
     fn test_indexer_for_field() {
@@ -465,14 +470,14 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &index, &reader);
+        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
     }
 
     #[cfg(feature = "mmap")]
     mod mmap_specific {
 
         use super::*;
-        use crate::Directory;
         use std::path::PathBuf;
         use tempfile::TempDir;
 
@@ -483,20 +488,22 @@ mod tests {
         let tempdir = TempDir::new().unwrap();
         let tempdir_path = PathBuf::from(tempdir.path());
         let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
+        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+        writer.commit().unwrap();
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::OnCommit)
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &index, &reader);
+        test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
     }
 
     #[test]
     fn test_index_manual_policy_mmap() {
         let schema = throw_away_schema();
         let field = schema.get_field("num_likes").unwrap();
-        let mut index = Index::create_from_tempdir(schema).unwrap();
+        let index = Index::create_from_tempdir(schema).unwrap();
         let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
         writer.commit().unwrap();
         let reader = index
@@ -506,12 +513,8 @@ mod tests {
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
         writer.add_document(doc!(field=>1u64));
-        let (sender, receiver) = crossbeam::channel::unbounded();
-        let _handle = index.directory_mut().watch(Box::new(move || {
-            let _ = sender.send(());
-        }));
         writer.commit().unwrap();
-        assert!(receiver.recv().is_ok());
+        thread::sleep(Duration::from_millis(500));
         assert_eq!(reader.searcher().num_docs(), 0);
         reader.reload().unwrap();
         assert_eq!(reader.searcher().num_docs(), 1);
@@ -531,26 +534,39 @@ mod tests {
             .try_into()
             .unwrap();
         assert_eq!(reader.searcher().num_docs(), 0);
-        test_index_on_commit_reload_policy_aux(field, &write_index, &reader);
+        let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
+        test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
     }
 }
 
-    fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) {
-        let mut reader_index = reader.index();
-        let (sender, receiver) = crossbeam::channel::unbounded();
-        let _watch_handle = reader_index.directory_mut().watch(Box::new(move || {
-            let _ = sender.send(());
-        }));
-        let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+    fn test_index_on_commit_reload_policy_aux(
+        field: Field,
+        writer: &mut IndexWriter,
+        reader: &IndexReader,
+    ) {
         assert_eq!(reader.searcher().num_docs(), 0);
         writer.add_document(doc!(field=>1u64));
         writer.commit().unwrap();
-        assert!(receiver.recv().is_ok());
-        assert_eq!(reader.searcher().num_docs(), 1);
+        let mut count = 0;
+        for _ in 0..100 {
+            count = reader.searcher().num_docs();
+            if count > 0 {
+                break;
+            }
+            thread::sleep(Duration::from_millis(100));
+        }
+        assert_eq!(count, 1);
         writer.add_document(doc!(field=>2u64));
         writer.commit().unwrap();
-        assert!(receiver.recv().is_ok());
-        assert_eq!(reader.searcher().num_docs(), 2);
+        let mut count = 0;
+        for _ in 0..10 {
+            count = reader.searcher().num_docs();
+            if count > 1 {
+                break;
+            }
+            thread::sleep(Duration::from_millis(100));
+        }
+        assert_eq!(count, 2);
     }
 
     // This test will not pass on windows, because windows
@@ -567,13 +583,9 @@ mod tests {
         for i in 0u64..8_000u64 {
             writer.add_document(doc!(field => i));
         }
-        let (sender, receiver) = crossbeam::channel::unbounded();
-        let _handle = directory.watch(Box::new(move || {
-            let _ = sender.send(());
-        }));
         writer.commit().unwrap();
         let mem_right_after_commit = directory.total_mem_usage();
-        assert!(receiver.recv().is_ok());
+        thread::sleep(Duration::from_millis(1_000));
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::Manual)
@@ -587,11 +599,6 @@ mod tests {
         reader.reload().unwrap();
         let searcher = reader.searcher();
         assert_eq!(searcher.num_docs(), 8_000);
-        assert!(
-            mem_right_after_merge_finished < mem_right_after_commit,
-            "(mem after merge){} is expected < (mem before merge){}",
-            mem_right_after_merge_finished,
-            mem_right_after_commit
-        );
+        assert!(mem_right_after_merge_finished < mem_right_after_commit);
     }
 }
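The tests above replace directory-watch notifications with bounded polling: re-read `num_docs` until the expected count shows up or the attempt budget runs out. The same wait loop can be factored into a helper; a sketch assuming only the standard library (the helper name is made up for illustration):

```rust
use std::thread;
use std::time::Duration;

/// Polls `probe` up to `attempts` times, sleeping between tries,
/// and returns the last observed value.
fn poll_until<F>(mut probe: F, target: u64, attempts: usize) -> u64
where
    F: FnMut() -> u64,
{
    let mut observed = 0;
    for _ in 0..attempts {
        observed = probe();
        if observed >= target {
            break;
        }
        thread::sleep(Duration::from_millis(100));
    }
    observed
}

// e.g. assert_eq!(poll_until(|| reader.searcher().num_docs(), 1, 100), 1);
```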
@@ -150,21 +150,6 @@ impl SegmentMeta {
         self.num_deleted_docs() > 0
     }
 
-    /// Updates the max_doc value from the `SegmentMeta`.
-    ///
-    /// This method is only used when updating `max_doc` from 0
-    /// as we finalize a fresh new segment.
-    pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
-        assert_eq!(self.tracked.max_doc, 0);
-        assert!(self.tracked.deletes.is_none());
-        let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
-            segment_id: inner_meta.segment_id,
-            max_doc,
-            deletes: None,
-        });
-        SegmentMeta { tracked }
-    }
-
     #[doc(hidden)]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
         let delete_meta = DeleteMeta {
@@ -300,9 +285,6 @@ mod tests {
             payload: None,
         };
         let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
-        assert_eq!(
-            json,
-            r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
-        );
+        assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
     }
 }
@@ -50,17 +50,6 @@ impl Segment {
         &self.meta
     }
 
-    /// Updates the max_doc value from the `SegmentMeta`.
-    ///
-    /// This method is only used when updating `max_doc` from 0
-    /// as we finalize a fresh new segment.
-    pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
-        Segment {
-            index: self.index,
-            meta: self.meta.with_max_doc(max_doc),
-        }
-    }
-
     #[doc(hidden)]
     pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
         Segment {
@@ -76,7 +76,7 @@ impl SegmentId {
 }
 
 /// Error type used when parsing a `SegmentId` from a string fails.
-pub struct SegmentIdParseError(uuid::Error);
+pub struct SegmentIdParseError(uuid::parser::ParseError);
 
 impl Error for SegmentIdParseError {}
 
@@ -119,7 +119,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Specifically, subsequent writes or flushes should
     /// have no effect on the returned `ReadOnlySource` object.
     ///
-    /// You should only use this to read files create with [Directory::open_write].
+    /// You should only use this to read files create with [`open_write`]
     fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
 
     /// Removes a file
@@ -160,7 +160,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     ///
     /// This should only be used for small files.
     ///
-    /// You should only use this to read files create with [Directory::atomic_write].
+    /// You should only use this to read files create with [`atomic_write`]
     fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
 
     /// Atomically replace the content of a file with data.
@@ -197,7 +197,7 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
     /// Registers a callback that will be called whenever a change on the `meta.json`
     /// using the `atomic_write` API is detected.
     ///
-    /// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other
+    /// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other
     /// hand, undefined.
     ///
     /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
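The `atomic_write`/`atomic_read` pair documented above is the intended channel for small whole-file updates such as `meta.json`, and `watch` fires on exactly these atomic updates. A minimal round-trip sketch, assuming the `RAMDirectory` implementation of the trait and the signatures shown in this diff:

```rust
use std::path::Path;
use tantivy::directory::RAMDirectory;
use tantivy::Directory;

fn demo() {
    let mut directory = RAMDirectory::create();
    let path = Path::new("meta.json");
    // Replace the file content in one atomic step...
    directory
        .atomic_write(path, br#"{"segments":[]}"#)
        .expect("atomic_write failed");
    // ...and read it back whole.
    let payload = directory.atomic_read(path).expect("atomic_read failed");
    assert_eq!(payload, br#"{"segments":[]}"#.to_vec());
}
```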
@@ -1,4 +1,3 @@
-use crate::Version;
 use std::error::Error as StdError;
 use std::fmt;
 use std::io;
@@ -157,65 +156,6 @@ impl StdError for OpenWriteError {
     }
 }
 
-/// Type of index incompatibility between the library and the index found on disk
-/// Used to catch and provide a hint to solve this incompatibility issue
-pub enum Incompatibility {
-    /// This library cannot decompress the index found on disk
-    CompressionMismatch {
-        /// Compression algorithm used by the current version of tantivy
-        library_compression_format: String,
-        /// Compression algorithm that was used to serialise the index
-        index_compression_format: String,
-    },
-    /// The index format found on disk isn't supported by this version of the library
-    IndexMismatch {
-        /// Version used by the library
-        library_version: Version,
-        /// Version the index was built with
-        index_version: Version,
-    },
-}
-
-impl fmt::Debug for Incompatibility {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
-        match self {
-            Incompatibility::CompressionMismatch {
-                library_compression_format,
-                index_compression_format,
-            } => {
-                let err = format!(
-                    "Library was compiled with {:?} compression, index was compressed with {:?}",
-                    library_compression_format, index_compression_format
-                );
-                let advice = format!(
-                    "Change the feature flag to {:?} and rebuild the library",
-                    index_compression_format
-                );
-                write!(f, "{}. {}", err, advice)?;
-            }
-            Incompatibility::IndexMismatch {
-                library_version,
-                index_version,
-            } => {
-                let err = format!(
-                    "Library version: {}, index version: {}",
-                    library_version.index_format_version, index_version.index_format_version
-                );
-                // TODO make a more useful error message
-                // include the version range that supports this index_format_version
-                let advice = format!(
-                    "Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
-                     and rebuild your project.",
-                    index_version.index_format_version, index_version.major, index_version.minor
-                );
-                write!(f, "{}. {}", err, advice)?;
-            }
-        }
-
-        Ok(())
-    }
-}
-
 /// Error that may occur when accessing a file read
 #[derive(Debug)]
 pub enum OpenReadError {
@@ -224,8 +164,6 @@ pub enum OpenReadError {
     /// Any kind of IO error that happens when
     /// interacting with the underlying IO device.
     IOError(IOError),
-    /// This library doesn't support the index version found on disk
-    IncompatibleIndex(Incompatibility),
 }
 
 impl From<IOError> for OpenReadError {
@@ -245,9 +183,19 @@ impl fmt::Display for OpenReadError {
                 "an io error occurred while opening a file for reading: '{}'",
                 err
             ),
-            OpenReadError::IncompatibleIndex(ref footer) => {
-                write!(f, "Incompatible index format: {:?}", footer)
+        }
+    }
         }
 
+impl StdError for OpenReadError {
+    fn description(&self) -> &str {
+        "error occurred while opening a file for reading"
+    }
+
+    fn cause(&self) -> Option<&dyn StdError> {
+        match *self {
+            OpenReadError::FileDoesNotExist(_) => None,
+            OpenReadError::IOError(ref err) => Some(err),
         }
     }
 }
@@ -268,12 +216,6 @@ impl From<IOError> for DeleteError {
     }
 }
 
-impl From<Incompatibility> for OpenReadError {
-    fn from(incompatibility: Incompatibility) -> Self {
-        OpenReadError::IncompatibleIndex(incompatibility)
-    }
-}
-
 impl fmt::Display for DeleteError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match *self {
@@ -1,175 +1,159 @@
-use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
-use crate::directory::error::Incompatibility;
 use crate::directory::read_only_source::ReadOnlySource;
 use crate::directory::{AntiCallToken, TerminatingWrite};
-use crate::Version;
-use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
+use byteorder::{ByteOrder, LittleEndian};
 use crc32fast::Hasher;
 use std::io;
 use std::io::Write;
 
-type CrcHashU32 = u32;
+const COMMON_FOOTER_SIZE: usize = 4 * 5;
 
 #[derive(Debug, Clone, PartialEq)]
 pub struct Footer {
-    pub version: Version,
+    pub tantivy_version: (u32, u32, u32),
     pub meta: String,
     pub versioned_footer: VersionedFooter,
 }
 
-/// Serialises the footer to a byte-array
-/// - versioned_footer_len : 4 bytes
-/// - versioned_footer: variable bytes
-/// - meta_len: 4 bytes
-/// - meta: variable bytes
-/// - version_len: 4 bytes
-/// - version json: variable bytes
-impl BinarySerializable for Footer {
-    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-        BinarySerializable::serialize(&self.versioned_footer, writer)?;
-        BinarySerializable::serialize(&self.meta, writer)?;
-        let version_string =
-            serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
-        BinarySerializable::serialize(&version_string, writer)?;
-        Ok(())
-    }
-
-    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-        let versioned_footer = VersionedFooter::deserialize(reader)?;
-        let meta = String::deserialize(reader)?;
-        let version_json = String::deserialize(reader)?;
-        let version = serde_json::from_str(&version_json)?;
-        Ok(Footer {
-            version,
-            meta,
-            versioned_footer,
-        })
-    }
-}
-
 impl Footer {
     pub fn new(versioned_footer: VersionedFooter) -> Self {
-        let version = crate::VERSION.clone();
-        let meta = version.to_string();
+        let tantivy_version = (
+            env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
+            env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
+            env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
+        );
         Footer {
-            version,
-            meta,
+            tantivy_version,
+            meta: format!(
+                "tantivy {}.{}.{}, index v{}",
+                tantivy_version.0,
+                tantivy_version.1,
+                tantivy_version.2,
+                versioned_footer.version()
+            ),
             versioned_footer,
         }
     }
 
-    pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> {
-        let mut counting_write = CountingWriter::wrap(&mut write);
-        self.serialize(&mut counting_write)?;
-        let written_len = counting_write.written_bytes();
-        write.write_u32::<LittleEndian>(written_len as u32)?;
-        Ok(())
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let mut res = self.versioned_footer.to_bytes();
+        res.extend_from_slice(self.meta.as_bytes());
+        let len = res.len();
+        res.resize(len + COMMON_FOOTER_SIZE, 0);
+        let mut common_footer = &mut res[len..];
+        LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
+        LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
+        LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
+        LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
+        LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
+        res
     }
 
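Read back from the tail of a file, the fixed-size trailer written by `to_bytes` above decodes as five little-endian `u32`s: meta length, the three tantivy version components, and the total footer size. A standalone sketch of that decoding, mirroring the layout in this diff rather than tantivy's actual parser:

```rust
use byteorder::{ByteOrder, LittleEndian};

/// The 20-byte trailer layout written by `to_bytes`:
/// [ meta_len | major | minor | patch | total_footer_size ]
struct CommonFooter {
    meta_len: u32,
    tantivy_version: (u32, u32, u32),
    footer_size: u32,
}

fn parse_common_footer(file: &[u8]) -> CommonFooter {
    // Assumes the caller has already checked `file.len() >= 20`.
    let trailer = &file[file.len() - 20..];
    CommonFooter {
        meta_len: LittleEndian::read_u32(&trailer[..4]),
        tantivy_version: (
            LittleEndian::read_u32(&trailer[4..8]),
            LittleEndian::read_u32(&trailer[8..12]),
            LittleEndian::read_u32(&trailer[12..16]),
        ),
        footer_size: LittleEndian::read_u32(&trailer[16..]),
    }
}
```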
|
|
||||||
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
|
||||||
if source.len() < 4 {
|
let len = data.len();
|
||||||
|
if len < COMMON_FOOTER_SIZE + 4 {
|
||||||
|
// 4 bytes for index version, stored in versioned footer
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::UnexpectedEof,
|
||||||
|
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
|
||||||
|
if len < size as usize {
|
||||||
return Err(io::Error::new(
|
return Err(io::Error::new(
|
||||||
io::ErrorKind::UnexpectedEof,
|
io::ErrorKind::UnexpectedEof,
|
||||||
format!(
|
format!(
|
||||||
"File corrupted. The file is smaller than 4 bytes (len={}).",
|
"File corrupted. The footer len is {}, while the entire file len is {}",
|
||||||
source.len()
|
size, len
|
||||||
),
|
),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES);
|
let footer = &data[len - size as usize..];
|
||||||
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize;
|
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
|
||||||
let body_len = body_footer.len() - footer_len;
|
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
|
||||||
let (body, footer_data) = body_footer.split(body_len);
|
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
|
||||||
let mut cursor = footer_data.as_slice();
|
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
|
||||||
let footer = Footer::deserialize(&mut cursor)?;
|
Ok(Footer {
|
||||||
Ok((footer, body))
|
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
|
||||||
|
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
|
||||||
|
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Confirms that the index will be read correctly by this version of tantivy
|
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
|
||||||
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory
|
let footer = Footer::from_bytes(source.as_slice())?;
|
||||||
pub fn is_compatible(&self) -> Result<(), Incompatibility> {
|
let reader = source.slice_to(source.as_slice().len() - footer.size());
|
||||||
let library_version = crate::version();
|
Ok((footer, reader))
|
||||||
match &self.versioned_footer {
|
}
|
||||||
VersionedFooter::V1 {
|
|
||||||
crc32: _crc,
|
pub fn size(&self) -> usize {
|
||||||
store_compression: compression,
|
self.versioned_footer.size() as usize + self.meta.len() + 20
|
||||||
} => {
|
|
||||||
if &library_version.store_compression != compression {
|
|
||||||
return Err(Incompatibility::CompressionMismatch {
|
|
||||||
library_compression_format: library_version.store_compression.to_string(),
|
|
||||||
index_compression_format: compression.to_string(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
|
|
||||||
library_version: library_version.clone(),
|
|
||||||
index_version: self.version.clone(),
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-/// Footer that includes a crc32 hash that enables us to checksum files in the index
 #[derive(Debug, Clone, PartialEq)]
 pub enum VersionedFooter {
-    UnknownVersion,
-    V1 {
-        crc32: CrcHashU32,
-        store_compression: String,
-    },
-}
-
-impl BinarySerializable for VersionedFooter {
-    fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
-        let mut buf = Vec::new();
-        match self {
-            VersionedFooter::V1 {
-                crc32,
-                store_compression: compression,
-            } => {
-                // Serializes a valid `VersionedFooter` or panics if the version is unknown
-                // [ version | crc_hash | compression_mode ]
-                // [ 0..4    | 4..8     | variable         ]
-                BinarySerializable::serialize(&1u32, &mut buf)?;
-                BinarySerializable::serialize(crc32, &mut buf)?;
-                BinarySerializable::serialize(compression, &mut buf)?;
-            }
-            VersionedFooter::UnknownVersion => {
-                return Err(io::Error::new(
-                    io::ErrorKind::InvalidInput,
-                    "Cannot serialize an unknown versioned footer ",
-                ));
-            }
-        }
-        BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
-        writer.write_all(&buf[..])?;
-        Ok(())
-    }
-
-    fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-        let len = VInt::deserialize(reader)?.0 as usize;
-        let mut buf = vec![0u8; len];
-        reader.read_exact(&mut buf[..])?;
-        let mut cursor = &buf[..];
-        let version = u32::deserialize(&mut cursor)?;
-        if version == 1 {
-            let crc32 = u32::deserialize(&mut cursor)?;
-            let compression = String::deserialize(&mut cursor)?;
-            Ok(VersionedFooter::V1 {
-                crc32,
-                store_compression: compression,
-            })
-        } else {
-            Ok(VersionedFooter::UnknownVersion)
-        }
-    }
+    UnknownVersion { version: u32, size: u32 },
+    V0(u32), // crc
 }
 
 impl VersionedFooter {
-    pub fn crc(&self) -> Option<CrcHashU32> {
+    pub fn to_bytes(&self) -> Vec<u8> {
         match self {
-            VersionedFooter::V1 { crc32, .. } => Some(*crc32),
+            VersionedFooter::V0(crc) => {
+                let mut res = vec![0; 8];
+                LittleEndian::write_u32(&mut res, 0);
+                LittleEndian::write_u32(&mut res[4..], *crc);
+                res
+            }
+            VersionedFooter::UnknownVersion { .. } => {
+                panic!("Unsupported index should never get serialized");
+            }
+        }
+    }
+
+    pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
+        assert!(footer.len() >= 4);
+        let version = LittleEndian::read_u32(footer);
+        match version {
+            0 => {
+                if footer.len() == 8 {
+                    Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
+                } else {
+                    Err(io::Error::new(
+                        io::ErrorKind::UnexpectedEof,
+                        format!(
+                            "File corrupted. The versioned footer len is {}, while it should be 8",
+                            footer.len()
+                        ),
+                    ))
+                }
+            }
+            version => Ok(VersionedFooter::UnknownVersion {
+                version,
+                size: footer.len() as u32,
+            }),
+        }
+    }
+
+    pub fn size(&self) -> u32 {
+        match self {
+            VersionedFooter::V0(_) => 8,
+            VersionedFooter::UnknownVersion { size, .. } => *size,
+        }
+    }
+
+    pub fn version(&self) -> u32 {
+        match self {
+            VersionedFooter::V0(_) => 0,
+            VersionedFooter::UnknownVersion { version, .. } => *version,
+        }
+    }
+
+    pub fn crc(&self) -> Option<u32> {
+        match self {
+            VersionedFooter::V0(crc) => Some(*crc),
             VersionedFooter::UnknownVersion { .. } => None,
         }
     }
 }
@@ -205,135 +189,25 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
 
 impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
     fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
-        let crc32 = self.hasher.take().unwrap().finalize();
-        let footer = Footer::new(VersionedFooter::V1 {
-            crc32,
-            store_compression: crate::store::COMPRESSION.to_string(),
-        });
+        let crc = self.hasher.take().unwrap().finalize();
+        let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
         let mut writer = self.writer.take().unwrap();
-        footer.append_footer(&mut writer)?;
+        writer.write_all(&footer)?;
         writer.terminate()
     }
 }
 
 #[cfg(test)]
 mod tests {
 
-    use super::CrcHashU32;
-    use super::FooterProxy;
-    use crate::common::BinarySerializable;
     use crate::directory::footer::{Footer, VersionedFooter};
-    use crate::directory::TerminatingWrite;
-    use byteorder::{ByteOrder, LittleEndian};
-    use regex::Regex;
-
-    #[test]
-    fn test_versioned_footer() {
-        let mut vec = Vec::new();
-        let footer_proxy = FooterProxy::new(&mut vec);
-        assert!(footer_proxy.terminate().is_ok());
-        assert_eq!(vec.len(), 167);
-        let footer = Footer::deserialize(&mut &vec[..]).unwrap();
-        if let VersionedFooter::V1 {
-            crc32: _,
-            store_compression,
-        } = footer.versioned_footer
-        {
-            assert_eq!(store_compression, crate::store::COMPRESSION);
-        } else {
-            panic!("Versioned footer should be V1.");
-        }
-        assert_eq!(&footer.version, crate::version());
-    }
 
     #[test]
     fn test_serialize_deserialize_footer() {
-        let mut buffer = Vec::new();
-        let crc32 = 123456u32;
-        let footer: Footer = Footer::new(VersionedFooter::V1 {
-            crc32,
-            store_compression: "lz4".to_string(),
-        });
-        footer.serialize(&mut buffer).unwrap();
-        let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
-        assert_eq!(footer_deser, footer);
-    }
-
-    #[test]
-    fn footer_length() {
-        let crc32 = 1111111u32;
-        let versioned_footer = VersionedFooter::V1 {
-            crc32,
-            store_compression: "lz4".to_string(),
-        };
-        let mut buf = Vec::new();
-        versioned_footer.serialize(&mut buf).unwrap();
-        assert_eq!(buf.len(), 13);
-        let footer = Footer::new(versioned_footer);
-        let regex_ptn = Regex::new(
-            "tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
-        )
-        .unwrap();
-        assert!(regex_ptn.is_match(&footer.meta));
-    }
-
-    #[test]
-    fn versioned_footer_from_bytes() {
-        let v_footer_bytes = vec![
-            // versionned footer length
-            12 | 128,
-            // index format version
-            1,
-            0,
-            0,
-            0,
-            // crc 32
-            12,
-            35,
-            89,
-            18,
-            // compression format
-            3 | 128,
-            b'l',
-            b'z',
-            b'4',
-        ];
-        let mut cursor = &v_footer_bytes[..];
-        let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
-        assert!(cursor.is_empty());
-        let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
-        let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
-            crc32: expected_crc,
-            store_compression: "lz4".to_string(),
-        };
-        assert_eq!(versioned_footer, expected_versioned_footer);
-        let mut buffer = Vec::new();
-        assert!(versioned_footer.serialize(&mut buffer).is_ok());
-        assert_eq!(&v_footer_bytes[..], &buffer[..]);
-    }
-
-    #[test]
-    fn versioned_footer_panic() {
-        let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
-        let mut b = &v_footer_bytes[..];
-        let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
-        assert!(b.is_empty());
-        let expected_versioned_footer = VersionedFooter::UnknownVersion;
-        assert_eq!(versioned_footer, expected_versioned_footer);
-        let mut buf = Vec::new();
-        assert!(versioned_footer.serialize(&mut buf).is_err());
-    }
-
-    #[test]
-    #[cfg(not(feature = "lz4"))]
-    fn compression_mismatch() {
-        let crc32 = 1111111u32;
-        let versioned_footer = VersionedFooter::V1 {
-            crc32,
-            store_compression: "lz4".to_string(),
-        };
-        let footer = Footer::new(versioned_footer);
-        let res = footer.is_compatible();
-        assert!(res.is_err());
+        let crc = 123456;
+        let footer = Footer::new(VersionedFooter::V0(crc));
+        let footer_bytes = footer.to_bytes();
+        assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
     }
 }
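`FooterProxy` feeds every written byte through a `crc32fast::Hasher` and only serializes the checksum at terminate time. A reduced sketch of that write-through checksumming idea, with an illustrative wrapper type rather than tantivy's:

```rust
use crc32fast::Hasher;
use std::io::{self, Write};

/// Write-through wrapper that checksums everything it forwards.
struct CrcWriter<W: Write> {
    inner: W,
    hasher: Hasher,
}

impl<W: Write> CrcWriter<W> {
    fn new(inner: W) -> Self {
        CrcWriter { inner, hasher: Hasher::new() }
    }

    /// Consumes the wrapper and returns the crc32 of all bytes written.
    fn finalize(self) -> u32 {
        self.hasher.finalize()
    }
}

impl<W: Write> Write for CrcWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        // Hash only the bytes the inner writer actually accepted.
        self.hasher.update(&buf[..written]);
        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
```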
@@ -2,14 +2,13 @@ use crate::core::MANAGED_FILEPATH;
 use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
 use crate::directory::footer::{Footer, FooterProxy};
 use crate::directory::DirectoryLock;
-use crate::directory::GarbageCollectionResult;
 use crate::directory::Lock;
 use crate::directory::META_LOCK;
 use crate::directory::{ReadOnlySource, WritePtr};
 use crate::directory::{WatchCallback, WatchHandle};
 use crate::error::DataCorruption;
 use crate::Directory;
+use crate::Result;
 use crc32fast::Hasher;
 use serde_json;
 use std::collections::HashSet;
@@ -65,7 +64,7 @@ fn save_managed_paths(
 
 impl ManagedDirectory {
     /// Wraps a directory as managed directory.
-    pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> {
+    pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
         match directory.atomic_read(&MANAGED_FILEPATH) {
             Ok(data) => {
                 let managed_files_json = String::from_utf8_lossy(&data);
@@ -88,11 +87,6 @@ impl ManagedDirectory {
                 meta_informations: Arc::default(),
             }),
             Err(OpenReadError::IOError(e)) => Err(From::from(e)),
-            Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
-                // For the moment, this should never happen `meta.json`
-                // do not have any footer and cannot detect incompatibility.
-                Err(crate::TantivyError::IncompatibleIndex(incompatibility))
-            }
         }
     }
 
@@ -110,10 +104,7 @@ impl ManagedDirectory {
     /// If a file cannot be deleted (for permission reasons for instance)
     /// an error is simply logged, and the file remains in the list of managed
     /// files.
-    pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(
-        &mut self,
-        get_living_files: L,
-    ) -> crate::Result<GarbageCollectionResult> {
+    pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
         info!("Garbage collect");
         let mut files_to_delete = vec![];
 
@@ -139,25 +130,19 @@ impl ManagedDirectory {
         // 2) writer change meta.json (for instance after a merge or a commit)
         // 3) gc kicks in.
         // 4) gc removes a file that was useful for process B, before process B opened it.
-        match self.acquire_lock(&META_LOCK) {
-            Ok(_meta_lock) => {
-                let living_files = get_living_files();
-                for managed_path in &meta_informations_rlock.managed_paths {
-                    if !living_files.contains(managed_path) {
-                        files_to_delete.push(managed_path.clone());
-                    }
+        if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
+            let living_files = get_living_files();
+            for managed_path in &meta_informations_rlock.managed_paths {
+                if !living_files.contains(managed_path) {
+                    files_to_delete.push(managed_path.clone());
                 }
             }
-            Err(err) => {
+        } else {
             error!("Failed to acquire lock for GC");
-                return Err(crate::Error::from(err));
-            }
         }
 
-        let mut failed_to_delete_files = vec![];
         let mut deleted_files = vec![];
 
         for file_to_delete in files_to_delete {
             match self.delete(&file_to_delete) {
                 Ok(_) => {
@@ -167,10 +152,9 @@ impl ManagedDirectory {
                 Err(file_error) => {
                     match file_error {
                         DeleteError::FileDoesNotExist(_) => {
-                            deleted_files.push(file_to_delete.clone());
+                            deleted_files.push(file_to_delete);
                         }
                         DeleteError::IOError(_) => {
-                            failed_to_delete_files.push(file_to_delete.clone());
                             if !cfg!(target_os = "windows") {
                                 // On windows, delete is expected to fail if the file
                                 // is mmapped.
@@ -193,13 +177,10 @@ impl ManagedDirectory {
             for delete_file in &deleted_files {
                 managed_paths_write.remove(delete_file);
             }
-            save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?;
+            if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
+                error!("Failed to save the list of managed files.");
+            }
         }
 
-        Ok(GarbageCollectionResult {
-            deleted_files,
-            failed_to_delete_files,
-        })
     }
 
     /// Registers a file as managed
@@ -266,9 +247,8 @@ impl ManagedDirectory {
 impl Directory for ManagedDirectory {
     fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
         let read_only_source = self.directory.open_read(path)?;
-        let (footer, reader) = Footer::extract_footer(read_only_source)
+        let (_footer, reader) = Footer::extract_footer(read_only_source)
             .map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
-        footer.is_compatible()?;
         Ok(reader)
     }
 
@@ -347,8 +327,9 @@ mod tests_mmap_specific {
         .unwrap();
         assert!(managed_directory.exists(test_path1));
         assert!(managed_directory.exists(test_path2));
-        let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect();
-        assert!(managed_directory.garbage_collect(|| living_files).is_ok());
+        let living_files: HashSet<PathBuf> =
+            [test_path1.to_owned()].into_iter().cloned().collect();
+        managed_directory.garbage_collect(|| living_files);
         assert!(managed_directory.exists(test_path1));
         assert!(!managed_directory.exists(test_path2));
     }
@@ -358,7 +339,7 @@ mod tests_mmap_specific {
         assert!(managed_directory.exists(test_path1));
         assert!(!managed_directory.exists(test_path2));
         let living_files: HashSet<PathBuf> = HashSet::new();
-        assert!(managed_directory.garbage_collect(|| living_files).is_ok());
+        managed_directory.garbage_collect(|| living_files);
         assert!(!managed_directory.exists(test_path1));
         assert!(!managed_directory.exists(test_path2));
     }
@@ -380,9 +361,7 @@ mod tests_mmap_specific {
         assert!(managed_directory.exists(test_path1));
 
         let _mmap_read = managed_directory.open_read(test_path1).unwrap();
-        assert!(managed_directory
-            .garbage_collect(|| living_files.clone())
-            .is_ok());
+        managed_directory.garbage_collect(|| living_files.clone());
         if cfg!(target_os = "windows") {
             // On Windows, gc should try and fail the file as it is mmapped.
             assert!(managed_directory.exists(test_path1));
@@ -390,7 +369,7 @@ mod tests_mmap_specific {
         drop(_mmap_read);
         // The file should still be in the list of managed file and
        // eventually be deleted once mmap is released.
-        assert!(managed_directory.garbage_collect(|| living_files).is_ok());
+        managed_directory.garbage_collect(|| living_files);
        assert!(!managed_directory.exists(test_path1));
    } else {
        assert!(!managed_directory.exists(test_path1));
@@ -415,8 +394,6 @@ mod tests_mmap_specific {
        write.write_all(&[3u8, 4u8, 5u8]).unwrap();
        write.terminate().unwrap();
 
-        let read_source = managed_directory.open_read(test_path2).unwrap();
-        assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
        assert!(managed_directory.list_damaged().unwrap().is_empty());
 
        let mut corrupted_path = tempdir_path.clone();
@@ -131,29 +131,22 @@ impl MmapCache {
|
|||||||
}
|
}
|
||||||
self.cache.remove(full_path);
|
self.cache.remove(full_path);
|
||||||
self.counters.miss += 1;
|
self.counters.miss += 1;
|
||||||
let mmap_opt = open_mmap(full_path)?;
|
Ok(if let Some(mmap) = open_mmap(full_path)? {
|
||||||
Ok(mmap_opt.map(|mmap| {
|
|
||||||
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
|
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
|
||||||
let mmap_weak = Arc::downgrade(&mmap_arc);
|
let mmap_weak = Arc::downgrade(&mmap_arc);
|
||||||
self.cache.insert(full_path.to_owned(), mmap_weak);
|
self.cache.insert(full_path.to_owned(), mmap_weak);
|
||||||
mmap_arc
|
Some(mmap_arc)
|
||||||
}))
|
} else {
|
||||||
|
None
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub enum WatcherMode {
|
|
||||||
Event,
|
|
||||||
Poll
|
|
||||||
}
|
|
||||||
|
|
||||||
struct WatcherWrapper {
|
struct WatcherWrapper {
|
||||||
_watcher: Mutex<notify::RecommendedWatcher>,
|
_watcher: Mutex<notify::RecommendedWatcher>,
|
||||||
watcher_router: Arc<WatchCallbackList>,
|
watcher_router: Arc<WatchCallbackList>,
|
||||||
watcher_mode: WatcherMode,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
impl WatcherWrapper {
|
impl WatcherWrapper {
|
||||||
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
|
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
|
||||||
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
|
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
|
||||||
@@ -171,57 +164,33 @@ impl WatcherWrapper {
         })?;
         let watcher_router: Arc<WatchCallbackList> = Default::default();
         let watcher_router_clone = watcher_router.clone();
-        let path_clone = path.clone();
-        let meta_path = path_clone.join(*META_FILEPATH);
         thread::Builder::new()
             .name("meta-file-watch-thread".to_string())
             .spawn(move || {
-                let mut old_content = String::new();
-                let mode = WatcherMode::Event;
                 loop {
-                    match mode {
-                        WatcherMode::Event => {
-                            match watcher_recv.recv().map(|evt| evt.path) {
-                                Ok(Some(changed_path)) => {
-                                    // ... Actually subject to false positive.
-                                    // We might want to be more accurate than this at one point.
-                                    if let Some(filename) = changed_path.file_name() {
-                                        if filename == *META_FILEPATH {
-                                            let _ = watcher_router_clone.broadcast();
-                                        }
-                                    }
-                                }
-                                Ok(None) => {
-                                    // not an event we are interested in.
-                                }
-                                Err(_e) => {
-                                    // the watch send channel was dropped
-                                    break;
-                                }
-                            }
-                        }
-                        WatcherMode::Poll => {
-                            let mut file = match File::open(&meta_path) {
-                                Err(why) => panic!("open: nope"),
-                                Ok(file) => file,
-                            };
-                            let mut new_content = String::new();
-                            match file.read_to_string(&mut new_content) {
-                                Err(why) => panic!("read: nope"),
-                                Ok(_) => {},
-                            }
-                            if old_content != new_content {
-                                let _ = watcher_router_clone.broadcast();
-                                old_content = new_content;
-                            }
-                        }
-                    };
+                    match watcher_recv.recv().map(|evt| evt.path) {
+                        Ok(Some(changed_path)) => {
+                            // ... Actually subject to false positive.
+                            // We might want to be more accurate than this at one point.
+                            if let Some(filename) = changed_path.file_name() {
+                                if filename == *META_FILEPATH {
+                                    watcher_router_clone.broadcast();
+                                }
+                            }
+                        }
+                        Ok(None) => {
+                            // not an event we are interested in.
+                        }
+                        Err(_e) => {
+                            // the watch send channel was dropped
+                            break;
+                        }
+                    }
                 }
             })?;
         Ok(WatcherWrapper {
             _watcher: Mutex::new(watcher),
             watcher_router,
-            watcher_mode: WatcherMode::Event,
         })
     }
 
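Note: the removed (left) side carried a `WatcherMode::Poll` arm that re-read `meta.json` and broadcast whenever its content changed, next to the notify-based event path both sides share. A stripped-down version of that content-polling idea, with the `panic!("open: nope")` placeholders replaced by quiet retries (a sketch, not the branch's exact code):

    use std::fs;
    use std::path::Path;
    use std::thread;
    use std::time::Duration;

    fn poll_for_changes(meta_path: &Path, mut on_change: impl FnMut()) {
        let mut old_content = String::new();
        loop {
            // fs::read_to_string covers the open + read_to_string pair above.
            if let Ok(new_content) = fs::read_to_string(meta_path) {
                if new_content != old_content {
                    on_change();
                    old_content = new_content;
                }
            }
            thread::sleep(Duration::from_millis(500));
        }
    }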
@@ -569,15 +538,16 @@ mod tests {
     // The following tests are specific to the MmapDirectory
 
     use super::*;
-    use crate::indexer::LogMergePolicy;
     use crate::schema::{Schema, SchemaBuilder, TEXT};
     use crate::Index;
     use crate::ReloadPolicy;
     use std::fs;
     use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::thread;
+    use std::time::Duration;
 
     #[test]
-    fn test_open_non_existent_path() {
+    fn test_open_non_existant_path() {
         assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
     }
 
@@ -670,18 +640,13 @@ mod tests {
         let tmp_dir = tempfile::TempDir::new().unwrap();
         let tmp_dirpath = tmp_dir.path().to_owned();
         let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
-        let tmp_file = tmp_dirpath.join(*META_FILEPATH);
+        let tmp_file = tmp_dirpath.join("coucou");
         let _handle = watch_wrapper.watch(Box::new(move || {
             counter_clone.fetch_add(1, Ordering::SeqCst);
         }));
-        let (sender, receiver) = crossbeam::channel::unbounded();
-        let _handle2 = watch_wrapper.watch(Box::new(move || {
-            let _ = sender.send(());
-        }));
         assert_eq!(counter.load(Ordering::SeqCst), 0);
         fs::write(&tmp_file, b"whateverwilldo").unwrap();
-        assert!(receiver.recv().is_ok());
-        assert!(counter.load(Ordering::SeqCst) >= 1);
+        thread::sleep(Duration::new(0, 1_000u32));
     }
 
     #[test]
@@ -690,42 +655,34 @@ mod tests {
         let mut schema_builder: SchemaBuilder = Schema::builder();
         let text_field = schema_builder.add_text_field("text", TEXT);
         let schema = schema_builder.build();
 
         {
             let index = Index::create(mmap_directory.clone(), schema).unwrap();
 
             let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            let mut log_merge_policy = LogMergePolicy::default();
-            log_merge_policy.set_min_merge_size(3);
-            index_writer.set_merge_policy(Box::new(log_merge_policy));
-            for _num_commits in 0..10 {
+            for _num_commits in 0..16 {
                 for _ in 0..10 {
                     index_writer.add_document(doc!(text_field=>"abc"));
                 }
                 index_writer.commit().unwrap();
             }
 
             let reader = index
                 .reader_builder()
                 .reload_policy(ReloadPolicy::Manual)
                 .try_into()
                 .unwrap();
-            for _ in 0..4 {
+            for _ in 0..30 {
                 index_writer.add_document(doc!(text_field=>"abc"));
                 index_writer.commit().unwrap();
                 reader.reload().unwrap();
             }
             index_writer.wait_merging_threads().unwrap();
 
             reader.reload().unwrap();
             let num_segments = reader.searcher().segment_readers().len();
-            assert!(num_segments <= 4);
+            assert_eq!(num_segments, 4);
             assert_eq!(
                 num_segments * 7,
                 mmap_directory.get_cache_info().mmapped.len()
             );
         }
-        assert!(mmap_directory.get_cache_info().mmapped.is_empty());
+        assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
     }
 }
@@ -23,22 +23,9 @@ pub use self::directory::{Directory, DirectoryClone};
 pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
 pub use self::ram_directory::RAMDirectory;
 pub use self::read_only_source::ReadOnlySource;
-pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle};
+pub(crate) use self::watch_event_router::WatchCallbackList;
+pub use self::watch_event_router::{WatchCallback, WatchHandle};
 use std::io::{self, BufWriter, Write};
-use std::path::PathBuf;
-/// Outcome of the Garbage collection
-pub struct GarbageCollectionResult {
-    /// List of files that were deleted in this cycle
-    pub deleted_files: Vec<PathBuf>,
-    /// List of files that were schedule to be deleted in this cycle,
-    /// but deletion did not work. This typically happens on windows,
-    /// as deleting a memory mapped file is forbidden.
-    ///
-    /// If a searcher is still held, a file cannot be deleted.
-    /// This is not considered a bug, the file will simply be deleted
-    /// in the next GC.
-    pub failed_to_delete_files: Vec<PathBuf>,
-}
 
 #[cfg(feature = "mmap")]
 pub use self::mmap_directory::MmapDirectory;
@@ -46,9 +33,6 @@ pub use self::mmap_directory::MmapDirectory;
 pub use self::managed_directory::ManagedDirectory;
 
 /// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
-///
-/// The point is that while the type is public, it cannot be built by anyone
-/// outside of this module.
 pub struct AntiCallToken(());
 
 /// Trait used to indicate when no more write need to be done on a writer
@@ -79,13 +63,6 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
     }
 }
 
-#[cfg(test)]
-impl<'a> TerminatingWrite for &'a mut Vec<u8> {
-    fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
-        self.flush()
-    }
-}
-
 /// Write object for Directory.
 ///
 /// `WritePtr` are required to implement both Write
@@ -191,11 +191,11 @@ impl Directory for RAMDirectory {
         // Reserve the path to prevent calls to .write() to succeed.
         self.fs.write().unwrap().write(path_buf.clone(), &[]);
 
-        let mut vec_writer = VecWriter::new(path_buf, self.clone());
+        let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
         vec_writer.write_all(data)?;
         vec_writer.flush()?;
         if path == Path::new(&*META_FILEPATH) {
-            let _ = self.fs.write().unwrap().watch_router.broadcast();
+            self.fs.write().unwrap().watch_router.broadcast();
         }
         Ok(())
     }
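Note: on both sides, an atomic write to `meta.json` is what triggers watcher callbacks; the hunk only changes whether `broadcast()`'s return value is discarded explicitly. Based on the test code elsewhere in this diff, subscribing looks roughly like this (a sketch; `subscribe_to_meta` is a hypothetical helper, and the `tantivy::Result` alias and `&mut dyn Directory` receiver mirror the tests rather than a documented API):

    use tantivy::directory::{Directory, WatchHandle};

    fn subscribe_to_meta(directory: &mut dyn Directory) -> tantivy::Result<WatchHandle> {
        // The callback fires on meta.json changes and is dropped (and stops
        // firing) once the returned handle is dropped.
        directory.watch(Box::new(|| println!("meta.json changed")))
    }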
@@ -70,12 +70,6 @@ impl ReadOnlySource {
         (left, right)
     }
 
-    /// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
-    pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
-        let left_len = self.len() - right_len;
-        self.split(left_len)
-    }
-
     /// Creates a ReadOnlySource that is just a
     /// view over a slice of the data.
     ///
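Note: the removed `split_from_end` is pure sugar over `split`: splitting at `len - right_len` leaves exactly `right_len` bytes in the second half. The same arithmetic on plain slices (illustrative only):

    fn split_from_end(data: &[u8], right_len: usize) -> (&[u8], &[u8]) {
        data.split_at(data.len() - right_len)
    }

    fn main() {
        let (left, right) = split_from_end(&[0u8; 10], 4);
        assert_eq!((left.len(), right.len()), (6, 4));
    }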
@@ -1,117 +1,25 @@
 use super::*;
-use futures::channel::oneshot;
-use futures::executor::block_on;
 use std::io::Write;
 use std::mem;
 use std::path::{Path, PathBuf};
-use std::sync::atomic::Ordering::SeqCst;
-use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
 use std::sync::Arc;
+use std::thread;
+use std::time;
 use std::time::Duration;
 
-#[cfg(feature = "mmap")]
-mod mmap_directory_tests {
-    use crate::directory::MmapDirectory;
-
-    type DirectoryImpl = MmapDirectory;
-
-    fn make_directory() -> DirectoryImpl {
-        MmapDirectory::create_from_tempdir().unwrap()
-    }
-
-    #[test]
-    fn test_simple() {
-        let mut directory = make_directory();
-        super::test_simple(&mut directory);
-    }
-
-    #[test]
-    fn test_write_create_the_file() {
-        let mut directory = make_directory();
-        super::test_write_create_the_file(&mut directory);
-    }
-
-    #[test]
-    fn test_rewrite_forbidden() {
-        let mut directory = make_directory();
-        super::test_rewrite_forbidden(&mut directory);
-    }
-
-    #[test]
-    fn test_directory_delete() {
-        let mut directory = make_directory();
-        super::test_directory_delete(&mut directory);
-    }
-
-    #[test]
-    fn test_lock_non_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_non_blocking(&mut directory);
-    }
-
-    #[test]
-    fn test_lock_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_blocking(&mut directory);
-    }
-
-    #[test]
-    fn test_watch() {
-        let mut directory = make_directory();
-        super::test_watch(&mut directory);
-    }
-}
+#[test]
+fn test_ram_directory() {
+    let mut ram_directory = RAMDirectory::create();
+    test_directory(&mut ram_directory);
+}
 
-mod ram_directory_tests {
-    use crate::directory::RAMDirectory;
-
-    type DirectoryImpl = RAMDirectory;
-
-    fn make_directory() -> DirectoryImpl {
-        RAMDirectory::default()
-    }
-
-    #[test]
-    fn test_simple() {
-        let mut directory = make_directory();
-        super::test_simple(&mut directory);
-    }
-
-    #[test]
-    fn test_write_create_the_file() {
-        let mut directory = make_directory();
-        super::test_write_create_the_file(&mut directory);
-    }
-
-    #[test]
-    fn test_rewrite_forbidden() {
-        let mut directory = make_directory();
-        super::test_rewrite_forbidden(&mut directory);
-    }
-
-    #[test]
-    fn test_directory_delete() {
-        let mut directory = make_directory();
-        super::test_directory_delete(&mut directory);
-    }
-
-    #[test]
-    fn test_lock_non_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_non_blocking(&mut directory);
-    }
-
-    #[test]
-    fn test_lock_blocking() {
-        let mut directory = make_directory();
-        super::test_lock_blocking(&mut directory);
-    }
-
-    #[test]
-    fn test_watch() {
-        let mut directory = make_directory();
-        super::test_watch(&mut directory);
-    }
-}
+#[test]
+#[cfg(feature = "mmap")]
+fn test_mmap_directory() {
+    let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
+    test_directory(&mut mmap_directory);
+}
 
 #[test]
@@ -191,39 +99,48 @@ fn test_directory_delete(directory: &mut dyn Directory) {
     assert!(directory.delete(&test_path).is_err());
 }
 
+fn test_directory(directory: &mut dyn Directory) {
+    test_simple(directory);
+    test_rewrite_forbidden(directory);
+    test_write_create_the_file(directory);
+    test_directory_delete(directory);
+    test_lock_non_blocking(directory);
+    test_lock_blocking(directory);
+    test_watch(directory);
+}
+
 fn test_watch(directory: &mut dyn Directory) {
-    let num_progress: Arc<AtomicUsize> = Default::default();
     let counter: Arc<AtomicUsize> = Default::default();
     let counter_clone = counter.clone();
-    let (sender, receiver) = crossbeam::channel::unbounded();
     let watch_callback = Box::new(move || {
-        counter_clone.fetch_add(1, SeqCst);
+        counter_clone.fetch_add(1, Ordering::SeqCst);
    });
-    // This callback is used to synchronize watching in our unit test.
-    // We bind it to a variable because the callback is removed when that
-    // handle is dropped.
-    let watch_handle = directory.watch(watch_callback).unwrap();
-    let _progress_listener = directory
-        .watch(Box::new(move || {
-            let val = num_progress.fetch_add(1, SeqCst);
-            let _ = sender.send(val);
-        }))
-        .unwrap();
+    assert!(directory
+        .atomic_write(Path::new("meta.json"), b"random_test_data")
+        .is_ok());
+    thread::sleep(Duration::new(0, 10_000));
+    assert_eq!(0, counter.load(Ordering::SeqCst));
 
+    let watch_handle = directory.watch(watch_callback).unwrap();
     for i in 0..10 {
-        assert_eq!(i, counter.load(SeqCst));
+        assert_eq!(i, counter.load(Ordering::SeqCst));
         assert!(directory
            .atomic_write(Path::new("meta.json"), b"random_test_data_2")
            .is_ok());
-        assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i));
-        assert_eq!(i + 1, counter.load(SeqCst));
+        for _ in 0..1_000 {
+            if counter.load(Ordering::SeqCst) > i {
+                break;
+            }
+            thread::sleep(Duration::from_millis(10));
+        }
+        assert_eq!(i + 1, counter.load(Ordering::SeqCst));
     }
     mem::drop(watch_handle);
     assert!(directory
        .atomic_write(Path::new("meta.json"), b"random_test_data")
        .is_ok());
-    assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
-    assert_eq!(10, counter.load(SeqCst));
+    thread::sleep(Duration::from_millis(200));
+    assert_eq!(10, counter.load(Ordering::SeqCst));
 }
 
 fn test_lock_non_blocking(directory: &mut dyn Directory) {
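Note: the right-hand `test_watch` replaces channel-based synchronization with a bounded poll loop: retry up to 1,000 times, sleeping 10ms between checks, then assert. The same idea as a reusable helper (illustrative, not part of tantivy):

    use std::time::{Duration, Instant};

    /// Spin until `cond` holds or `timeout` elapses. This avoids both flaky
    /// fixed sleeps and unbounded waits in tests.
    fn wait_until(timeout: Duration, mut cond: impl FnMut() -> bool) -> bool {
        let start = Instant::now();
        while start.elapsed() < timeout {
            if cond() {
                return true;
            }
            std::thread::sleep(Duration::from_millis(10));
        }
        cond()
    }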
@@ -257,13 +174,9 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
         is_blocking: true,
     });
     assert!(lock_a_res.is_ok());
-    let in_thread = Arc::new(AtomicBool::default());
-    let in_thread_clone = in_thread.clone();
-    let (sender, receiver) = oneshot::channel();
     std::thread::spawn(move || {
         //< lock_a_res is sent to the thread.
-        in_thread_clone.store(true, SeqCst);
-        let _just_sync = block_on(receiver);
+        std::thread::sleep(time::Duration::from_millis(10));
         // explicitely droping lock_a_res. It would have been sufficient to just force it
         // to be part of the move, but the intent seems clearer that way.
         drop(lock_a_res);
@@ -276,18 +189,14 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
         });
         assert!(lock_a_res.is_err());
     }
-    let directory_clone = directory.box_clone();
-    let (sender2, receiver2) = oneshot::channel();
-    let join_handle = std::thread::spawn(move || {
-        assert!(sender2.send(()).is_ok());
-        let lock_a_res = directory_clone.acquire_lock(&Lock {
+    {
+        // the blocking call should wait for at least 10ms.
+        let start = time::Instant::now();
+        let lock_a_res = directory.acquire_lock(&Lock {
             filepath: PathBuf::from("a.lock"),
             is_blocking: true,
         });
-        assert!(in_thread.load(SeqCst));
         assert!(lock_a_res.is_ok());
-    });
-    assert!(block_on(receiver2).is_ok());
-    assert!(sender.send(()).is_ok());
-    assert!(join_handle.join().is_ok());
+        assert!(start.elapsed().subsec_millis() >= 10);
+    }
 }
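Note: the left side proves that the second `acquire_lock` call really overlapped with the thread holding the lock, using a oneshot handshake plus an `AtomicBool`, while the right side only asserts that at least 10ms elapsed. The handshake skeleton on its own (a sketch using the same `futures` oneshot API the left side imports):

    use futures::channel::oneshot;
    use futures::executor::block_on;
    use std::thread;

    fn main() {
        let (sender, receiver) = oneshot::channel::<()>();
        let holder = thread::spawn(move || {
            // Hold a resource here until the main thread signals us.
            let _ = block_on(receiver);
        });
        // ... main thread does the work that must overlap the holder ...
        sender.send(()).unwrap();
        holder.join().unwrap();
    }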
@@ -1,5 +1,3 @@
-use futures::channel::oneshot;
-use futures::{Future, TryFutureExt};
 use std::sync::Arc;
 use std::sync::RwLock;
 use std::sync::Weak;
@@ -24,20 +22,13 @@ pub struct WatchCallbackList {
 #[derive(Clone)]
 pub struct WatchHandle(Arc<WatchCallback>);
 
-impl WatchHandle {
-    /// Create a WatchHandle handle.
-    pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
-        WatchHandle(watch_callback)
-    }
-}
-
 impl WatchCallbackList {
     /// Suscribes a new callback and returns a handle that controls the lifetime of the callback.
     pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
         let watch_callback_arc = Arc::new(watch_callback);
         let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
         self.router.write().unwrap().push(watch_callback_weak);
-        WatchHandle::new(watch_callback_arc)
+        WatchHandle(watch_callback_arc)
     }
 
     fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
@@ -56,21 +47,14 @@ impl WatchCallbackList {
     }
 
     /// Triggers all callbacks
-    pub fn broadcast(&self) -> impl Future<Output = ()> {
+    pub fn broadcast(&self) {
         let callbacks = self.list_callback();
-        let (sender, receiver) = oneshot::channel();
-        let result = receiver.unwrap_or_else(|_| ());
-        if callbacks.is_empty() {
-            let _ = sender.send(());
-            return result;
-        }
         let spawn_res = std::thread::Builder::new()
             .name("watch-callbacks".to_string())
             .spawn(move || {
                 for callback in callbacks {
                     callback();
                 }
-                let _ = sender.send(());
             });
         if let Err(err) = spawn_res {
             error!(
@@ -78,17 +62,19 @@ impl WatchCallbackList {
                 err
             );
         }
-        result
     }
 }
 
 #[cfg(test)]
 mod tests {
     use crate::directory::WatchCallbackList;
-    use futures::executor::block_on;
     use std::mem;
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::Arc;
+    use std::thread;
+    use std::time::Duration;
+
+    const WAIT_TIME: u64 = 20;
 
     #[test]
     fn test_watch_event_router_simple() {
@@ -98,22 +84,22 @@ mod tests {
         let inc_callback = Box::new(move || {
             counter_clone.fetch_add(1, Ordering::SeqCst);
         });
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast();
         assert_eq!(0, counter.load(Ordering::SeqCst));
         let handle_a = watch_event_router.subscribe(inc_callback);
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(1, counter.load(Ordering::SeqCst));
-        block_on(async {
-            (
-                watch_event_router.broadcast().await,
-                watch_event_router.broadcast().await,
-                watch_event_router.broadcast().await,
-            )
-        });
+        watch_event_router.broadcast();
+        watch_event_router.broadcast();
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(4, counter.load(Ordering::SeqCst));
         mem::drop(handle_a);
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(4, counter.load(Ordering::SeqCst));
     }
 
@@ -129,20 +115,20 @@ mod tests {
         };
         let handle_a = watch_event_router.subscribe(inc_callback(1));
         let handle_a2 = watch_event_router.subscribe(inc_callback(10));
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        block_on(async {
-            futures::join!(
-                watch_event_router.broadcast(),
-                watch_event_router.broadcast()
-            )
-        });
+        watch_event_router.broadcast();
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(22, counter.load(Ordering::SeqCst));
         mem::drop(handle_a);
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(32, counter.load(Ordering::SeqCst));
         mem::drop(handle_a2);
-        block_on(watch_event_router.broadcast());
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast();
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(32, counter.load(Ordering::SeqCst));
     }
 
@@ -156,15 +142,14 @@ mod tests {
         });
         let handle_a = watch_event_router.subscribe(inc_callback);
         assert_eq!(0, counter.load(Ordering::SeqCst));
-        block_on(async {
-            let future1 = watch_event_router.broadcast();
-            let future2 = watch_event_router.broadcast();
-            futures::join!(future1, future2)
-        });
+        watch_event_router.broadcast();
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
        assert_eq!(2, counter.load(Ordering::SeqCst));
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         mem::drop(handle_a);
-        let _ = watch_event_router.broadcast();
-        block_on(watch_event_router.broadcast());
+        watch_event_router.broadcast();
+        thread::sleep(Duration::from_millis(WAIT_TIME));
         assert_eq!(2, counter.load(Ordering::SeqCst));
     }
 }
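Note: on both sides, `WatchCallbackList` stores `Weak` pointers and `WatchHandle` owns the only `Arc`, so dropping the handle unsubscribes the callback; these hunks only change whether `broadcast` hands back a future to await. The ownership scheme in isolation (illustrative names, not the tantivy types):

    use std::sync::{Arc, RwLock, Weak};

    type Callback = Box<dyn Fn() + Send + Sync>;

    #[derive(Default)]
    struct CallbackList {
        router: RwLock<Vec<Weak<Callback>>>,
    }

    impl CallbackList {
        // The returned Arc is the subscription handle: once the caller
        // drops it, the Weak below fails to upgrade and the callback is
        // effectively unsubscribed.
        fn subscribe(&self, cb: Callback) -> Arc<Callback> {
            let arc = Arc::new(cb);
            self.router.write().unwrap().push(Arc::downgrade(&arc));
            arc
        }

        fn broadcast(&self) {
            let callbacks: Vec<Arc<Callback>> = self
                .router
                .read()
                .unwrap()
                .iter()
                .filter_map(Weak::upgrade)
                .collect();
            for cb in callbacks {
                cb();
            }
        }
    }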
14
src/error.rs
@@ -2,8 +2,8 @@
 
 use std::io;
 
+use crate::directory::error::LockError;
 use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
-use crate::directory::error::{Incompatibility, LockError};
 use crate::fastfield::FastFieldNotAvailableError;
 use crate::query;
 use crate::schema;
@@ -80,9 +80,6 @@ pub enum TantivyError {
     /// System error. (e.g.: We failed spawning a new thread)
     #[fail(display = "System error.'{}'", _0)]
     SystemError(String),
-    /// Index incompatible with current version of tantivy
-    #[fail(display = "{:?}", _0)]
-    IncompatibleIndex(Incompatibility),
 }
 
 impl From<DataCorruption> for TantivyError {
@@ -132,9 +129,6 @@ impl From<OpenReadError> for TantivyError {
         match error {
             OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
             OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
-            OpenReadError::IncompatibleIndex(incompatibility) => {
-                TantivyError::IncompatibleIndex(incompatibility)
-            }
         }
     }
 }
@@ -176,9 +170,3 @@ impl From<serde_json::Error> for TantivyError {
         TantivyError::IOError(io_err.into())
     }
 }
-
-impl From<rayon::ThreadPoolBuildError> for TantivyError {
-    fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
-        TantivyError::SystemError(error.to_string())
-    }
-}
@@ -1,19 +1,17 @@
-use crate::common::{BitSet, HasLen};
+use crate::common::HasLen;
 use crate::directory::ReadOnlySource;
 use crate::directory::WritePtr;
 use crate::space_usage::ByteCount;
 use crate::DocId;
+use bit_set::BitSet;
 use std::io;
 use std::io::Write;
 
 /// Write a delete `BitSet`
 ///
 /// where `delete_bitset` is the set of deleted `DocId`.
-pub fn write_delete_bitset(
-    delete_bitset: &BitSet,
-    max_doc: u32,
-    writer: &mut WritePtr,
-) -> io::Result<()> {
+pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
+    let max_doc = delete_bitset.capacity();
     let mut byte = 0u8;
     let mut shift = 0u8;
     for doc in 0..max_doc {
@@ -31,7 +29,7 @@ pub fn write_delete_bitset(
     if max_doc % 8 > 0 {
         writer.write_all(&[byte])?;
     }
-    Ok(())
+    writer.flush()
 }
 
 /// Set of deleted `DocId`s.
@@ -85,40 +83,43 @@ impl HasLen for DeleteBitSet {
 mod tests {
     use super::*;
     use crate::directory::*;
+    use bit_set::BitSet;
     use std::path::PathBuf;
 
-    fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) {
+    fn test_delete_bitset_helper(bitset: &BitSet) {
         let test_path = PathBuf::from("test");
         let mut directory = RAMDirectory::create();
         {
             let mut writer = directory.open_write(&*test_path).unwrap();
-            write_delete_bitset(bitset, max_doc, &mut writer).unwrap();
-            writer.terminate().unwrap();
+            write_delete_bitset(bitset, &mut writer).unwrap();
         }
-        let source = directory.open_read(&test_path).unwrap();
-        let delete_bitset = DeleteBitSet::open(source);
-        for doc in 0..max_doc {
-            assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
-        }
-        assert_eq!(delete_bitset.len(), bitset.len());
+        {
+            let source = directory.open_read(&test_path).unwrap();
+            let delete_bitset = DeleteBitSet::open(source);
+            let n = bitset.capacity();
+            for doc in 0..n {
+                assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
+            }
+            assert_eq!(delete_bitset.len(), bitset.len());
+        }
     }
 
     #[test]
     fn test_delete_bitset() {
         {
-            let mut bitset = BitSet::with_max_value(10);
+            let mut bitset = BitSet::with_capacity(10);
             bitset.insert(1);
             bitset.insert(9);
-            test_delete_bitset_helper(&bitset, 10);
+            test_delete_bitset_helper(&bitset);
         }
         {
-            let mut bitset = BitSet::with_max_value(8);
+            let mut bitset = BitSet::with_capacity(8);
             bitset.insert(1);
             bitset.insert(2);
             bitset.insert(3);
             bitset.insert(5);
             bitset.insert(7);
-            test_delete_bitset_helper(&bitset, 8);
+            test_delete_bitset_helper(&bitset);
         }
     }
 }
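Note: the body of the serialization loop is elided by this hunk, but the surrounding lines (`byte`, `shift`, the trailing `max_doc % 8 > 0` write) pin down the format: one bit per document, least-significant bit first, with a final partial byte. A self-contained reconstruction of that shape (an assumption about the elided middle, not the exact tantivy code):

    use std::io::{self, Write};

    fn write_bitset<W: Write>(is_deleted: &[bool], writer: &mut W) -> io::Result<()> {
        let mut byte = 0u8;
        let mut shift = 0u8;
        for &deleted in is_deleted {
            if deleted {
                byte |= 1 << shift;
            }
            shift += 1;
            if shift == 8 {
                // A full byte covers 8 documents.
                writer.write_all(&[byte])?;
                byte = 0;
                shift = 0;
            }
        }
        if shift > 0 {
            // Trailing documents that do not fill a whole byte.
            writer.write_all(&[byte])?;
        }
        writer.flush()
    }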
@@ -33,7 +33,6 @@ pub use self::reader::FastFieldReader;
 pub use self::readers::FastFieldReaders;
 pub use self::serializer::FastFieldSerializer;
 pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
-use crate::chrono::{NaiveDateTime, Utc};
 use crate::common;
 use crate::schema::Cardinality;
 use crate::schema::FieldType;
@@ -50,7 +49,7 @@ mod serializer;
 mod writer;
 
 /// Trait for types that are allowed for fast fields: (u64, i64 and f64).
-pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
+pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
     /// Converts a value from u64
     ///
     /// Internally all fast field values are encoded as u64.
@@ -70,12 +69,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
     /// Cast value to `u64`.
     /// The value is just reinterpreted in memory.
     fn as_u64(&self) -> u64;
-
-    /// Build a default value. This default value is never used, so the value does not
-    /// really matter.
-    fn make_zero() -> Self {
-        Self::from_u64(0i64.to_u64())
-    }
 }
 
 impl FastValue for u64 {
@@ -142,34 +135,11 @@ impl FastValue for f64 {
     }
 }
 
-impl FastValue for crate::DateTime {
-    fn from_u64(timestamp_u64: u64) -> Self {
-        let timestamp_i64 = i64::from_u64(timestamp_u64);
-        crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
-    }
-
-    fn to_u64(&self) -> u64 {
-        self.timestamp().to_u64()
-    }
-
-    fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
-        match *field_type {
-            FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
-            _ => None,
-        }
-    }
-
-    fn as_u64(&self) -> u64 {
-        self.timestamp().as_u64()
-    }
-}
-
 fn value_to_u64(value: &Value) -> u64 {
     match *value {
         Value::U64(ref val) => *val,
         Value::I64(ref val) => common::i64_to_u64(*val),
         Value::F64(ref val) => common::f64_to_u64(*val),
-        Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
         _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
     }
 }
@@ -181,12 +151,10 @@ mod tests {
     use crate::common::CompositeFile;
     use crate::directory::{Directory, RAMDirectory, WritePtr};
     use crate::fastfield::FastFieldReader;
-    use crate::merge_policy::NoMergePolicy;
+    use crate::schema::Document;
     use crate::schema::Field;
     use crate::schema::Schema;
     use crate::schema::FAST;
-    use crate::schema::{Document, IntOptions};
-    use crate::{Index, SegmentId, SegmentReader};
     use once_cell::sync::Lazy;
     use rand::prelude::SliceRandom;
     use rand::rngs::StdRng;
@@ -210,12 +178,6 @@ mod tests {
         assert_eq!(test_fastfield.get(2), 300);
     }
 
-    #[test]
-    pub fn test_fastfield_i64_u64() {
-        let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
-        assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
-    }
-
     #[test]
     fn test_intfastfield_small() {
         let path = Path::new("test");
@@ -467,93 +429,6 @@ mod tests {
             }
         }
     }
 
-    #[test]
-    fn test_merge_missing_date_fast_field() {
-        let mut schema_builder = Schema::builder();
-        let date_field = schema_builder.add_date_field("date", FAST);
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.set_merge_policy(Box::new(NoMergePolicy));
-        index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
-        index_writer.commit().unwrap();
-        index_writer.add_document(doc!());
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let segment_ids: Vec<SegmentId> = reader
-            .searcher()
-            .segment_readers()
-            .iter()
-            .map(SegmentReader::segment_id)
-            .collect();
-        assert_eq!(segment_ids.len(), 2);
-        let merge_future = index_writer.merge(&segment_ids[..]);
-        let merge_res = futures::executor::block_on(merge_future);
-        assert!(merge_res.is_ok());
-        assert!(reader.reload().is_ok());
-        assert_eq!(reader.searcher().segment_readers().len(), 1);
-    }
-
-    #[test]
-    fn test_default_datetime() {
-        assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
-    }
-
-    #[test]
-    fn test_datefastfield() {
-        use crate::fastfield::FastValue;
-        let mut schema_builder = Schema::builder();
-        let date_field = schema_builder.add_date_field("date", FAST);
-        let multi_date_field = schema_builder.add_date_field(
-            "multi_date",
-            IntOptions::default().set_fast(Cardinality::MultiValues),
-        );
-        let schema = schema_builder.build();
-        let index = Index::create_in_ram(schema);
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.set_merge_policy(Box::new(NoMergePolicy));
-        index_writer.add_document(doc!(
-            date_field => crate::DateTime::from_u64(1i64.to_u64()),
-            multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
-            multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
-        ));
-        index_writer.add_document(doc!(
-            date_field => crate::DateTime::from_u64(4i64.to_u64())
-        ));
-        index_writer.add_document(doc!(
-            multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
-            multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
-        ));
-        index_writer.commit().unwrap();
-        let reader = index.reader().unwrap();
-        let searcher = reader.searcher();
-        assert_eq!(searcher.segment_readers().len(), 1);
-        let segment_reader = searcher.segment_reader(0);
-        let fast_fields = segment_reader.fast_fields();
-        let date_fast_field = fast_fields.date(date_field).unwrap();
-        let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
-        let mut dates = vec![];
-        {
-            assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
-            dates_fast_field.get_vals(0u32, &mut dates);
-            assert_eq!(dates.len(), 2);
-            assert_eq!(dates[0].timestamp(), 2i64);
-            assert_eq!(dates[1].timestamp(), 3i64);
-        }
-        {
-            assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
-            dates_fast_field.get_vals(1u32, &mut dates);
-            assert!(dates.is_empty());
-        }
-        {
-            assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
-            dates_fast_field.get_vals(2u32, &mut dates);
-            assert_eq!(dates.len(), 2);
-            assert_eq!(dates[0].timestamp(), 5i64);
-            assert_eq!(dates[1].timestamp(), 6i64);
-        }
-    }
 }
 
 #[cfg(all(test, feature = "unstable"))]
@@ -45,7 +45,7 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
     pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
         let (start, stop) = self.range(doc);
         let len = (stop - start) as usize;
-        vals.resize(len, Item::make_zero());
+        vals.resize(len, Item::default());
         self.vals_reader.get_range_u64(start, &mut vals[..]);
     }
 
@@ -15,11 +15,9 @@ pub struct FastFieldReaders {
     fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
     fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
     fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
-    fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
     fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
     fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
     fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
-    fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
     fast_bytes: HashMap<Field, BytesFastFieldReader>,
     fast_fields_composite: CompositeFile,
 }
@@ -28,7 +26,6 @@ enum FastType {
     I64,
     U64,
     F64,
-    Date,
 }
 
 fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
@@ -42,9 +39,6 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
         FieldType::F64(options) => options
             .get_fastfield_cardinality()
             .map(|cardinality| (FastType::F64, cardinality)),
-        FieldType::Date(options) => options
-            .get_fastfield_cardinality()
-            .map(|cardinality| (FastType::Date, cardinality)),
         FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
         _ => None,
     }
@@ -59,11 +53,9 @@ impl FastFieldReaders {
             fast_field_i64: Default::default(),
             fast_field_u64: Default::default(),
             fast_field_f64: Default::default(),
-            fast_field_date: Default::default(),
             fast_field_i64s: Default::default(),
             fast_field_u64s: Default::default(),
             fast_field_f64s: Default::default(),
-            fast_field_dates: Default::default(),
             fast_bytes: Default::default(),
             fast_fields_composite: fast_fields_composite.clone(),
         };
@@ -103,12 +95,6 @@ impl FastFieldReaders {
                         FastFieldReader::open(fast_field_data.clone()),
                     );
                 }
-                FastType::Date => {
-                    fast_field_readers.fast_field_date.insert(
-                        field,
-                        FastFieldReader::open(fast_field_data.clone()),
-                    );
-                }
             }
         } else {
             return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -144,14 +130,6 @@ impl FastFieldReaders {
                         .fast_field_f64s
                         .insert(field, multivalued_int_fast_field);
                 }
-                FastType::Date => {
-                    let vals_reader = FastFieldReader::open(fast_field_data);
-                    let multivalued_int_fast_field =
-                        MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
-                    fast_field_readers
-                        .fast_field_dates
-                        .insert(field, multivalued_int_fast_field);
-                }
             }
         } else {
             return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -178,6 +156,8 @@ impl FastFieldReaders {
     /// If the field is a i64-fast field, return the associated u64 reader. Values are
     /// mapped from i64 to u64 using a (well the, it is unique) monotonic mapping. ///
     ///
+    ///TODO should it also be lenient with f64?
+    ///
     /// This method is useful when merging segment reader.
     pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
         if let Some(u64_ff_reader) = self.u64(field) {
@@ -186,12 +166,6 @@ impl FastFieldReaders {
         if let Some(i64_ff_reader) = self.i64(field) {
             return Some(i64_ff_reader.into_u64_reader());
         }
-        if let Some(f64_ff_reader) = self.f64(field) {
-            return Some(f64_ff_reader.into_u64_reader());
-        }
-        if let Some(date_ff_reader) = self.date(field) {
-            return Some(date_ff_reader.into_u64_reader());
-        }
         None
     }
 
@@ -202,13 +176,6 @@ impl FastFieldReaders {
         self.fast_field_i64.get(&field).cloned()
     }
 
-    /// Returns the `i64` fast field reader reader associated to `field`.
-    ///
-    /// If `field` is not a i64 fast field, this method returns `None`.
-    pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
-        self.fast_field_date.get(&field).cloned()
-    }
-
     /// Returns the `f64` fast field reader reader associated to `field`.
     ///
     /// If `field` is not a f64 fast field, this method returns `None`.
@@ -235,9 +202,6 @@ impl FastFieldReaders {
         if let Some(i64s_ff_reader) = self.i64s(field) {
             return Some(i64s_ff_reader.into_u64s_reader());
         }
-        if let Some(f64s_ff_reader) = self.f64s(field) {
-            return Some(f64s_ff_reader.into_u64s_reader());
-        }
         None
     }
 
@@ -255,13 +219,6 @@ impl FastFieldReaders {
         self.fast_field_f64s.get(&field).cloned()
    }
 
-    /// Returns a `crate::DateTime` multi-valued fast field reader reader associated to `field`.
-    ///
-    /// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
-    pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
-        self.fast_field_dates.get(&field).cloned()
-    }
-
     /// Returns the `bytes` fast field reader associated to `field`.
     ///
     /// If `field` is not a bytes fast field, returns `None`.
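Note: the "monotonic mapping" the doc comment refers to is an order-preserving bijection between i64 and u64, so that sorting the encoded u64 values sorts the original i64 values. One standard way to get it, and what `common::i64_to_u64` stands for in this diff, is to flip the sign bit (a sketch under that assumption; check `common` for the real definition):

    /// i64::MIN -> 0, -1 -> 0x7FFF..., 0 -> 0x8000..., i64::MAX -> u64::MAX.
    fn i64_to_u64(val: i64) -> u64 {
        (val as u64) ^ (1 << 63)
    }

    fn u64_to_i64(val: u64) -> i64 {
        (val ^ (1 << 63)) as i64
    }

    fn main() {
        assert!(i64_to_u64(-1) < i64_to_u64(0)); // order is preserved
        assert_eq!(u64_to_i64(i64_to_u64(-42)), -42);
    }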
@@ -4,7 +4,7 @@ use crate::common::BinarySerializable;
 use crate::common::VInt;
 use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
 use crate::postings::UnorderedTermId;
-use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
+use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
 use crate::termdict::TermOrdinal;
 use fnv::FnvHashMap;
 use std::collections::HashMap;
@@ -17,14 +17,6 @@ pub struct FastFieldsWriter {
     bytes_value_writers: Vec<BytesFastFieldWriter>,
 }
 
-fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
-    match *field_entry.field_type() {
-        FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
-        FieldType::F64(_) => common::f64_to_u64(0.0f64),
-        _ => 0u64,
-    }
-}
-
 impl FastFieldsWriter {
     /// Create all `FastFieldWriter` required by the schema.
     pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
@@ -33,15 +25,18 @@ impl FastFieldsWriter {
         let mut bytes_value_writers = Vec::new();
 
         for (field, field_entry) in schema.fields() {
+            let default_value = match *field_entry.field_type() {
+                FieldType::I64(_) => common::i64_to_u64(0i64),
+                FieldType::F64(_) => common::f64_to_u64(0.0f64),
+                _ => 0u64,
+            };
             match *field_entry.field_type() {
                 FieldType::I64(ref int_options)
                 | FieldType::U64(ref int_options)
-                | FieldType::F64(ref int_options)
-                | FieldType::Date(ref int_options) => {
+                | FieldType::F64(ref int_options) => {
                     match int_options.get_fastfield_cardinality() {
                         Some(Cardinality::SingleValue) => {
                             let mut fast_field_writer = IntFastFieldWriter::new(field);
-                            let default_value = fast_field_default_value(field_entry);
                             fast_field_writer.set_val_if_missing(default_value);
                             single_value_writers.push(fast_field_writer);
                         }
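Note: the `default_value` match is not redundant ceremony: with the sign-flip encoding sketched earlier, the u64 pattern that decodes back to `0i64` is `1 << 63`, not `0`, so a raw `0u64` default for a missing i64 document would decode to `i64::MIN`. A tiny check (same assumed encoding as the previous sketch):

    fn i64_to_u64(val: i64) -> u64 {
        (val as u64) ^ (1 << 63)
    }

    fn main() {
        // The correct "missing value" sentinel for an i64 fast field.
        assert_eq!(i64_to_u64(0), 1 << 63);
    }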
@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
 use crate::Opstamp;
 use std::mem;
 use std::ops::DerefMut;
-use std::sync::{Arc, RwLock, Weak};
+use std::sync::{Arc, RwLock};
 
 // The DeleteQueue is similar in conceptually to a multiple
 // consumer single producer broadcast channel.
@@ -14,15 +14,14 @@ use std::sync::{Arc, RwLock};
 //
 // New consumer can be created in two ways
 // - calling `delete_queue.cursor()` returns a cursor, that
-//   will include all future delete operation (and some or none
-//   of the past operations... The client is in charge of checking the opstamps.).
+//   will include all future delete operation (and no past operations).
 // - cloning an existing cursor returns a new cursor, that
 //   is at the exact same position, and can now advance independently
 //   from the original cursor.
 #[derive(Default)]
 struct InnerDeleteQueue {
     writer: Vec<DeleteOperation>,
-    last_block: Weak<Block>,
+    last_block: Option<Arc<Block>>,
 }
 
 #[derive(Clone)]
@@ -33,31 +32,21 @@ pub struct DeleteQueue {
 impl DeleteQueue {
     // Creates a new delete queue.
     pub fn new() -> DeleteQueue {
-        DeleteQueue {
+        let delete_queue = DeleteQueue {
             inner: Arc::default(),
-        }
-    }
+        };
 
-    fn get_last_block(&self) -> Arc<Block> {
+        let next_block = NextBlock::from(delete_queue.clone());
+
         {
-            // try get the last block with simply acquiring the read lock.
-            let rlock = self.inner.read().unwrap();
-            if let Some(block) = rlock.last_block.upgrade() {
-                return block;
-            }
+            let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
+            delete_queue_wlock.last_block = Some(Arc::new(Block {
+                operations: Arc::default(),
+                next: next_block,
+            }));
         }
-        // It failed. Let's double check after acquiring the write, as someone could have called
-        // `get_last_block` right after we released the rlock.
-        let mut wlock = self.inner.write().unwrap();
-        if let Some(block) = wlock.last_block.upgrade() {
-            return block;
-        }
-        let block = Arc::new(Block {
-            operations: Arc::default(),
-            next: NextBlock::from(self.clone()),
-        });
-        wlock.last_block = Arc::downgrade(&block);
-        block
+
+        delete_queue
     }
 
     // Creates a new cursor that makes it possible to
@@ -65,7 +54,17 @@ impl DeleteQueue {
     //
     // Past delete operations are not accessible.
     pub fn cursor(&self) -> DeleteCursor {
-        let last_block = self.get_last_block();
+        let last_block = self
+            .inner
+            .read()
+            .expect("Read lock poisoned when opening delete queue cursor")
+            .last_block
+            .clone()
+            .expect(
+                "Failed to unwrap last_block. This should never happen
+                as the Option<> is only here to make
+                initialization possible",
+            );
         let operations_len = last_block.operations.len();
         DeleteCursor {
             block: last_block,
@@ -101,19 +100,23 @@ impl DeleteQueue {
             .write()
             .expect("Failed to acquire write lock on delete queue writer");
 
-        if self_wlock.writer.is_empty() {
-            return None;
+        let delete_operations;
+        {
+            let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
+            if writer.is_empty() {
+                return None;
+            }
+            delete_operations = mem::replace(writer, vec![]);
         }
 
-        let delete_operations = mem::replace(&mut self_wlock.writer, vec![]);
-        let new_block = Arc::new(Block {
-            operations: Arc::new(delete_operations.into_boxed_slice()),
-            next: NextBlock::from(self.clone()),
-        });
+        let next_block = NextBlock::from(self.clone());
+        {
+            self_wlock.last_block = Some(Arc::new(Block {
+                operations: Arc::new(delete_operations),
+                next: next_block,
+            }));
|
||||||
|
}
|
||||||
self_wlock.last_block = Arc::downgrade(&new_block);
|
self_wlock.last_block.clone()
|
||||||
Some(new_block)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -167,7 +170,7 @@ impl NextBlock {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct Block {
|
struct Block {
|
||||||
operations: Arc<Box<[DeleteOperation]>>,
|
operations: Arc<Vec<DeleteOperation>>,
|
||||||
next: NextBlock,
|
next: NextBlock,
|
||||||
}
|
}
|
||||||
|
|
||||||
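The `Weak<Block>` side of this diff uses a double-checked caching pattern in `get_last_block`: try to upgrade a cached `Weak` under the read lock, and only rebuild under the write lock if that fails. A standalone sketch of the pattern (hypothetical types, not tantivy's internals):

    use std::sync::{Arc, RwLock, Weak};

    struct Cache {
        last: RwLock<Weak<String>>,
    }

    impl Cache {
        fn get_or_rebuild(&self) -> Arc<String> {
            // Fast path: the cached value may still be alive.
            if let Some(value) = self.last.read().unwrap().upgrade() {
                return value;
            }
            let mut slot = self.last.write().unwrap();
            // Double check: another thread may have rebuilt it meanwhile.
            if let Some(value) = slot.upgrade() {
                return value;
            }
            let fresh = Arc::new("rebuilt".to_string());
            *slot = Arc::downgrade(&fresh);
            fresh
        }
    }

    fn main() {
        let cache = Cache { last: RwLock::new(Weak::new()) };
        let v1 = cache.get_or_rebuild();
        let v2 = cache.get_or_rebuild();
        assert!(Arc::ptr_eq(&v1, &v2)); // the second call hits the cache
    }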
src/indexer/index_writer.rs

@@ -1,15 +1,14 @@
 use super::operation::{AddOperation, UserOperation};
 use super::segment_updater::SegmentUpdater;
 use super::PreparedCommit;
-use crate::common::BitSet;
 use crate::core::Index;
 use crate::core::Segment;
 use crate::core::SegmentComponent;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::core::SegmentReader;
+use crate::directory::DirectoryLock;
 use crate::directory::TerminatingWrite;
-use crate::directory::{DirectoryLock, GarbageCollectionResult};
 use crate::docset::DocSet;
 use crate::error::TantivyError;
 use crate::fastfield::write_delete_bitset;
@@ -24,9 +23,10 @@ use crate::schema::Document;
 use crate::schema::IndexRecordOption;
 use crate::schema::Term;
 use crate::Opstamp;
+use crate::Result;
+use bit_set::BitSet;
 use crossbeam::channel;
-use futures::executor::block_on;
-use futures::future::Future;
+use futures::{Canceled, Future};
 use smallvec::smallvec;
 use smallvec::SmallVec;
 use std::mem;
@@ -72,7 +72,7 @@ pub struct IndexWriter {

     heap_size_in_bytes_per_thread: usize,

-    workers_join_handle: Vec<JoinHandle<crate::Result<()>>>,
+    workers_join_handle: Vec<JoinHandle<Result<()>>>,

     operation_receiver: OperationReceiver,
     operation_sender: OperationSender,
@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
     delete_cursor: &mut DeleteCursor,
     doc_opstamps: &DocToOpstampMapping,
     target_opstamp: Opstamp,
-) -> crate::Result<bool> {
+) -> Result<bool> {
     let mut might_have_changed = false;
     while let Some(delete_op) = delete_cursor.get() {
         if delete_op.opstamp > target_opstamp {
@@ -115,7 +115,7 @@ fn compute_deleted_bitset(
         while docset.advance() {
             let deleted_doc = docset.doc();
             if deleted_doc < limit_doc {
-                delete_bitset.insert(deleted_doc);
+                delete_bitset.insert(deleted_doc as usize);
                 might_have_changed = true;
             }
         }
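`compute_deleted_bitset` is the core of tantivy's delete handling: each pending delete term is searched in the segment, and a document is marked deleted only when the delete's opstamp is later than the document's own opstamp. A condensed sketch of the logic (assumed names and types, not the function above):

    struct DeleteOp {
        opstamp: u64,
        matching_docs: Vec<u32>,
    }

    // `ops` is assumed sorted by opstamp, like the delete queue cursor.
    fn compute_deletes(ops: &[DeleteOp], doc_opstamps: &[u64], target: u64) -> Vec<bool> {
        let mut deleted = vec![false; doc_opstamps.len()];
        for op in ops.iter().take_while(|op| op.opstamp <= target) {
            for &doc in &op.matching_docs {
                // A delete only affects documents added *before* it.
                if doc_opstamps[doc as usize] < op.opstamp {
                    deleted[doc as usize] = true;
                }
            }
        }
        deleted
    }

    fn main() {
        let ops = vec![DeleteOp { opstamp: 10, matching_docs: vec![0, 1] }];
        // doc 0 was added before the delete, doc 1 after it.
        assert_eq!(compute_deletes(&ops, &[5, 12], 20), vec![true, false]);
    }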
@@ -126,73 +126,65 @@ fn compute_deleted_bitset(
     Ok(might_have_changed)
 }

-/// Advance delete for the given segment up to the target opstamp.
-///
-/// Note that there are no guarantee that the resulting `segment_entry` delete_opstamp
-/// is `==` target_opstamp.
-/// For instance, there was no delete operation between the state of the `segment_entry` and
-/// the `target_opstamp`, `segment_entry` is not updated.
+/// Advance delete for the given segment up
+/// to the target opstamp.
 pub(crate) fn advance_deletes(
     mut segment: Segment,
     segment_entry: &mut SegmentEntry,
     target_opstamp: Opstamp,
-) -> crate::Result<()> {
-    if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
-        // We are already up-to-date here.
-        return Ok(());
-    }
+) -> Result<()> {
+    {
+        if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
+            // We are already up-to-date here.
+            return Ok(());
+        }

-    if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() {
-        // There has been no `DeleteOperation` between the segment status and `target_opstamp`.
-        return Ok(());
-    }
-
-    let segment_reader = SegmentReader::open(&segment)?;
+        let segment_reader = SegmentReader::open(&segment)?;

-    let max_doc = segment_reader.max_doc();
-    let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
-        Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
-        None => BitSet::with_max_value(max_doc),
-    };
+        let max_doc = segment_reader.max_doc();
+        let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
+            Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
+            None => BitSet::with_capacity(max_doc as usize),
+        };

+        let delete_cursor = segment_entry.delete_cursor();
         compute_deleted_bitset(
             &mut delete_bitset,
             &segment_reader,
-            segment_entry.delete_cursor(),
+            delete_cursor,
             &DocToOpstampMapping::None,
             target_opstamp,
         )?;

         // TODO optimize
-        if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
-            for doc in 0u32..max_doc {
-                if seg_delete_bitset.is_deleted(doc) {
-                    delete_bitset.insert(doc);
-                }
-            }
-        }
+        for doc in 0u32..max_doc {
+            if segment_reader.is_deleted(doc) {
+                delete_bitset.insert(doc as usize);
+            }
+        }

         let num_deleted_docs = delete_bitset.len();
         if num_deleted_docs > 0 {
             segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
             let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
-            write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?;
+            write_delete_bitset(&delete_bitset, &mut delete_file)?;
             delete_file.terminate()?;
+        }
     }

     segment_entry.set_meta(segment.meta().clone());
     Ok(())
 }

 fn index_documents(
     memory_budget: usize,
-    segment: Segment,
+    segment: &Segment,
     grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
     segment_updater: &mut SegmentUpdater,
     mut delete_cursor: DeleteCursor,
-) -> crate::Result<bool> {
+) -> Result<bool> {
     let schema = segment.schema();
+    let segment_id = segment.id();
     let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
     for document_group in grouped_document_iterator {
         for doc in document_group {
@@ -212,32 +204,22 @@ fn index_documents(
             return Ok(false);
         }
     }

-    let max_doc = segment_writer.max_doc();
+    let num_docs = segment_writer.max_doc();

     // this is ensured by the call to peek before starting
     // the worker thread.
-    assert!(max_doc > 0);
+    assert!(num_docs > 0);

     let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
-
-    let segment_with_max_doc = segment.with_max_doc(max_doc);
+    let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);

     let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());

-    let delete_bitset_opt = apply_deletes(
-        &segment_with_max_doc,
-        &mut delete_cursor,
-        &doc_opstamps,
-        last_docstamp,
-    )?;
+    let delete_bitset_opt =
+        apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;

-    let segment_entry = SegmentEntry::new(
-        segment_with_max_doc.meta().clone(),
-        delete_cursor,
-        delete_bitset_opt,
-    );
-    block_on(segment_updater.schedule_add_segment(segment_entry))?;
-    Ok(true)
+    let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
+    Ok(segment_updater.add_segment(segment_entry))
 }

 fn apply_deletes(
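`apply_deletes` needs the `DocToOpstampMapping` seen above because deletes and adds interleave within a freshly written segment: a delete may only remove documents whose opstamp precedes it. Since documents are stamped in order, the cutoff can be found by binary search. A sketch of that idea (assumed names, not tantivy's exact implementation):

    fn docs_affected_by_delete(doc_opstamps: &[u64], delete_opstamp: u64) -> usize {
        // doc_opstamps is sorted, so this returns the index of the first
        // document added at or after the delete operation.
        doc_opstamps.partition_point(|&stamp| stamp < delete_opstamp)
    }

    fn main() {
        let doc_opstamps = [1, 2, 5, 8]; // opstamps of docs 0..4
        // A delete stamped 6 can only affect docs 0, 1 and 2 (opstamps 1, 2, 5).
        assert_eq!(docs_affected_by_delete(&doc_opstamps, 6), 3);
    }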
@@ -245,7 +227,7 @@ fn apply_deletes(
     mut delete_cursor: &mut DeleteCursor,
     doc_opstamps: &[Opstamp],
     last_docstamp: Opstamp,
-) -> crate::Result<Option<BitSet>> {
+) -> Result<Option<BitSet<u32>>> {
     if delete_cursor.get().is_none() {
         // if there are no delete operation in the queue, no need
         // to even open the segment.
@@ -253,9 +235,7 @@ fn apply_deletes(
     }
     let segment_reader = SegmentReader::open(segment)?;
     let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
-    let max_doc = segment.meta().max_doc();
-    let mut deleted_bitset = BitSet::with_max_value(max_doc);
+    let mut deleted_bitset = BitSet::with_capacity(segment_reader.max_doc() as usize);
     let may_have_deletes = compute_deleted_bitset(
         &mut deleted_bitset,
         &segment_reader,
@@ -290,7 +270,7 @@ impl IndexWriter {
         num_threads: usize,
         heap_size_in_bytes_per_thread: usize,
         directory_lock: DirectoryLock,
-    ) -> crate::Result<IndexWriter> {
+    ) -> Result<IndexWriter> {
         if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
             let err_msg = format!(
                 "The heap size per thread needs to be at least {}.",
@@ -339,17 +319,12 @@ impl IndexWriter {
         Ok(index_writer)
     }

-    fn drop_sender(&mut self) {
-        let (sender, _receiver) = channel::bounded(1);
-        mem::replace(&mut self.operation_sender, sender);
-    }
-
     /// If there are some merging threads, blocks until they all finish their work and
     /// then drop the `IndexWriter`.
-    pub fn wait_merging_threads(mut self) -> crate::Result<()> {
+    pub fn wait_merging_threads(mut self) -> Result<()> {
         // this will stop the indexing thread,
         // dropping the last reference to the segment_updater.
-        self.drop_sender();
+        drop(self.operation_sender);

         let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
         for join_handle in former_workers_handles {
@@ -360,6 +335,7 @@ impl IndexWriter {
                 TantivyError::ErrorInThread("Error in indexing worker thread.".into())
             })?;
         }
+        drop(self.workers_join_handle);

         let result = self
             .segment_updater
@@ -374,10 +350,10 @@ impl IndexWriter {
     }

     #[doc(hidden)]
-    pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> {
+    pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
         let delete_cursor = self.delete_queue.cursor();
         let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
-        block_on(self.segment_updater.schedule_add_segment(segment_entry))
+        self.segment_updater.add_segment(segment_entry);
     }

     /// Creates a new segment.
@@ -394,7 +370,7 @@ impl IndexWriter {

     /// Spawns a new worker thread for indexing.
     /// The thread consumes documents from the pipeline.
-    fn add_indexing_worker(&mut self) -> crate::Result<()> {
+    fn add_indexing_worker(&mut self) -> Result<()> {
         let document_receiver_clone = self.operation_receiver.clone();
         let mut segment_updater = self.segment_updater.clone();

@@ -402,7 +378,7 @@ impl IndexWriter {

         let mem_budget = self.heap_size_in_bytes_per_thread;
         let index = self.index.clone();
-        let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new()
+        let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
             .name(format!("thrd-tantivy-index{}", self.worker_id))
             .spawn(move || {
                 loop {
@@ -431,7 +407,7 @@ impl IndexWriter {
                     let segment = index.new_segment();
                     index_documents(
                         mem_budget,
-                        segment,
+                        &segment,
                         &mut document_iterator,
                         &mut segment_updater,
                         delete_cursor.clone(),
@@ -448,23 +424,22 @@ impl IndexWriter {
         self.segment_updater.get_merge_policy()
     }

-    /// Setter for the merge policy.
+    /// Set the merge policy.
     pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
         self.segment_updater.set_merge_policy(merge_policy);
     }

-    fn start_workers(&mut self) -> crate::Result<()> {
+    fn start_workers(&mut self) -> Result<()> {
         for _ in 0..self.num_threads {
             self.add_indexing_worker()?;
         }
         Ok(())
     }

-    /// Detects and removes the files that are not used by the index anymore.
-    pub fn garbage_collect_files(
-        &self,
-    ) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
-        self.segment_updater.schedule_garbage_collect()
+    /// Detects and removes the files that
+    /// are not used by the index anymore.
+    pub fn garbage_collect_files(&mut self) -> Result<()> {
+        self.segment_updater.garbage_collect_files().wait()
     }

     /// Deletes all documents from the index
@@ -503,7 +478,7 @@ impl IndexWriter {
     /// Ok(())
     /// }
     /// ```
-    pub fn delete_all_documents(&self) -> crate::Result<Opstamp> {
+    pub fn delete_all_documents(&mut self) -> Result<Opstamp> {
         // Delete segments
         self.segment_updater.remove_all_segments();
         // Return new stamp - reverted stamp
@@ -517,10 +492,8 @@ impl IndexWriter {
     pub fn merge(
         &mut self,
         segment_ids: &[SegmentId],
-    ) -> impl Future<Output = crate::Result<SegmentMeta>> {
-        let merge_operation = self.segment_updater.make_merge_operation(segment_ids);
-        let segment_updater = self.segment_updater.clone();
-        async move { segment_updater.start_merge(merge_operation)?.await }
+    ) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> {
+        self.segment_updater.start_merge(segment_ids)
     }

     /// Closes the current document channel send.
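The two columns of this hunk sit on different futures ecosystems: the left-hand side returns a futures 0.3 `impl Future` that the caller drives with an executor, while the right-hand side returns a futures 0.1 future that the caller blocks on with `.wait()`. A hedged caller-side sketch of both styles (assuming an `index_writer` and `segment_ids` already in scope; each half only compiles against its own side of the diff):

    // futures 0.1 style (right-hand column): initiating the merge may fail,
    // so the Result is handled first, then the future is driven with .wait().
    use futures::Future; // futures 0.1 brings .wait() into scope

    let segment_meta = index_writer
        .merge(&segment_ids)
        .expect("Failed to initiate merge")
        .wait()
        .expect("Merging failed");

    // futures 0.3 style (left-hand column): the returned future resolves to
    // a Result and is driven to completion with an executor.
    // use futures::executor::block_on;
    // let segment_meta = block_on(index_writer.merge(&segment_ids))?;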
@@ -546,8 +519,13 @@ impl IndexWriter {
     /// state as it was after the last commit.
     ///
     /// The opstamp at the last commit is returned.
-    pub fn rollback(&mut self) -> crate::Result<Opstamp> {
+    pub fn rollback(&mut self) -> Result<Opstamp> {
         info!("Rolling back to opstamp {}", self.committed_opstamp);
+        self.rollback_impl()
+    }
+
+    /// Private, implementation of rollback
+    fn rollback_impl(&mut self) -> Result<Opstamp> {
         // marks the segment updater as killed. From now on, all
         // segment updates will be ignored.
         self.segment_updater.kill();
@@ -603,7 +581,7 @@ impl IndexWriter {
     /// It is also possible to add a payload to the `commit`
     /// using this API.
     /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
-    pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> {
+    pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
         // Here, because we join all of the worker threads,
         // all of the segment update for this commit have been
         // sent.
@@ -650,7 +628,7 @@ impl IndexWriter {
     /// Commit returns the `opstamp` of the last document
     /// that made it in the commit.
     ///
-    pub fn commit(&mut self) -> crate::Result<Opstamp> {
+    pub fn commit(&mut self) -> Result<Opstamp> {
         self.prepare_commit()?.commit()
     }

@@ -691,6 +669,9 @@ impl IndexWriter {
     /// The opstamp is an increasing `u64` that can
     /// be used by the client to align commits with its own
     /// document queue.
+    ///
+    /// Currently it represents the number of documents that
+    /// have been added since the creation of the index.
     pub fn add_document(&self, document: Document) -> Opstamp {
         let opstamp = self.stamper.stamp();
         let add_operation = AddOperation { opstamp, document };
@@ -764,16 +745,6 @@ impl IndexWriter {
         }
     }

-impl Drop for IndexWriter {
-    fn drop(&mut self) {
-        self.segment_updater.kill();
-        self.drop_sender();
-        for work in self.workers_join_handle.drain(..) {
-            let _ = work.join();
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {

@@ -783,7 +754,7 @@ mod tests {
     use crate::error::*;
     use crate::indexer::NoMergePolicy;
     use crate::query::TermQuery;
-    use crate::schema::{self, IndexRecordOption, STRING};
+    use crate::schema::{self, IndexRecordOption};
     use crate::Index;
     use crate::ReloadPolicy;
     use crate::Term;
@@ -1208,16 +1179,4 @@ mod tests {
         assert!(clear_again.is_ok());
         assert!(commit_again.is_ok());
     }
-
-    #[test]
-    fn test_index_doc_missing_field() {
-        let mut schema_builder = schema::Schema::builder();
-        let idfield = schema_builder.add_text_field("id", STRING);
-        schema_builder.add_text_field("optfield", STRING);
-        let index = Index::create_in_ram(schema_builder.build());
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        index_writer.add_document(doc!(idfield=>"myid"));
-        let commit = index_writer.commit();
-        assert!(commit.is_ok());
-    }
 }
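The opstamp contract documented in the `add_document` hunk is easy to exercise end to end. A hedged sketch against the tantivy API of this era (field names and heap size are illustrative):

    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

        let stamp_a = writer.add_document(doc!(text => "hello"));
        let stamp_b = writer.add_document(doc!(text => "world"));
        assert!(stamp_a < stamp_b); // opstamps increase monotonically

        // commit() returns the opstamp of the last operation made durable.
        let committed = writer.commit()?;
        assert!(committed >= stamp_b);
        Ok(())
    }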
src/indexer/merge_operation.rs

@@ -2,23 +2,14 @@ use crate::Opstamp;
 use crate::SegmentId;
 use census::{Inventory, TrackedObject};
 use std::collections::HashSet;
-use std::ops::Deref;

 #[derive(Default)]
-pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>);
+pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);

-impl Deref for MergeOperationInventory {
-    type Target = Inventory<InnerMergeOperation>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
 impl MergeOperationInventory {
     pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
         let mut segment_in_merge = HashSet::default();
-        for merge_op in self.list() {
+        for merge_op in self.0.list() {
             for &segment_id in &merge_op.segment_ids {
                 segment_in_merge.insert(segment_id);
             }
@@ -44,13 +35,13 @@ pub struct MergeOperation {
     inner: TrackedObject<InnerMergeOperation>,
 }

-pub(crate) struct InnerMergeOperation {
+struct InnerMergeOperation {
     target_opstamp: Opstamp,
     segment_ids: Vec<SegmentId>,
 }

 impl MergeOperation {
-    pub(crate) fn new(
+    pub fn new(
         inventory: &MergeOperationInventory,
         target_opstamp: Opstamp,
         segment_ids: Vec<SegmentId>,
@@ -60,7 +51,7 @@ impl MergeOperation {
             segment_ids,
         };
         MergeOperation {
-            inner: inventory.track(inner_merge_operation),
+            inner: inventory.0.track(inner_merge_operation),
         }
     }
 }

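The `Deref` impl added on the left-hand side of this diff is the classic newtype trade-off: deref-ing a wrapper to its inner collection removes the `.0` noise at call sites, at the cost of exposing the inner type's whole API. A small standalone sketch (hypothetical types):

    use std::ops::Deref;

    struct Inventory(Vec<&'static str>);

    impl Deref for Inventory {
        type Target = Vec<&'static str>;

        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    fn main() {
        let inv = Inventory(vec!["seg-a", "seg-b"]);
        // With Deref, the inner Vec's methods are reachable directly.
        assert_eq!(inv.len(), 2);
        // Without Deref, the same call would read inv.0.len().
        assert_eq!(inv.0.len(), 2);
    }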
@@ -709,7 +709,7 @@ mod tests {
     use crate::IndexWriter;
     use crate::Searcher;
     use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
-    use futures::executor::block_on;
+    use futures::Future;
     use std::io::Cursor;

     #[test]
@@ -792,7 +792,11 @@ mod tests {
             .searchable_segment_ids()
             .expect("Searchable segments failed.");
         let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
+        index_writer
+            .merge(&segment_ids)
+            .expect("Failed to initiate merge")
+            .wait()
+            .expect("Merging failed");
         index_writer.wait_merging_threads().unwrap();
     }
     {
@@ -1036,7 +1040,11 @@ mod tests {
         let segment_ids = index
             .searchable_segment_ids()
             .expect("Searchable segments failed.");
-        block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
+        index_writer
+            .merge(&segment_ids)
+            .expect("Failed to initiate merge")
+            .wait()
+            .expect("Merging failed");
         reader.reload().unwrap();
         let searcher = reader.searcher();
         assert_eq!(searcher.segment_readers().len(), 1);
@@ -1131,7 +1139,11 @@ mod tests {
         let segment_ids = index
             .searchable_segment_ids()
             .expect("Searchable segments failed.");
-        block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
+        index_writer
+            .merge(&segment_ids)
+            .expect("Failed to initiate merge")
+            .wait()
+            .expect("Merging failed");
         reader.reload().unwrap();

         let searcher = reader.searcher();
@@ -1265,7 +1277,11 @@ mod tests {
             .searchable_segment_ids()
             .expect("Searchable segments failed.");
         let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
+        index_writer
+            .merge(&segment_ids)
+            .expect("Failed to initiate merge")
+            .wait()
+            .expect("Merging failed");
         index_writer.wait_merging_threads().unwrap();
         reader.reload().unwrap();
         test_searcher(
@@ -1320,7 +1336,11 @@ mod tests {
         let segment_ids = index
             .searchable_segment_ids()
             .expect("Searchable segments failed.");
-        block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
+        index_writer
+            .merge(&segment_ids)
+            .expect("Failed to initiate merge")
+            .wait()
+            .expect("Merging failed");
         reader.reload().unwrap();
         // commit has not been called yet. The document should still be
         // there.
@@ -1341,18 +1361,22 @@ mod tests {
         let mut doc = Document::default();
         doc.add_u64(int_field, 1);
         index_writer.add_document(doc.clone());
-        assert!(index_writer.commit().is_ok());
+        index_writer.commit().expect("commit failed");
         index_writer.add_document(doc);
-        assert!(index_writer.commit().is_ok());
+        index_writer.commit().expect("commit failed");
         index_writer.delete_term(Term::from_field_u64(int_field, 1));

         let segment_ids = index
             .searchable_segment_ids()
             .expect("Searchable segments failed.");
-        assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
+        index_writer
+            .merge(&segment_ids)
+            .expect("Failed to initiate merge")
+            .wait()
+            .expect("Merging failed");

         // assert delete has not been committed
-        assert!(reader.reload().is_ok());
+        reader.reload().expect("failed to load searcher 1");
         let searcher = reader.searcher();
         assert_eq!(searcher.num_docs(), 2);

@@ -1391,12 +1415,12 @@ mod tests {
             index_doc(&mut index_writer, &[1, 5]);
             index_doc(&mut index_writer, &[3]);
             index_doc(&mut index_writer, &[17]);
-            assert!(index_writer.commit().is_ok());
+            index_writer.commit().expect("committed");
             index_doc(&mut index_writer, &[20]);
-            assert!(index_writer.commit().is_ok());
+            index_writer.commit().expect("committed");
             index_doc(&mut index_writer, &[28, 27]);
             index_doc(&mut index_writer, &[1_000]);
-            assert!(index_writer.commit().is_ok());
+            index_writer.commit().expect("committed");
         }
         let reader = index.reader().unwrap();
         let searcher = reader.searcher();
@@ -1428,6 +1452,15 @@ mod tests {
             assert_eq!(&vals, &[17]);
         }

+        println!(
+            "{:?}",
+            searcher
+                .segment_readers()
+                .iter()
+                .map(|reader| reader.max_doc())
+                .collect::<Vec<_>>()
+        );
+
         {
             let segment = searcher.segment_reader(1u32);
             let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1451,13 +1484,27 @@ mod tests {
                 .searchable_segment_ids()
                 .expect("Searchable segments failed.");
             let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-            assert!(block_on(index_writer.merge(&segment_ids)).is_ok());
-            assert!(index_writer.wait_merging_threads().is_ok());
+            index_writer
+                .merge(&segment_ids)
+                .expect("Failed to initiate merge")
+                .wait()
+                .expect("Merging failed");
+            index_writer
+                .wait_merging_threads()
+                .expect("Wait for merging threads");
         }
-        assert!(reader.reload().is_ok());
+        reader.reload().expect("Load searcher");

         {
             let searcher = reader.searcher();
+            println!(
+                "{:?}",
+                searcher
+                    .segment_readers()
+                    .iter()
+                    .map(|reader| reader.max_doc())
+                    .collect::<Vec<_>>()
+            );
             let segment = searcher.segment_reader(0u32);
             let ff_reader = segment.fast_fields().u64s(int_field).unwrap();

@@ -1492,46 +1539,4 @@ mod tests {
             assert_eq!(&vals, &[20]);
         }
     }
-
-    #[test]
-    fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
-        let mut builder = schema::SchemaBuilder::new();
-
-        let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);
-
-        let field = builder.add_f64_field("f64", schema::FAST);
-        let multi_field = builder.add_f64_field("f64s", fast_multi);
-
-        let index = Index::create_in_ram(builder.build());
-
-        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
-
-        // Make sure we'll attempt to merge every created segment
-        let mut policy = crate::indexer::LogMergePolicy::default();
-        policy.set_min_merge_size(2);
-        writer.set_merge_policy(Box::new(policy));
-
-        for i in 0..100 {
-            let mut doc = Document::new();
-            doc.add_f64(field, 42.0);
-
-            doc.add_f64(multi_field, 0.24);
-            doc.add_f64(multi_field, 0.27);
-
-            writer.add_document(doc);
-
-            if i % 5 == 0 {
-                writer.commit()?;
-            }
-        }
-
-        writer.commit()?;
-        writer.wait_merging_threads()?;
-
-        // If a merging thread fails, we should end up with more
-        // than one segment here
-        assert_eq!(1, index.searchable_segments()?.len());
-
-        Ok(())
-    }
 }
src/indexer/mod.rs

@@ -18,7 +18,7 @@ mod stamper;

 pub use self::index_writer::IndexWriter;
 pub use self::log_merge_policy::LogMergePolicy;
-pub use self::merge_operation::MergeOperation;
+pub use self::merge_operation::{MergeOperation, MergeOperationInventory};
 pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
 pub use self::prepared_commit::PreparedCommit;
 pub use self::segment_entry::SegmentEntry;
@@ -28,26 +28,3 @@ pub use self::segment_writer::SegmentWriter;

 /// Alias for the default merge policy, which is the `LogMergePolicy`.
 pub type DefaultMergePolicy = LogMergePolicy;
-
-#[cfg(test)]
-mod tests {
-    use crate::schema::{self, Schema};
-    use crate::{Index, Term};
-
-    #[test]
-    fn test_advance_delete_bug() {
-        let mut schema_builder = Schema::builder();
-        let text_field = schema_builder.add_text_field("text", schema::TEXT);
-        let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
-        let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
-        // there must be one deleted document in the segment
-        index_writer.add_document(doc!(text_field=>"b"));
-        index_writer.delete_term(Term::from_field_text(text_field, "b"));
-        // we need enough data to trigger the bug (at least 32 documents)
-        for _ in 0..32 {
-            index_writer.add_document(doc!(text_field=>"c"));
-        }
-        index_writer.commit().unwrap();
-        index_writer.commit().unwrap();
-    }
-}
src/indexer/operation.rs

@@ -19,8 +19,6 @@ pub struct AddOperation {
 /// UserOperation is an enum type that encapsulates other operation types.
 #[derive(Eq, PartialEq, Debug)]
 pub enum UserOperation {
-    /// Add operation
     Add(Document),
-    /// Delete operation
     Delete(Term),
 }
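A `UserOperation`-style enum exists so that a batch of heterogeneous operations can be stamped and applied as one unit. A standalone sketch of the idea (hypothetical types, not tantivy's API):

    #[derive(Debug)]
    enum UserOp {
        Add(String),          // stand-in for Add(Document)
        Delete(&'static str), // stand-in for Delete(Term)
    }

    fn run_batch(next_opstamp: &mut u64, batch: Vec<UserOp>) -> u64 {
        for op in &batch {
            *next_opstamp += 1;
            match op {
                UserOp::Add(doc) => println!("opstamp {}: add {:?}", next_opstamp, doc),
                UserOp::Delete(term) => println!("opstamp {}: delete {:?}", next_opstamp, term),
            }
        }
        *next_opstamp // opstamp of the last operation in the batch
    }

    fn main() {
        let mut stamp = 0;
        let last = run_batch(&mut stamp, vec![
            UserOp::Add("doc-1".to_string()),
            UserOp::Delete("doc-0"),
        ]);
        assert_eq!(last, 2);
    }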
src/indexer/prepared_commit.rs

@@ -1,7 +1,6 @@
 use super::IndexWriter;
 use crate::Opstamp;
 use crate::Result;
-use futures::executor::block_on;

 /// A prepared commit
 pub struct PreparedCommit<'a> {
@@ -33,11 +32,9 @@ impl<'a> PreparedCommit<'a> {

     pub fn commit(self) -> Result<Opstamp> {
         info!("committing {}", self.opstamp);
-        let _ = block_on(
-            self.index_writer
-                .segment_updater()
-                .schedule_commit(self.opstamp, self.payload),
-        );
+        self.index_writer
+            .segment_updater()
+            .commit(self.opstamp, self.payload)?;
         Ok(self.opstamp)
     }
 }
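`PreparedCommit` gives callers a two-phase commit: flush first, publish later, optionally with an opaque payload recorded in the index metadata. A hedged usage sketch against the tantivy API of this era (schema and payload are illustrative):

    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
        writer.add_document(doc!(text => "hello"));

        // Phase 1: flush segments, but do not publish them yet.
        let mut prepared = writer.prepare_commit()?;
        prepared.set_payload("checkpoint-42"); // opaque marker stored in meta.json
        // Phase 2: atomically publish the commit.
        let opstamp = prepared.commit()?;
        println!("committed at opstamp {}", opstamp);
        Ok(())
    }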
src/indexer/segment_entry.rs

@@ -1,7 +1,7 @@
-use crate::common::BitSet;
 use crate::core::SegmentId;
 use crate::core::SegmentMeta;
 use crate::indexer::delete_queue::DeleteCursor;
+use bit_set::BitSet;
 use std::fmt;

 /// A segment entry describes the state of
src/indexer/segment_manager.rs

@@ -16,28 +16,6 @@ struct SegmentRegisters {
     committed: SegmentRegister,
 }

-#[derive(PartialEq, Eq)]
-pub(crate) enum SegmentsStatus {
-    Committed,
-    Uncommitted,
-}
-
-impl SegmentRegisters {
-    /// Check if all the segments are committed or uncommited.
-    ///
-    /// If some segment is missing or segments are in a different state (this should not happen
-    /// if tantivy is used correctly), returns `None`.
-    fn segments_status(&self, segment_ids: &[SegmentId]) -> Option<SegmentsStatus> {
-        if self.uncommitted.contains_all(segment_ids) {
-            Some(SegmentsStatus::Uncommitted)
-        } else if self.committed.contains_all(segment_ids) {
-            Some(SegmentsStatus::Committed)
-        } else {
-            None
-        }
-    }
-}
-
 /// The segment manager stores the list of segments
 /// as well as their state.
 ///
@@ -175,35 +153,33 @@ impl SegmentManager {
         let mut registers_lock = self.write();
         registers_lock.uncommitted.add_segment_entry(segment_entry);
     }
-    // Replace a list of segments for their equivalent merged segment.
-    //
-    // Returns true if these segments are committed, false if the merge segments are uncommited.
-    pub(crate) fn end_merge(
+
+    pub fn end_merge(
         &self,
         before_merge_segment_ids: &[SegmentId],
         after_merge_segment_entry: SegmentEntry,
-    ) -> crate::Result<SegmentsStatus> {
+    ) {
         let mut registers_lock = self.write();
-        let segments_status = registers_lock
-            .segments_status(before_merge_segment_ids)
-            .ok_or_else(|| {
+        let target_register: &mut SegmentRegister = {
+            if registers_lock
+                .uncommitted
+                .contains_all(before_merge_segment_ids)
+            {
+                &mut registers_lock.uncommitted
+            } else if registers_lock
+                .committed
+                .contains_all(before_merge_segment_ids)
+            {
+                &mut registers_lock.committed
+            } else {
                 warn!("couldn't find segment in SegmentManager");
-                crate::Error::InvalidArgument(
-                    "The segments that were merged could not be found in the SegmentManager. \
-                     This is not necessarily a bug, and can happen after a rollback for instance."
-                        .to_string(),
-                )
-            })?;
-
-        let target_register: &mut SegmentRegister = match segments_status {
-            SegmentsStatus::Uncommitted => &mut registers_lock.uncommitted,
-            SegmentsStatus::Committed => &mut registers_lock.committed,
+                return;
+            }
         };
         for segment_id in before_merge_segment_ids {
             target_register.remove_segment(segment_id);
         }
         target_register.add_segment_entry(after_merge_segment_entry);
-        Ok(segments_status)
     }

     pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {
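The left-hand side of the `end_merge` hunk classifies the merged segments once into an explicit status enum, which lets the caller both pick the right register and surface an error; the right-hand side silently returns when the segments cannot be found. A standalone sketch of the enum-based variant (hypothetical types, segment ids shown as plain integers):

    #[derive(Debug, PartialEq, Eq)]
    enum SegmentsStatus {
        Committed,
        Uncommitted,
    }

    fn segments_status(uncommitted: &[u32], committed: &[u32], ids: &[u32]) -> Option<SegmentsStatus> {
        if ids.iter().all(|id| uncommitted.contains(id)) {
            Some(SegmentsStatus::Uncommitted)
        } else if ids.iter().all(|id| committed.contains(id)) {
            Some(SegmentsStatus::Committed)
        } else {
            None // mixed or unknown, e.g. after a rollback
        }
    }

    fn main() {
        let uncommitted = [1, 2];
        let committed = [3];
        assert_eq!(
            segments_status(&uncommitted, &committed, &[1, 2]),
            Some(SegmentsStatus::Uncommitted)
        );
        assert_eq!(segments_status(&uncommitted, &committed, &[1, 3]), None);
    }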
|
|||||||
@@ -6,34 +6,39 @@ use crate::core::SegmentId;
|
|||||||
use crate::core::SegmentMeta;
|
use crate::core::SegmentMeta;
|
||||||
use crate::core::SerializableSegment;
|
use crate::core::SerializableSegment;
|
||||||
use crate::core::META_FILEPATH;
|
use crate::core::META_FILEPATH;
|
||||||
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult};
|
use crate::directory::{Directory, DirectoryClone};
|
||||||
|
use crate::error::TantivyError;
|
||||||
use crate::indexer::delete_queue::DeleteCursor;
|
use crate::indexer::delete_queue::DeleteCursor;
|
||||||
use crate::indexer::index_writer::advance_deletes;
|
use crate::indexer::index_writer::advance_deletes;
|
||||||
use crate::indexer::merge_operation::MergeOperationInventory;
|
use crate::indexer::merge_operation::MergeOperationInventory;
|
||||||
use crate::indexer::merger::IndexMerger;
|
use crate::indexer::merger::IndexMerger;
|
||||||
use crate::indexer::segment_manager::SegmentsStatus;
|
|
||||||
use crate::indexer::stamper::Stamper;
|
use crate::indexer::stamper::Stamper;
|
||||||
|
use crate::indexer::MergeOperation;
|
||||||
use crate::indexer::SegmentEntry;
|
use crate::indexer::SegmentEntry;
|
||||||
use crate::indexer::SegmentSerializer;
|
use crate::indexer::SegmentSerializer;
|
||||||
use crate::indexer::{DefaultMergePolicy, MergePolicy};
|
use crate::indexer::{DefaultMergePolicy, MergePolicy};
|
||||||
use crate::indexer::{MergeCandidate, MergeOperation};
|
|
||||||
use crate::schema::Schema;
|
use crate::schema::Schema;
|
||||||
use crate::Opstamp;
|
use crate::Opstamp;
|
||||||
use futures::channel::oneshot;
|
use crate::Result;
|
||||||
use futures::executor::{ThreadPool, ThreadPoolBuilder};
|
use futures::oneshot;
|
||||||
use futures::future::Future;
|
use futures::sync::oneshot::Receiver;
|
||||||
use futures::future::TryFutureExt;
|
use futures::Future;
|
||||||
|
use futures_cpupool::Builder as CpuPoolBuilder;
|
||||||
|
use futures_cpupool::CpuFuture;
|
||||||
|
use futures_cpupool::CpuPool;
|
||||||
use serde_json;
|
use serde_json;
|
||||||
use std::borrow::BorrowMut;
|
use std::borrow::BorrowMut;
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::ops::Deref;
|
use std::mem;
|
||||||
|
use std::ops::DerefMut;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::RwLock;
|
use std::sync::RwLock;
|
||||||
|
use std::thread;
|
||||||
const NUM_MERGE_THREADS: usize = 4;
|
use std::thread::JoinHandle;
|
||||||
|
|
||||||
/// Save the index meta file.
|
/// Save the index meta file.
|
||||||
/// This operation is atomic :
|
/// This operation is atomic :
|
||||||
@@ -44,7 +49,7 @@ const NUM_MERGE_THREADS: usize = 4;
|
|||||||
/// and flushed.
|
/// and flushed.
|
||||||
///
|
///
|
||||||
/// This method is not part of tantivy's public API
|
/// This method is not part of tantivy's public API
|
||||||
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> {
|
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
|
||||||
save_metas(
|
save_metas(
|
||||||
&IndexMeta {
|
&IndexMeta {
|
||||||
segments: Vec::new(),
|
segments: Vec::new(),
|
||||||
@@ -65,7 +70,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
|
|||||||
/// and flushed.
|
/// and flushed.
|
||||||
///
|
///
|
||||||
/// This method is not part of tantivy's public API
|
/// This method is not part of tantivy's public API
|
||||||
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> {
|
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
|
||||||
info!("save metas");
|
info!("save metas");
|
||||||
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
let mut buffer = serde_json::to_vec_pretty(metas)?;
|
||||||
// Just adding a new line at the end of the buffer.
|
// Just adding a new line at the end of the buffer.
|
||||||
@@ -84,38 +89,21 @@ fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result
|
|||||||
// We voluntarily pass a merge_operation ref to guarantee that
|
// We voluntarily pass a merge_operation ref to guarantee that
|
||||||
// the merge_operation is alive during the process
|
// the merge_operation is alive during the process
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>);
|
pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
|
||||||
|
|
||||||
impl Deref for SegmentUpdater {
|
fn perform_merge(
|
||||||
type Target = InnerSegmentUpdater;
|
merge_operation: &MergeOperation,
|
||||||
|
|
||||||
#[inline]
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn garbage_collect_files(
|
|
||||||
segment_updater: SegmentUpdater,
|
|
||||||
) -> crate::Result<GarbageCollectionResult> {
|
|
||||||
info!("Running garbage collection");
|
|
||||||
let mut index = segment_updater.index.clone();
|
|
||||||
index
|
|
||||||
.directory_mut()
|
|
||||||
.garbage_collect(move || segment_updater.list_files())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Merges a list of segments the list of segment givens in the `segment_entries`.
|
|
||||||
/// This function happens in the calling thread and is computationally expensive.
|
|
||||||
fn merge(
|
|
||||||
index: &Index,
|
index: &Index,
|
||||||
mut segment_entries: Vec<SegmentEntry>,
|
mut segment_entries: Vec<SegmentEntry>,
|
||||||
target_opstamp: Opstamp,
|
) -> Result<SegmentEntry> {
|
||||||
) -> crate::Result<SegmentEntry> {
|
let target_opstamp = merge_operation.target_opstamp();
|
||||||
|
|
||||||
// first we need to apply deletes to our segment.
|
// first we need to apply deletes to our segment.
|
||||||
let mut merged_segment = index.new_segment();
|
let mut merged_segment = index.new_segment();
|
||||||
|
|
||||||
// First we apply all of the delet to the merged segment, up to the target opstamp.
|
// TODO add logging
|
||||||
|
let schema = index.schema();
|
||||||
|
|
||||||
for segment_entry in &mut segment_entries {
|
for segment_entry in &mut segment_entries {
|
||||||
let segment = index.segment(segment_entry.meta().clone());
|
let segment = index.segment(segment_entry.meta().clone());
|
||||||
advance_deletes(segment, segment_entry, target_opstamp)?;
|
advance_deletes(segment, segment_entry, target_opstamp)?;
|
||||||
@@ -129,19 +117,22 @@ fn merge(
|
|||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
// An IndexMerger is like a "view" of our merged segments.
|
// An IndexMerger is like a "view" of our merged segments.
|
||||||
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?;
|
let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?;
|
||||||
|
|
||||||
|
// ... we just serialize this index merger in our new segment
|
||||||
|
// to merge the two segments.
|
||||||
|
|
||||||
// ... we just serialize this index merger in our new segment to merge the two segments.
|
|
||||||
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
|
let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
|
||||||
|
|
||||||
let num_docs = merger.write(segment_serializer)?;
|
let num_docs = merger.write(segment_serializer)?;
|
||||||
|
|
||||||
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
|
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
|
||||||
|
|
||||||
Ok(SegmentEntry::new(segment_meta, delete_cursor, None))
|
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
|
||||||
|
Ok(after_merge_segment_entry)
|
||||||
}
|
}
|
||||||
|
|
||||||
-pub(crate) struct InnerSegmentUpdater {
+struct InnerSegmentUpdater {
     // we keep a copy of the current active IndexMeta to
     // avoid loading the file every time we need it in the
     // `SegmentUpdater`.
@@ -149,12 +140,12 @@ pub(crate) struct InnerSegmentUpdater {
     // This should be up to date, as all updates happen through
     // the unique active `SegmentUpdater`.
     active_metas: RwLock<Arc<IndexMeta>>,
-    pool: ThreadPool,
-    merge_thread_pool: ThreadPool,
+    pool: CpuPool,
     index: Index,
     segment_manager: SegmentManager,
     merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
+    merging_thread_id: AtomicUsize,
+    merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
     killed: AtomicBool,
     stamper: Stamper,
     merge_operations: MergeOperationInventory,
@@ -165,31 +156,22 @@ impl SegmentUpdater {
         index: Index,
         stamper: Stamper,
         delete_cursor: &DeleteCursor,
-    ) -> crate::Result<SegmentUpdater> {
+    ) -> Result<SegmentUpdater> {
         let segments = index.searchable_segment_metas()?;
         let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
-        let pool = ThreadPoolBuilder::new()
+        let pool = CpuPoolBuilder::new()
             .name_prefix("segment_updater")
             .pool_size(1)
-            .create()
-            .map_err(|_| {
-                crate::Error::SystemError("Failed to spawn segment updater thread".to_string())
-            })?;
-        let merge_thread_pool = ThreadPoolBuilder::new()
-            .name_prefix("merge_thread")
-            .pool_size(NUM_MERGE_THREADS)
-            .create()
-            .map_err(|_| {
-                crate::Error::SystemError("Failed to spawn segment merging thread".to_string())
-            })?;
+            .create();
         let index_meta = index.load_metas()?;
         Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
             active_metas: RwLock::new(Arc::new(index_meta)),
             pool,
-            merge_thread_pool,
             index,
             segment_manager,
             merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
+            merging_thread_id: AtomicUsize::default(),
+            merging_threads: RwLock::new(HashMap::new()),
             killed: AtomicBool::new(false),
             stamper,
             merge_operations: Default::default(),
@@ -197,82 +179,67 @@ impl SegmentUpdater {
     }

     pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
-        self.merge_policy.read().unwrap().clone()
+        self.0.merge_policy.read().unwrap().clone()
     }

     pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
         let arc_merge_policy = Arc::new(merge_policy);
-        *self.merge_policy.write().unwrap() = arc_merge_policy;
+        *self.0.merge_policy.write().unwrap() = arc_merge_policy;
     }

-    fn schedule_future<T: 'static + Send, F: Future<Output = crate::Result<T>> + 'static + Send>(
+    fn get_merging_thread_id(&self) -> usize {
+        self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
+    }
+
+    fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>(
         &self,
         f: F,
-    ) -> impl Future<Output = crate::Result<T>> {
-        let (sender, receiver) = oneshot::channel();
-        if self.is_alive() {
-            self.pool.spawn_ok(async move {
-                let _ = sender.send(f.await);
-            });
-        } else {
-            let _ = sender.send(Err(crate::TantivyError::SystemError(
-                "Segment updater killed".to_string(),
-            )));
-        }
-        receiver.unwrap_or_else(|_| {
-            let err_msg =
-                "A segment_updater future did not succeed. This should never happen.".to_string();
-            Err(crate::Error::SystemError(err_msg))
-        })
+    ) -> CpuFuture<T, TantivyError> {
+        let me_clone = self.clone();
+        self.0.pool.spawn_fn(move || Ok(f(me_clone)))
     }

-    pub fn schedule_add_segment(
-        &self,
-        segment_entry: SegmentEntry,
-    ) -> impl Future<Output = crate::Result<()>> {
-        let segment_updater = self.clone();
-        self.schedule_future(async move {
-            segment_updater.segment_manager.add_segment(segment_entry);
-            segment_updater.consider_merge_options().await;
-            Ok(())
-        })
+    pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
+        self.run_async(|segment_updater| {
+            segment_updater.0.segment_manager.add_segment(segment_entry);
+            segment_updater.consider_merge_options();
+            true
+        })
+        .forget();
+        true
     }
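Aside: the left-hand `schedule_future` uses a standard pattern — spawn the work on a pool and hand the caller a oneshot receiver that resolves when the task finishes. A minimal sketch of that pattern with the futures 0.3 crate (an illustration under that assumption, not tantivy's exact code):

    use futures::channel::oneshot;
    use futures::executor::{block_on, ThreadPool};
    use std::future::Future;

    // Spawn `f` on the pool; the returned receiver resolves once `f` completes.
    fn schedule<T, F>(pool: &ThreadPool, f: F) -> oneshot::Receiver<T>
    where
        T: Send + 'static,
        F: Future<Output = T> + Send + 'static,
    {
        let (sender, receiver) = oneshot::channel();
        pool.spawn_ok(async move {
            // `send` only fails if the receiver was dropped; the result is then discarded.
            let _ = sender.send(f.await);
        });
        receiver
    }

    fn main() {
        let pool = ThreadPool::builder().pool_size(1).create().unwrap();
        let receiver = schedule(&pool, async { 21 * 2 });
        assert_eq!(block_on(receiver), Ok(42));
    }

The single-thread pool doubles as a serialization point: every mutation of the segment manager runs on the same thread, which is why the updater can avoid finer-grained locking.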

     /// Orders `SegmentManager` to remove all segments
     pub(crate) fn remove_all_segments(&self) {
-        self.segment_manager.remove_all_segments();
+        self.0.segment_manager.remove_all_segments();
     }

     pub fn kill(&mut self) {
-        self.killed.store(true, Ordering::Release);
+        self.0.killed.store(true, Ordering::Release);
     }

     pub fn is_alive(&self) -> bool {
-        !self.killed.load(Ordering::Acquire)
+        !self.0.killed.load(Ordering::Acquire)
     }

     /// Apply deletes up to the target opstamp to all segments.
     ///
     /// The method returns copies of the segment entries,
     /// updated with the delete information.
-    fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> {
-        let mut segment_entries = self.segment_manager.segment_entries();
+    fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
+        let mut segment_entries = self.0.segment_manager.segment_entries();
         for segment_entry in &mut segment_entries {
-            let segment = self.index.segment(segment_entry.meta().clone());
+            let segment = self.0.index.segment(segment_entry.meta().clone());
             advance_deletes(segment, segment_entry, target_opstamp)?;
         }
         Ok(segment_entries)
     }

-    pub fn save_metas(
-        &self,
-        opstamp: Opstamp,
-        commit_message: Option<String>,
-    ) -> crate::Result<()> {
+    pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) {
         if self.is_alive() {
-            let index = &self.index;
+            let index = &self.0.index;
             let directory = index.directory();
-            let mut commited_segment_metas = self.segment_manager.committed_segment_metas();
+            let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();

             // We sort segment_readers by number of documents.
             // This is a heuristic to make multithreading more efficient.
@@ -294,18 +261,16 @@ impl SegmentUpdater {
                 opstamp,
                 payload: commit_message,
             };
-            // TODO add context to the error.
-            save_metas(&index_meta, directory.box_clone().borrow_mut())?;
+            save_metas(&index_meta, directory.box_clone().borrow_mut())
+                .expect("Could not save metas.");
             self.store_meta(&index_meta);
         }
-        Ok(())
     }

-    pub fn schedule_garbage_collect(
-        &self,
-    ) -> impl Future<Output = crate::Result<GarbageCollectionResult>> {
-        let garbage_collect_future = garbage_collect_files(self.clone());
-        self.schedule_future(garbage_collect_future)
+    pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> {
+        self.run_async(move |segment_updater| {
+            segment_updater.garbage_collect_files_exec();
+        })
     }

     /// List the files that are useful to the index.
@@ -313,130 +278,148 @@ impl SegmentUpdater {
     /// This does not include lock files, or files that are obsolete
     /// but have not yet been deleted by the garbage collector.
     fn list_files(&self) -> HashSet<PathBuf> {
-        let mut files: HashSet<PathBuf> = self
-            .index
-            .list_all_segment_metas()
-            .into_iter()
-            .flat_map(|segment_meta| segment_meta.list_files())
-            .collect();
+        let mut files = HashSet::new();
         files.insert(META_FILEPATH.to_path_buf());
+        for segment_meta in self.0.index.list_all_segment_metas() {
+            files.extend(segment_meta.list_files());
+        }
         files
     }

-    pub fn schedule_commit(
-        &self,
-        opstamp: Opstamp,
-        payload: Option<String>,
-    ) -> impl Future<Output = crate::Result<()>> {
-        let segment_updater: SegmentUpdater = self.clone();
-        self.schedule_future(async move {
-            let segment_entries = segment_updater.purge_deletes(opstamp)?;
-            segment_updater.segment_manager.commit(segment_entries);
-            segment_updater.save_metas(opstamp, payload)?;
-            let _ = garbage_collect_files(segment_updater.clone()).await;
-            segment_updater.consider_merge_options().await;
-            Ok(())
-        })
+    fn garbage_collect_files_exec(&self) {
+        info!("Running garbage collection");
+        let mut index = self.0.index.clone();
+        index.directory_mut().garbage_collect(|| self.list_files());
+    }
+
+    pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> {
+        self.run_async(move |segment_updater| {
+            if segment_updater.is_alive() {
+                let segment_entries = segment_updater
+                    .purge_deletes(opstamp)
+                    .expect("Failed to purge deletes");
+                segment_updater.0.segment_manager.commit(segment_entries);
+                segment_updater.save_metas(opstamp, payload);
+                segment_updater.garbage_collect_files_exec();
+                segment_updater.consider_merge_options();
+            }
+        })
+        .wait()
+    }
+
+    pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
+        let commit_opstamp = self.load_metas().opstamp;
+        let merge_operation = MergeOperation::new(
+            &self.0.merge_operations,
+            commit_opstamp,
+            segment_ids.to_vec(),
+        );
+        self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
+            .wait()?
     }

     fn store_meta(&self, index_meta: &IndexMeta) {
-        *self.active_metas.write().unwrap() = Arc::new(index_meta.clone());
+        *self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone());
     }

     fn load_metas(&self) -> Arc<IndexMeta> {
-        self.active_metas.read().unwrap().clone()
+        self.0.active_metas.read().unwrap().clone()
     }

-    pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
-        let commit_opstamp = self.load_metas().opstamp;
-        MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
-    }
-
-    // Starts a merge operation. This function will block until the merge operation is effectively
-    // started. Note that it does not wait for the merge to terminate.
-    // The calling thread should not be blocked for a long time, as this only involves waiting for
-    // the `SegmentUpdater` queue, which in turn only contains lightweight operations.
-    //
-    // The merge itself happens on a different thread.
-    //
-    // When successful, this function returns a `Future` for a `Result<SegmentMeta>` that represents
-    // the actual outcome of the merge operation.
-    //
-    // It returns an error if for some reason the merge operation could not be started.
-    //
-    // At this point an error is not necessarily the sign of a malfunction.
-    // (e.g. A rollback could have happened, between the instant when the merge operation was
-    // suggested and the moment when it ended up being executed.)
-    //
     // `segment_ids` is required to be non-empty.
-    pub fn start_merge(
-        &self,
-        merge_operation: MergeOperation,
-    ) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
+    fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> {
         assert!(
             !merge_operation.segment_ids().is_empty(),
             "Segment_ids cannot be empty."
         );

-        let segment_updater = self.clone();
+        let segment_updater_clone = self.clone();
         let segment_entries: Vec<SegmentEntry> = self
+            .0
             .segment_manager
             .start_merge(merge_operation.segment_ids())?;

-        info!("Starting merge - {:?}", merge_operation.segment_ids());
+        // let segment_ids_vec = merge_operation.segment_ids.to_vec();

-        let (merging_future_send, merging_future_recv) =
-            oneshot::channel::<crate::Result<SegmentMeta>>();
+        let merging_thread_id = self.get_merging_thread_id();
+        info!(
+            "Starting merge thread #{} - {:?}",
+            merging_thread_id,
+            merge_operation.segment_ids()
+        );
+        let (merging_future_send, merging_future_recv) = oneshot();

-        self.merge_thread_pool.spawn_ok(async move {
-            // The fact that `merge_operation` is moved here is important.
-            // Its lifetime is used to track how many merging threads are currently running,
-            // as well as which segments are currently in merge and therefore should not be
-            // candidates for another merge.
-            match merge(
-                &segment_updater.index,
-                segment_entries,
-                merge_operation.target_opstamp(),
-            ) {
-                Ok(after_merge_segment_entry) => {
-                    let segment_meta = segment_updater
-                        .end_merge(merge_operation, after_merge_segment_entry)
-                        .await;
-                    let _send_result = merging_future_send.send(segment_meta);
-                }
-                Err(e) => {
-                    warn!(
-                        "Merge of {:?} was cancelled: {:?}",
-                        merge_operation.segment_ids().to_vec(),
-                        e
-                    );
-                    // ... cancel merge
-                    if cfg!(test) {
-                        panic!("Merge failed.");
-                    }
-                }
-            }
-        });
-
-        Ok(merging_future_recv
-            .unwrap_or_else(|_| Err(crate::Error::SystemError("Merge failed".to_string()))))
+        // first we need to apply deletes to our segment.
+        let merging_join_handle = thread::Builder::new()
+            .name(format!("mergingthread-{}", merging_thread_id))
+            .spawn(move || {
+                // first we need to apply deletes to our segment.
+                let merge_result = perform_merge(
+                    &merge_operation,
+                    &segment_updater_clone.0.index,
+                    segment_entries,
+                );
+                match merge_result {
+                    Ok(after_merge_segment_entry) => {
+                        let merged_segment_meta = after_merge_segment_entry.meta().clone();
+                        segment_updater_clone
+                            .end_merge(merge_operation, after_merge_segment_entry)
+                            .expect("Segment updater thread is corrupted.");
+                        // the future may fail if the listener of the oneshot future
+                        // has been destroyed.
+                        //
+                        // This is not a problem here, so we just ignore any
+                        // possible error.
+                        let _merging_future_res = merging_future_send.send(merged_segment_meta);
+                    }
+                    Err(e) => {
+                        warn!(
+                            "Merge of {:?} was cancelled: {:?}",
+                            merge_operation.segment_ids(),
+                            e
+                        );
+                        // ... cancel merge
+                        if cfg!(test) {
+                            panic!("Merge failed.");
+                        }
+                        // As `merge_operation` will be dropped, the segments in merge state will
+                        // be available for merge again.
+                        // `merging_future_send` will be dropped, sending an error to the future.
+                    }
+                }
+                segment_updater_clone
+                    .0
+                    .merging_threads
+                    .write()
+                    .unwrap()
+                    .remove(&merging_thread_id);
+                Ok(())
+            })
+            .expect("Failed to spawn a thread.");
+        self.0
+            .merging_threads
+            .write()
+            .unwrap()
+            .insert(merging_thread_id, merging_join_handle);
+        Ok(merging_future_recv)
     }

-    async fn consider_merge_options(&self) {
-        let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge();
+    fn consider_merge_options(&self) {
+        let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge();
         let (committed_segments, uncommitted_segments) =
-            get_mergeable_segments(&merge_segment_ids, &self.segment_manager);
+            get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager);

         // Committed segments cannot be merged with uncommitted_segments.
         // We therefore consider merges using these two sets of segments independently.
         let merge_policy = self.get_merge_policy();

-        let current_opstamp = self.stamper.stamp();
+        let current_opstamp = self.0.stamper.stamp();
         let mut merge_candidates: Vec<MergeOperation> = merge_policy
             .compute_merge_candidates(&uncommitted_segments)
             .into_iter()
             .map(|merge_candidate| {
-                MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0)
+                MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0)
             })
             .collect();

@@ -444,18 +427,25 @@ impl SegmentUpdater {
         let committed_merge_candidates = merge_policy
             .compute_merge_candidates(&committed_segments)
             .into_iter()
-            .map(|merge_candidate: MergeCandidate| {
-                MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0)
+            .map(|merge_candidate| {
+                MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0)
             })
             .collect::<Vec<_>>();
         merge_candidates.extend(committed_merge_candidates.into_iter());

         for merge_operation in merge_candidates {
-            if let Err(err) = self.start_merge(merge_operation) {
-                warn!(
-                    "Starting the merge failed for the following reason. This is not fatal. {}",
-                    err
-                );
+            match self.start_merge_impl(merge_operation) {
+                Ok(merge_future) => {
+                    if let Err(e) = merge_future.fuse().poll() {
+                        error!("The merge task failed quickly after starting: {:?}", e);
+                    }
+                }
+                Err(err) => {
+                    warn!(
+                        "Starting the merge failed for the following reason. This is not fatal. {}",
+                        err
+                    );
+                }
             }
         }
     }
@@ -464,17 +454,15 @@ impl SegmentUpdater {
         &self,
         merge_operation: MergeOperation,
         mut after_merge_segment_entry: SegmentEntry,
-    ) -> impl Future<Output = crate::Result<SegmentMeta>> {
-        let segment_updater = self.clone();
-        let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
-        let end_merge_future = self.schedule_future(async move {
+    ) -> Result<()> {
+        self.run_async(move |segment_updater| {
             info!("End merge {:?}", after_merge_segment_entry.meta());
             {
                 let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
                 if let Some(delete_operation) = delete_cursor.get() {
                     let committed_opstamp = segment_updater.load_metas().opstamp;
                     if delete_operation.opstamp < committed_opstamp {
-                        let index = &segment_updater.index;
+                        let index = &segment_updater.0.index;
                         let segment = index.segment(after_merge_segment_entry.meta().clone());
                         if let Err(e) = advance_deletes(
                             segment,
@@ -492,26 +480,21 @@ impl SegmentUpdater {
                             // ... cancel merge
                             // `merge_operations` are tracked. As it is dropped,
                             // the segment_ids will be available again for merge.
-                            return Err(e);
+                            return;
                         }
                     }
                 }
                 let previous_metas = segment_updater.load_metas();
-                let segments_status = segment_updater
+                segment_updater
+                    .0
                     .segment_manager
-                    .end_merge(merge_operation.segment_ids(), after_merge_segment_entry)?;
-
-                if segments_status == SegmentsStatus::Committed {
-                    segment_updater
-                        .save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
-                }
-
-                segment_updater.consider_merge_options().await;
+                    .end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
+                segment_updater.consider_merge_options();
+                segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
             } // we drop all possible handles to a now useless `SegmentMeta`.
-            let _ = garbage_collect_files(segment_updater).await;
-            Ok(())
-        });
-        end_merge_future.map_ok(|_| after_merge_segment_meta)
+            segment_updater.garbage_collect_files_exec();
+        })
+        .wait()
     }

     /// Wait for current merging threads.
@@ -529,9 +512,26 @@ impl SegmentUpdater {
     ///
     /// Obsolete files will eventually be cleaned up
     /// by the directory garbage collector.
-    pub fn wait_merging_thread(&self) -> crate::Result<()> {
-        self.merge_operations.wait_until_empty();
-        Ok(())
+    pub fn wait_merging_thread(&self) -> Result<()> {
+        loop {
+            let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = {
+                let mut merging_threads = self.0.merging_threads.write().unwrap();
+                mem::replace(merging_threads.deref_mut(), HashMap::new())
+            };
+            if merging_threads.is_empty() {
+                return Ok(());
+            }
+            debug!("wait merging thread {}", merging_threads.len());
+            for (_, merging_thread_handle) in merging_threads {
+                merging_thread_handle
+                    .join()
+                    .map(|_| ())
+                    .map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
+            }
+            // Our merging threads may have queued their completed merged segments.
+            // Let's wait for that too.
+            self.run_async(move |_| {}).wait()?;
+        }
     }
 }

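The loop on the right-hand side repeatedly swaps the thread registry for an empty map and joins whatever it drained, because a joined merge thread may itself have registered follow-up work before exiting. A standalone sketch of that drain-and-join pattern (hypothetical `registry`, not tantivy's types):

    use std::collections::HashMap;
    use std::mem;
    use std::sync::{Arc, RwLock};
    use std::thread::JoinHandle;

    fn wait_all(registry: &Arc<RwLock<HashMap<usize, JoinHandle<()>>>>) {
        loop {
            // Swap the registry for an empty map while holding the lock only briefly.
            let handles = {
                let mut guard = registry.write().unwrap();
                mem::take(&mut *guard)
            };
            if handles.is_empty() {
                return; // a full pass found nothing running: done
            }
            for (_, handle) in handles {
                handle.join().expect("merging thread panicked");
            }
            // Joined threads may have registered new work in the meantime; loop again.
        }
    }

    fn main() {
        let registry = Arc::new(RwLock::new(HashMap::new()));
        registry.write().unwrap().insert(0, std::thread::spawn(|| ()));
        wait_all(&registry);
    }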
@@ -687,6 +687,7 @@ mod tests {
         index_writer.segment_updater().remove_all_segments();
         let seg_vec = index_writer
             .segment_updater()
+            .0
             .segment_manager
             .segment_entries();
         assert!(seg_vec.is_empty());

@@ -11,9 +11,9 @@ use crate::schema::Schema;
 use crate::schema::Term;
 use crate::schema::Value;
 use crate::schema::{Field, FieldEntry};
-use crate::tokenizer::{BoxTokenStream, PreTokenizedStream};
-use crate::tokenizer::{FacetTokenizer, TextAnalyzer};
-use crate::tokenizer::{TokenStreamChain, Tokenizer};
+use crate::tokenizer::BoxedTokenizer;
+use crate::tokenizer::FacetTokenizer;
+use crate::tokenizer::{TokenStream, Tokenizer};
 use crate::DocId;
 use crate::Opstamp;
 use crate::Result;
@@ -49,7 +49,7 @@ pub struct SegmentWriter {
     fast_field_writers: FastFieldsWriter,
     fieldnorms_writer: FieldNormsWriter,
     doc_opstamps: Vec<Opstamp>,
-    tokenizers: Vec<Option<TextAnalyzer>>,
+    tokenizers: Vec<Option<BoxedTokenizer>>,
 }

 impl SegmentWriter {
@@ -158,43 +158,26 @@ impl SegmentWriter {
                     }
                 }
                 FieldType::Str(_) => {
-                    let mut token_streams: Vec<BoxTokenStream> = vec![];
-                    let mut offsets = vec![];
-                    let mut total_offset = 0;
-
-                    for field_value in field_values {
-                        match field_value.value() {
-                            Value::PreTokStr(tok_str) => {
-                                offsets.push(total_offset);
-                                if let Some(last_token) = tok_str.tokens.last() {
-                                    total_offset += last_token.offset_to;
-                                }
-                                token_streams
-                                    .push(PreTokenizedStream::from(tok_str.clone()).into());
-                            }
-                            Value::Str(ref text) => {
-                                if let Some(ref mut tokenizer) =
-                                    self.tokenizers[field.field_id() as usize]
-                                {
-                                    offsets.push(total_offset);
-                                    total_offset += text.len();
-                                    token_streams.push(tokenizer.token_stream(text));
-                                }
-                            }
-                            _ => (),
-                        }
-                    }
-
-                    let num_tokens = if token_streams.is_empty() {
-                        0
-                    } else {
-                        let mut token_stream = TokenStreamChain::new(offsets, token_streams);
-                        self.multifield_postings
-                            .index_text(doc_id, field, &mut token_stream)
-                    };
+                    let num_tokens = if let Some(ref mut tokenizer) =
+                        self.tokenizers[field.field_id() as usize]
+                    {
+                        let texts: Vec<&str> = field_values
+                            .iter()
+                            .flat_map(|field_value| match *field_value.value() {
+                                Value::Str(ref text) => Some(text.as_str()),
+                                _ => None,
+                            })
+                            .collect();
+                        if texts.is_empty() {
+                            0
+                        } else {
+                            let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
+                            self.multifield_postings
+                                .index_text(doc_id, field, &mut token_stream)
+                        }
+                    } else {
+                        0
+                    };

                     self.fieldnorms_writer.record(doc_id, field, num_tokens);
                 }
                 FieldType::U64(ref int_option) => {
@@ -247,7 +230,6 @@ impl SegmentWriter {
             }
         }
         doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
-        doc.prepare_for_store();
         let doc_writer = self.segment_serializer.get_store_writer();
         doc_writer.store(&doc)?;
         self.max_doc += 1;
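The left-hand branch chains one token stream per field value, shifting each stream's byte offsets by the accumulated length of the preceding values so positions stay consistent across values. A toy illustration of just that offset bookkeeping (a sketch, not tantivy's `TokenStreamChain`):

    #[derive(Debug, Clone)]
    struct Token {
        offset_from: usize,
        offset_to: usize,
        text: String,
    }

    // Concatenates per-value token lists, shifting offsets so they match a
    // virtual concatenation of all the values of the field.
    fn chain(values: &[Vec<Token>]) -> Vec<Token> {
        let mut total_offset = 0;
        let mut chained = Vec::new();
        for tokens in values {
            let value_len = tokens.last().map(|t| t.offset_to).unwrap_or(0);
            for token in tokens {
                let mut shifted = token.clone();
                shifted.offset_from += total_offset;
                shifted.offset_to += total_offset;
                chained.push(shifted);
            }
            total_offset += value_len;
        }
        chained
    }

    fn main() {
        let hello = vec![Token { offset_from: 0, offset_to: 5, text: "hello".into() }];
        let world = vec![Token { offset_from: 0, offset_to: 5, text: "world".into() }];
        let all = chain(&[hello, world]);
        assert_eq!(all[1].offset_from, 5); // second value shifted past the first
    }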
@@ -1,76 +1,18 @@
|
|||||||
use crate::Opstamp;
|
use crate::Opstamp;
|
||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
#[cfg(not(target_arch = "arm"))]
|
|
||||||
mod atomic_impl {
|
|
||||||
|
|
||||||
use crate::Opstamp;
|
|
||||||
use std::sync::atomic::{AtomicU64, Ordering};
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct AtomicU64Wrapper(AtomicU64);
|
|
||||||
|
|
||||||
impl AtomicU64Wrapper {
|
|
||||||
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
|
|
||||||
AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
|
|
||||||
self.0.fetch_add(val as u64, order) as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn revert(&self, val: u64, order: Ordering) -> u64 {
|
|
||||||
self.0.store(val, order);
|
|
||||||
val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(target_arch = "arm")]
|
|
||||||
mod atomic_impl {
|
|
||||||
|
|
||||||
use crate::Opstamp;
|
|
||||||
/// Under other architecture, we rely on a mutex.
|
|
||||||
use std::sync::atomic::Ordering;
|
|
||||||
use std::sync::RwLock;
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct AtomicU64Wrapper(RwLock<u64>);
|
|
||||||
|
|
||||||
impl AtomicU64Wrapper {
|
|
||||||
pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
|
|
||||||
AtomicU64Wrapper(RwLock::new(first_opstamp))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn fetch_add(&self, incr: u64, _order: Ordering) -> u64 {
|
|
||||||
let mut lock = self.0.write().unwrap();
|
|
||||||
let previous_val = *lock;
|
|
||||||
*lock = previous_val + incr;
|
|
||||||
previous_val
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn revert(&self, val: u64, _order: Ordering) -> u64 {
|
|
||||||
let mut lock = self.0.write().unwrap();
|
|
||||||
*lock = val;
|
|
||||||
val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
use self::atomic_impl::AtomicU64Wrapper;
|
|
||||||
|
|
||||||
/// Stamper provides Opstamps, which is just an auto-increment id to label
|
/// Stamper provides Opstamps, which is just an auto-increment id to label
|
||||||
/// an operation.
|
/// an operation.
|
||||||
///
|
///
|
||||||
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
|
/// Cloning does not "fork" the stamp generation. The stamper actually wraps an `Arc`.
|
||||||
#[derive(Clone, Default)]
|
#[derive(Clone, Default)]
|
||||||
pub struct Stamper(Arc<AtomicU64Wrapper>);
|
pub struct Stamper(Arc<AtomicU64>);
|
||||||
|
|
||||||
impl Stamper {
|
impl Stamper {
|
||||||
pub fn new(first_opstamp: Opstamp) -> Stamper {
|
pub fn new(first_opstamp: Opstamp) -> Stamper {
|
||||||
Stamper(Arc::new(AtomicU64Wrapper::new(first_opstamp)))
|
Stamper(Arc::new(AtomicU64::new(first_opstamp)))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn stamp(&self) -> Opstamp {
|
pub fn stamp(&self) -> Opstamp {
|
||||||
@@ -89,7 +31,8 @@ impl Stamper {
|
|||||||
|
|
||||||
/// Reverts the stamper to a given `Opstamp` value and returns it
|
/// Reverts the stamper to a given `Opstamp` value and returns it
|
||||||
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
|
pub fn revert(&self, to_opstamp: Opstamp) -> Opstamp {
|
||||||
self.0.revert(to_opstamp, Ordering::SeqCst)
|
self.0.store(to_opstamp, Ordering::SeqCst);
|
||||||
|
to_opstamp
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
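Both sides boil down to the same idea: a clonable handle around a shared atomic counter. A minimal self-contained sketch of the right-hand `Stamper` (simplified, with `u64` standing in for `Opstamp`):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    #[derive(Clone, Default)]
    struct Stamper(Arc<AtomicU64>);

    impl Stamper {
        fn new(first: u64) -> Stamper {
            Stamper(Arc::new(AtomicU64::new(first)))
        }
        fn stamp(&self) -> u64 {
            // `fetch_add` returns the previous value, so stamps start at `first`.
            self.0.fetch_add(1, Ordering::SeqCst)
        }
    }

    fn main() {
        let stamper = Stamper::new(0);
        let clone = stamper.clone(); // clones share the same counter
        assert_eq!(stamper.stamp(), 0);
        assert_eq!(clone.stamp(), 1);
    }

The `Arc` is what makes cloning not "fork" the sequence: both handles increment the same `AtomicU64`, so opstamps stay globally unique.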
138  src/lib.rs  (Normal file → Executable file)
@@ -160,68 +160,21 @@ pub use self::snippet::{Snippet, SnippetGenerator};

 mod docset;
 pub use self::docset::{DocSet, SkipResult};

 pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
 pub use crate::core::SegmentComponent;
 pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
 pub use crate::core::{InvertedIndexReader, SegmentReader};
 pub use crate::directory::Directory;
-pub use crate::indexer::operation::UserOperation;
 pub use crate::indexer::IndexWriter;
 pub use crate::postings::Postings;
 pub use crate::reader::LeasedItem;
 pub use crate::schema::{Document, Term};
-use std::fmt;

-use once_cell::sync::Lazy;
-
-/// Index format version.
-const INDEX_FORMAT_VERSION: u32 = 1;
-
-/// Structure version for the index.
-#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct Version {
-    major: u32,
-    minor: u32,
-    patch: u32,
-    index_format_version: u32,
-    store_compression: String,
-}
-
-impl fmt::Debug for Version {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", self.to_string())
-    }
-}
-
-static VERSION: Lazy<Version> = Lazy::new(|| Version {
-    major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
-    minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
-    patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
-    index_format_version: INDEX_FORMAT_VERSION,
-    store_compression: crate::store::COMPRESSION.to_string(),
-});
-
-impl ToString for Version {
-    fn to_string(&self) -> String {
-        format!(
-            "tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
-            self.major, self.minor, self.patch, self.index_format_version, self.store_compression
-        )
-    }
-}
-
-static VERSION_STRING: Lazy<String> = Lazy::new(|| VERSION.to_string());
-
-/// Expose the current version of tantivy as found in Cargo.toml during compilation.
-/// eg. "0.11.0" as well as the compression scheme used in the docstore.
-pub fn version() -> &'static Version {
-    &VERSION
-}
-
-/// Exposes the complete version of tantivy as found in Cargo.toml during compilation as a string.
-/// eg. "tantivy v0.11.0, index_format v1, store_compression: lz4".
-pub fn version_string() -> &'static str {
-    VERSION_STRING.as_str()
+/// Expose the current version of tantivy, as well
+/// as whether it was compiled with the simd compression.
+pub fn version() -> &'static str {
+    env!("CARGO_PKG_VERSION")
 }

 /// Defines tantivy's merging strategy
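The left-hand side assembles the version string from Cargo metadata baked in at compile time. A minimal sketch of that mechanism, with `INDEX_FORMAT_VERSION` and the compression name as stand-ins for the tantivy constants (assumptions, not the crate's actual values):

    // The CARGO_PKG_VERSION_* variables are provided by Cargo at compile time.
    const INDEX_FORMAT_VERSION: u32 = 1; // assumed constant for illustration

    fn version_string() -> String {
        format!(
            "tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
            env!("CARGO_PKG_VERSION_MAJOR"),
            env!("CARGO_PKG_VERSION_MINOR"),
            env!("CARGO_PKG_VERSION_PATCH"),
            INDEX_FORMAT_VERSION,
            "lz4", // assumption: whichever store compression the build selected
        )
    }

    fn main() {
        println!("{}", version_string());
    }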
@@ -334,18 +287,6 @@ mod tests {
         sample_with_seed(n, ratio, 4)
     }

-    #[test]
-    #[cfg(not(feature = "lz4"))]
-    fn test_version_string() {
-        use regex::Regex;
-        let regex_ptn = Regex::new(
-            "tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
-        )
-        .unwrap();
-        let version = super::version().to_string();
-        assert!(regex_ptn.find(&version).is_some());
-    }
-
     #[test]
     #[cfg(feature = "mmap")]
     fn test_indexing() {
@@ -941,73 +882,4 @@ mod tests {
         assert_eq!(fast_field_reader.get(0), 4f64)
     }
 }

-    // motivated by #729
-    #[test]
-    fn test_update_via_delete_insert() {
-        use crate::collector::Count;
-        use crate::indexer::NoMergePolicy;
-        use crate::query::AllQuery;
-        use crate::SegmentId;
-        use futures::executor::block_on;
-
-        const DOC_COUNT: u64 = 2u64;
-
-        let mut schema_builder = SchemaBuilder::default();
-        let id = schema_builder.add_u64_field("id", INDEXED);
-        let schema = schema_builder.build();
-
-        let index = Index::create_in_ram(schema.clone());
-        let index_reader = index.reader().unwrap();
-
-        let mut index_writer = index.writer(3_000_000).unwrap();
-        index_writer.set_merge_policy(Box::new(NoMergePolicy));
-
-        for doc_id in 0u64..DOC_COUNT {
-            index_writer.add_document(doc!(id => doc_id));
-        }
-        index_writer.commit().unwrap();
-
-        index_reader.reload().unwrap();
-        let searcher = index_reader.searcher();
-
-        assert_eq!(
-            searcher.search(&AllQuery, &Count).unwrap(),
-            DOC_COUNT as usize
-        );
-
-        // update the DOC_COUNT elements by deleting and re-adding
-        for doc_id in 0u64..DOC_COUNT {
-            index_writer.delete_term(Term::from_field_u64(id, doc_id));
-            index_writer.commit().unwrap();
-            index_reader.reload().unwrap();
-            let doc = doc!(id => doc_id);
-            index_writer.add_document(doc);
-            index_writer.commit().unwrap();
-            index_reader.reload().unwrap();
-            let searcher = index_reader.searcher();
-            // The number of documents should be stable.
-            assert_eq!(
-                searcher.search(&AllQuery, &Count).unwrap(),
-                DOC_COUNT as usize
-            );
-        }
-
-        index_reader.reload().unwrap();
-        let searcher = index_reader.searcher();
-        let segment_ids: Vec<SegmentId> = searcher
-            .segment_readers()
-            .into_iter()
-            .map(|reader| reader.segment_id())
-            .collect();
-        block_on(index_writer.merge(&segment_ids)).unwrap();
-
-        index_reader.reload().unwrap();
-        let searcher = index_reader.searcher();
-
-        assert_eq!(
-            searcher.search(&AllQuery, &Count).unwrap(),
-            DOC_COUNT as usize
-        );
-    }
 }

@@ -35,9 +35,9 @@
 /// let likes = schema_builder.add_u64_field("num_u64", FAST);
 /// let schema = schema_builder.build();
 /// let doc = doc!(
 ///     title => "Life Aquatic",
 ///     author => "Wes Anderson",
 ///     likes => 4u64
 /// );
 /// # }
 /// ```

@@ -36,10 +36,11 @@ struct Positions {

 impl Positions {
     pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
-        let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES);
+        let skip_len = skip_source.len();
+        let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
         let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
-        let (skip_source, long_skip_source) =
-            body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize));
+        let body_split = body.len() - u64::SIZE_IN_BYTES * (num_long_skips as usize);
+        let (skip_source, long_skip_source) = body.split(body_split);
         Positions {
             bit_packer: BitPacker4x::new(),
             skip_source,
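The two sides are equivalent: `split_from_end(n)` is just `split(len - n)`, peeling the last `n` bytes off as a footer. A minimal sketch of the helper over a plain byte slice (a hypothetical standalone version, not tantivy's `ReadOnlySource`):

    /// Splits `data` so that the second half contains exactly the last `n` bytes.
    /// Equivalent to `data.split_at(data.len() - n)`.
    fn split_from_end(data: &[u8], n: usize) -> (&[u8], &[u8]) {
        data.split_at(data.len() - n)
    }

    fn main() {
        let data = [1u8, 2, 3, 4, 5, 6];
        let (body, footer) = split_from_end(&data, 4);
        assert_eq!(body, &[1u8, 2][..]);
        assert_eq!(footer, &[3u8, 4, 5, 6][..]);
    }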
@@ -54,21 +54,21 @@ where
         match self.excluding_state {
             State::ExcludeOne(excluded_doc) => {
                 if doc == excluded_doc {
-                    return false;
-                }
-                if excluded_doc > doc {
-                    return true;
-                }
-                match self.excluding_docset.skip_next(doc) {
-                    SkipResult::OverStep => {
-                        self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
-                        true
-                    }
-                    SkipResult::End => {
-                        self.excluding_state = State::Finished;
-                        true
-                    }
-                    SkipResult::Reached => false,
+                    false
+                } else if excluded_doc > doc {
+                    true
+                } else {
+                    match self.excluding_docset.skip_next(doc) {
+                        SkipResult::OverStep => {
+                            self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
+                            true
+                        }
+                        SkipResult::End => {
+                            self.excluding_state = State::Finished;
+                            true
+                        }
+                        SkipResult::Reached => false,
+                    }
                 }
             }
             State::Finished => true,
@@ -33,6 +33,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
 /// use tantivy::schema::{Schema, TEXT};
 /// use tantivy::{doc, Index, Result, Term};
 ///
+/// # fn main() { example().unwrap(); }
 /// fn example() -> Result<()> {
 ///     let mut schema_builder = Schema::builder();
 ///     let title = schema_builder.add_text_field("title", TEXT);
@@ -58,6 +59,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
 ///     let searcher = reader.searcher();
 ///
 ///     {
+///
 ///         let term = Term::from_field_text(title, "Diary");
 ///         let query = FuzzyTermQuery::new(term, 1, true);
 ///         let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
@@ -67,7 +69,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
 ///
 ///     Ok(())
 /// }
-/// # assert!(example().is_ok());
 /// ```
 #[derive(Debug, Clone)]
 pub struct FuzzyTermQuery {
@@ -4,7 +4,6 @@ use crate::postings::Postings;
 use crate::query::bm25::BM25Weight;
 use crate::query::{Intersection, Scorer};
 use crate::DocId;
-use std::cmp::Ordering;

 struct PostingsWithOffset<TPostings> {
     offset: u32,
@@ -60,16 +59,12 @@ fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
     while left_i < left.len() && right_i < right.len() {
         let left_val = left[left_i];
         let right_val = right[right_i];
-        match left_val.cmp(&right_val) {
-            Ordering::Less => {
-                left_i += 1;
-            }
-            Ordering::Equal => {
-                return true;
-            }
-            Ordering::Greater => {
-                right_i += 1;
-            }
+        if left_val < right_val {
+            left_i += 1;
+        } else if right_val < left_val {
+            right_i += 1;
+        } else {
+            return true;
         }
     }
     false
@@ -82,18 +77,14 @@ fn intersection_count(left: &[u32], right: &[u32]) -> usize {
     while left_i < left.len() && right_i < right.len() {
         let left_val = left[left_i];
         let right_val = right[right_i];
-        match left_val.cmp(&right_val) {
-            Ordering::Less => {
-                left_i += 1;
-            }
-            Ordering::Equal => {
-                count += 1;
-                left_i += 1;
-                right_i += 1;
-            }
-            Ordering::Greater => {
-                right_i += 1;
-            }
+        if left_val < right_val {
+            left_i += 1;
+        } else if right_val < left_val {
+            right_i += 1;
+        } else {
+            count += 1;
+            left_i += 1;
+            right_i += 1;
         }
     }
     count
@@ -112,19 +103,15 @@ fn intersection(left: &mut [u32], right: &[u32]) -> usize {
     while left_i < left_len && right_i < right_len {
         let left_val = left[left_i];
         let right_val = right[right_i];
-        match left_val.cmp(&right_val) {
-            Ordering::Less => {
-                left_i += 1;
-            }
-            Ordering::Equal => {
-                left[count] = left_val;
-                count += 1;
-                left_i += 1;
-                right_i += 1;
-            }
-            Ordering::Greater => {
-                right_i += 1;
-            }
+        if left_val < right_val {
+            left_i += 1;
+        } else if right_val < left_val {
+            right_i += 1;
+        } else {
+            left[count] = left_val;
+            count += 1;
+            left_i += 1;
+            right_i += 1;
         }
     }
     count
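All three variants implement the same two-pointer walk over sorted doc-id slices; only the control flow differs (a `match` on `Ordering` versus chained `if`s). A standalone copy of the counting variant with a quick check, for reference:

    /// Counts the values present in both sorted slices (two-pointer walk).
    fn intersection_count(left: &[u32], right: &[u32]) -> usize {
        let (mut left_i, mut right_i, mut count) = (0, 0, 0);
        while left_i < left.len() && right_i < right.len() {
            if left[left_i] < right[right_i] {
                left_i += 1; // advance the side with the smaller value
            } else if right[right_i] < left[left_i] {
                right_i += 1;
            } else {
                count += 1; // equal: found a common doc id
                left_i += 1;
                right_i += 1;
            }
        }
        count
    }

    fn main() {
        assert_eq!(intersection_count(&[1, 3, 5, 7], &[2, 3, 4, 7, 9]), 2);
    }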
@@ -8,7 +8,7 @@ use crate::query::PhraseQuery;
 use crate::query::Query;
 use crate::query::RangeQuery;
 use crate::query::TermQuery;
-use crate::schema::{Facet, IndexRecordOption};
+use crate::schema::IndexRecordOption;
 use crate::schema::{Field, Schema};
 use crate::schema::{FieldType, Term};
 use crate::tokenizer::TokenizerManager;
@@ -319,10 +319,7 @@ impl QueryParser {
                 ))
             }
         }
-            FieldType::HierarchicalFacet => {
-                let facet = Facet::from_text(phrase);
-                Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
-            }
+            FieldType::HierarchicalFacet => Ok(vec![(0, Term::from_field_text(field, phrase))]),
             FieldType::Bytes => {
                 let field_name = self.schema.get_field_name(field).to_string();
                 Err(QueryParserError::FieldNotIndexed(field_name))
@@ -533,7 +530,7 @@ mod test {
     use crate::schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
     use crate::schema::{Schema, Term, INDEXED, STORED, STRING, TEXT};
     use crate::tokenizer::{
-        LowerCaser, SimpleTokenizer, StopWordFilter, TextAnalyzer, TokenizerManager,
+        LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager,
     };
     use crate::Index;
     use matches::assert_matches;
@@ -557,13 +554,12 @@ mod test {
         schema_builder.add_text_field("with_stop_words", text_options);
         schema_builder.add_date_field("date", INDEXED);
         schema_builder.add_f64_field("float", INDEXED);
-        schema_builder.add_facet_field("facet");
         let schema = schema_builder.build();
         let default_fields = vec![title, text];
         let tokenizer_manager = TokenizerManager::default();
         tokenizer_manager.register(
             "en_with_stop_words",
-            TextAnalyzer::from(SimpleTokenizer)
+            SimpleTokenizer
                 .filter(LowerCaser)
                 .filter(StopWordFilter::remove(vec!["the".to_string()])),
         );
@@ -592,13 +588,9 @@ mod test {
     }

     #[test]
-    pub fn test_parse_query_facet() {
+    pub fn test_parse_query_simple() {
         let query_parser = make_query_parser();
-        let query = query_parser.parse_query("facet:/root/branch/leaf").unwrap();
-        assert_eq!(
-            format!("{:?}", query),
-            "TermQuery(Term(field=11,bytes=[114, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102]))"
-        );
+        assert!(query_parser.parse_query("toto").is_ok());
     }

     #[test]
@@ -38,33 +38,41 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
 /// # Example
 ///
 /// ```rust
-/// use tantivy::collector::Count;
-/// use tantivy::query::RangeQuery;
-/// use tantivy::schema::{Schema, INDEXED};
-/// use tantivy::{doc, Index};
-/// # fn test() -> tantivy::Result<()> {
-/// let mut schema_builder = Schema::builder();
-/// let year_field = schema_builder.add_u64_field("year", INDEXED);
-/// let schema = schema_builder.build();
-///
-/// let index = Index::create_in_ram(schema);
-/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
-/// for year in 1950u64..2017u64 {
-///     let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
-///     for _ in 0..num_docs_within_year {
-///       index_writer.add_document(doc!(year_field => year));
-///     }
-/// }
-/// index_writer.commit()?;
-///
-/// let reader = index.reader()?;
+/// # use tantivy::collector::Count;
+/// # use tantivy::query::RangeQuery;
+/// # use tantivy::schema::{Schema, INDEXED};
+/// # use tantivy::{doc, Index, Result};
+/// #
+/// # fn run() -> Result<()> {
+/// # let mut schema_builder = Schema::builder();
+/// # let year_field = schema_builder.add_u64_field("year", INDEXED);
+/// # let schema = schema_builder.build();
+/// #
+/// # let index = Index::create_in_ram(schema);
+/// # {
+/// #     let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
+/// #     for year in 1950u64..2017u64 {
+/// #         let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
+/// #         for _ in 0..num_docs_within_year {
+/// #             index_writer.add_document(doc!(year_field => year));
+/// #         }
+/// #     }
+/// #     index_writer.commit().unwrap();
+/// # }
+/// # let reader = index.reader()?;
 /// let searcher = reader.searcher();
+///
 /// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
+///
 /// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
-/// assert_eq!(num_60s_books, 2285);
-/// Ok(())
+///
+/// # assert_eq!(num_60s_books, 2285);
+/// # Ok(())
+/// # }
+/// #
+/// # fn main() {
+/// #     run().unwrap()
 /// # }
-/// # assert!(test().is_ok());
 /// ```
 #[derive(Clone, Debug)]
 pub struct RangeQuery {
@@ -15,40 +15,40 @@ use tantivy_fst::Regex;
 /// use tantivy::collector::Count;
 /// use tantivy::query::RegexQuery;
 /// use tantivy::schema::{Schema, TEXT};
-/// use tantivy::{doc, Index, Term};
+/// use tantivy::{doc, Index, Result, Term};
 ///
-/// # fn test() -> tantivy::Result<()> {
-/// let mut schema_builder = Schema::builder();
-/// let title = schema_builder.add_text_field("title", TEXT);
-/// let schema = schema_builder.build();
-/// let index = Index::create_in_ram(schema);
-/// {
-///     let mut index_writer = index.writer(3_000_000)?;
-///     index_writer.add_document(doc!(
-///         title => "The Name of the Wind",
-///     ));
-///     index_writer.add_document(doc!(
-///         title => "The Diary of Muadib",
-///     ));
-///     index_writer.add_document(doc!(
-///         title => "A Dairy Cow",
-///     ));
-///     index_writer.add_document(doc!(
-///         title => "The Diary of a Young Girl",
-///     ));
-///     index_writer.commit().unwrap();
-/// }
-///
-/// let reader = index.reader()?;
-/// let searcher = reader.searcher();
-///
-/// let term = Term::from_field_text(title, "Diary");
-/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
-/// let count = searcher.search(&query, &Count)?;
-/// assert_eq!(count, 3);
-/// Ok(())
-/// # }
-/// # assert!(test().is_ok());
+/// # fn main() { example().unwrap(); }
+/// fn example() -> Result<()> {
+///     let mut schema_builder = Schema::builder();
+///     let title = schema_builder.add_text_field("title", TEXT);
+///     let schema = schema_builder.build();
+///     let index = Index::create_in_ram(schema);
+///     {
+///         let mut index_writer = index.writer(3_000_000)?;
+///         index_writer.add_document(doc!(
+///             title => "The Name of the Wind",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of Muadib",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "A Dairy Cow",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of a Young Girl",
+///         ));
+///         index_writer.commit().unwrap();
+///     }
+///
+///     let reader = index.reader()?;
+///     let searcher = reader.searcher();
+///
+///     let term = Term::from_field_text(title, "Diary");
+///     let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
+///     let count = searcher.search(&query, &Count)?;
+///     assert_eq!(count, 3);
+///     Ok(())
+/// }
 /// ```
 #[derive(Debug, Clone)]
 pub struct RegexQuery {
--- a/src/query/term_query/term_query.rs
+++ b/src/query/term_query/term_query.rs
@@ -23,39 +23,42 @@ use std::fmt;
 /// use tantivy::collector::{Count, TopDocs};
 /// use tantivy::query::TermQuery;
 /// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
-/// use tantivy::{doc, Index, Term};
-/// # fn test() -> tantivy::Result<()> {
-/// let mut schema_builder = Schema::builder();
-/// let title = schema_builder.add_text_field("title", TEXT);
-/// let schema = schema_builder.build();
-/// let index = Index::create_in_ram(schema);
-/// {
-///     let mut index_writer = index.writer(3_000_000)?;
-///     index_writer.add_document(doc!(
-///         title => "The Name of the Wind",
-///     ));
-///     index_writer.add_document(doc!(
-///         title => "The Diary of Muadib",
-///     ));
-///     index_writer.add_document(doc!(
-///         title => "A Dairy Cow",
-///     ));
-///     index_writer.add_document(doc!(
-///         title => "The Diary of a Young Girl",
-///     ));
-///     index_writer.commit()?;
-/// }
+/// use tantivy::{doc, Index, Result, Term};
+///
+/// # fn main() { example().unwrap(); }
+/// fn example() -> Result<()> {
+///     let mut schema_builder = Schema::builder();
+///     let title = schema_builder.add_text_field("title", TEXT);
+///     let schema = schema_builder.build();
+///     let index = Index::create_in_ram(schema);
+///     {
+///         let mut index_writer = index.writer(3_000_000)?;
+///         index_writer.add_document(doc!(
+///             title => "The Name of the Wind",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of Muadib",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "A Dairy Cow",
+///         ));
+///         index_writer.add_document(doc!(
+///             title => "The Diary of a Young Girl",
+///         ));
+///         index_writer.commit()?;
+///     }
+///     let reader = index.reader()?;
+///     let searcher = reader.searcher();
+///
+///     let query = TermQuery::new(
+///         Term::from_field_text(title, "diary"),
+///         IndexRecordOption::Basic,
+///     );
+///     let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
+///     assert_eq!(count, 2);
+///
+///     Ok(())
 /// }
-/// let reader = index.reader()?;
-/// let searcher = reader.searcher();
-/// let query = TermQuery::new(
-///     Term::from_field_text(title, "diary"),
-///     IndexRecordOption::Basic,
-/// );
-/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
-/// assert_eq!(count, 2);
-/// Ok(())
-/// # }
-/// # assert!(test().is_ok());
 /// ```
 #[derive(Clone)]
 pub struct TermQuery {
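The three query hunks above make the same documentation change: the left side hides its doc-test harness behind `#`-prefixed lines and asserts at the end with `assert!(test().is_ok())`, while the right side renders a complete fallible `example()` function and hides only the `main` that drives it. A minimal sketch of the two equivalent rustdoc patterns, on a hypothetical `parse` example rather than tantivy's API (the `DocStyles` item only exists to carry the doc comment):

```rust
/// Style A: setup and harness hidden behind `#`; only the interesting
/// lines are rendered in the generated docs.
///
/// ```
/// # fn test() -> Result<(), std::num::ParseIntError> {
/// let n: u64 = "42".parse()?;
/// assert_eq!(n, 42);
/// # Ok(())
/// # }
/// # assert!(test().is_ok());
/// ```
///
/// Style B: the whole fallible function is rendered; only the `main`
/// that drives it is hidden.
///
/// ```
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<(), std::num::ParseIntError> {
///     let n: u64 = "42".parse()?;
///     assert_eq!(n, 42);
///     Ok(())
/// }
/// ```
pub struct DocStyles;
```

Both styles run as doctests; the trade-off is purely about how much scaffolding the rendered documentation shows.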
--- a/src/reader/mod.rs
+++ b/src/reader/mod.rs
@@ -11,7 +11,6 @@ use crate::Result;
 use crate::Searcher;
 use crate::SegmentReader;
 use std::sync::Arc;
-use std::thread;
 
 /// Defines when a new version of the index should be reloaded.
 ///
@@ -163,11 +162,6 @@ pub struct IndexReader {
 }
 
 impl IndexReader {
-    #[cfg(test)]
-    pub(crate) fn index(&self) -> Index {
-        self.inner.index.clone()
-    }
-
     /// Update searchers so that they reflect the state of the last
     /// `.commit()`.
     ///
--- a/src/reader/pool.rs
+++ b/src/reader/pool.rs
@@ -167,7 +167,7 @@ mod tests {
 
     use super::Pool;
     use super::Queue;
-    use std::{iter, mem};
+    use std::iter;
 
     #[test]
     fn test_pool() {
@@ -197,67 +197,33 @@ mod tests {
     fn test_pool_dont_panic_on_empty_pop() {
         // When the object pool is exhausted, it shouldn't panic on pop()
         use std::sync::Arc;
-        use std::thread;
+        use std::{thread, time};
 
         // Wrap the pool in an Arc, same way as its used in `core/index.rs`
-        let pool1 = Arc::new(Pool::new());
+        let pool = Arc::new(Pool::new());
         // clone pools outside the move scope of each new thread
-        let pool2 = Arc::clone(&pool1);
-        let pool3 = Arc::clone(&pool1);
+        let pool1 = Arc::clone(&pool);
+        let pool2 = Arc::clone(&pool);
 
         let elements_for_pool = vec![1, 2];
-        pool1.publish_new_generation(elements_for_pool);
+        pool.publish_new_generation(elements_for_pool);
 
         let mut threads = vec![];
+        let sleep_dur = time::Duration::from_millis(10);
         // spawn one more thread than there are elements in the pool
-
-        let (start_1_send, start_1_recv) = crossbeam::bounded(0);
-        let (start_2_send, start_2_recv) = crossbeam::bounded(0);
-        let (start_3_send, start_3_recv) = crossbeam::bounded(0);
-
-        let (event_send1, event_recv) = crossbeam::unbounded();
-        let event_send2 = event_send1.clone();
-        let event_send3 = event_send1.clone();
-
         threads.push(thread::spawn(move || {
-            assert_eq!(start_1_recv.recv(), Ok("start"));
+            // leasing to make sure it's not dropped before sleep is called
+            let _leased_searcher = &pool.acquire();
+            thread::sleep(sleep_dur);
+        }));
+        threads.push(thread::spawn(move || {
+            // leasing to make sure it's not dropped before sleep is called
             let _leased_searcher = &pool1.acquire();
-            assert!(event_send1.send("1 acquired").is_ok());
-            assert_eq!(start_1_recv.recv(), Ok("stop"));
-            assert!(event_send1.send("1 stopped").is_ok());
-            mem::drop(_leased_searcher);
+            thread::sleep(sleep_dur);
         }));
 
         threads.push(thread::spawn(move || {
-            assert_eq!(start_2_recv.recv(), Ok("start"));
+            // leasing to make sure it's not dropped before sleep is called
            let _leased_searcher = &pool2.acquire();
-            assert!(event_send2.send("2 acquired").is_ok());
-            assert_eq!(start_2_recv.recv(), Ok("stop"));
-            mem::drop(_leased_searcher);
-            assert!(event_send2.send("2 stopped").is_ok());
+            thread::sleep(sleep_dur);
         }));
 
-        threads.push(thread::spawn(move || {
-            assert_eq!(start_3_recv.recv(), Ok("start"));
-            let _leased_searcher = &pool3.acquire();
-            assert!(event_send3.send("3 acquired").is_ok());
-            assert_eq!(start_3_recv.recv(), Ok("stop"));
-            mem::drop(_leased_searcher);
-            assert!(event_send3.send("3 stopped").is_ok());
-        }));
-
-        assert!(start_1_send.send("start").is_ok());
-        assert_eq!(event_recv.recv(), Ok("1 acquired"));
-        assert!(start_2_send.send("start").is_ok());
-        assert_eq!(event_recv.recv(), Ok("2 acquired"));
-        assert!(start_3_send.send("start").is_ok());
-        assert!(event_recv.try_recv().is_err());
-        assert!(start_1_send.send("stop").is_ok());
-        assert_eq!(event_recv.recv(), Ok("1 stopped"));
-        assert_eq!(event_recv.recv(), Ok("3 acquired"));
-        assert!(start_3_send.send("stop").is_ok());
-        assert_eq!(event_recv.recv(), Ok("3 stopped"));
-        assert!(start_2_send.send("stop").is_ok());
-        assert_eq!(event_recv.recv(), Ok("2 stopped"));
     }
 }
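The left-hand version of this pool test replaces the branch's `thread::sleep` timing with explicit rendezvous channels, so the interleaving of `acquire()` calls is deterministic rather than timing-dependent. A small standalone sketch of that synchronization idiom, using the same `crossbeam` calls as the test (a zero-capacity `bounded(0)` channel blocks the sender until a receiver is simultaneously waiting):

```rust
use std::thread;

fn main() {
    // Zero-capacity channel: `send` blocks until another thread is
    // simultaneously blocked in `recv` (a rendezvous point).
    let (start_send, start_recv) = crossbeam::bounded::<&str>(0);
    // Unbounded channel: workers report events without ever blocking.
    let (event_send, event_recv) = crossbeam::unbounded::<&str>();

    let worker = thread::spawn(move || {
        assert_eq!(start_recv.recv(), Ok("start")); // wait for the go signal
        event_send.send("acquired").unwrap();       // report progress
        assert_eq!(start_recv.recv(), Ok("stop"));  // wait to be released
    });

    start_send.send("start").unwrap();              // rendezvous no. 1
    assert_eq!(event_recv.recv(), Ok("acquired"));
    start_send.send("stop").unwrap();               // rendezvous no. 2
    worker.join().unwrap();
}
```

With three workers and only two pooled elements, as in the test above, this lets the coordinator prove that the third `acquire()` cannot proceed until one lease is dropped, without relying on sleeps.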
--- a/src/schema/document.rs
+++ b/src/schema/document.rs
@@ -1,7 +1,6 @@
 use super::*;
 use crate::common::BinarySerializable;
 use crate::common::VInt;
-use crate::tokenizer::PreTokenizedString;
 use crate::DateTime;
 use itertools::Itertools;
 use std::io::{self, Read, Write};
@@ -30,8 +29,8 @@ impl From<Vec<FieldValue>> for Document {
 impl PartialEq for Document {
     fn eq(&self, other: &Document) -> bool {
         // super slow, but only here for tests
-        let mut self_field_values: Vec<&_> = self.field_values.iter().collect();
-        let mut other_field_values: Vec<&_> = other.field_values.iter().collect();
+        let mut self_field_values = self.field_values.clone();
+        let mut other_field_values = other.field_values.clone();
         self_field_values.sort();
         other_field_values.sort();
         self_field_values.eq(&other_field_values)
@@ -79,16 +78,6 @@ impl Document {
         self.add(FieldValue::new(field, value));
     }
 
-    /// Add a pre-tokenized text field.
-    pub fn add_pre_tokenized_text(
-        &mut self,
-        field: Field,
-        pre_tokenized_text: &PreTokenizedString,
-    ) {
-        let value = Value::PreTokStr(pre_tokenized_text.clone());
-        self.add(FieldValue::new(field, value));
-    }
-
     /// Add a u64 field
     pub fn add_u64(&mut self, field: Field, value: u64) {
         self.add(FieldValue::new(field, Value::U64(value)));
@@ -155,21 +144,6 @@ impl Document {
             .find(|field_value| field_value.field() == field)
             .map(FieldValue::value)
     }
-
-    /// Prepares Document for being stored in the document store
-    ///
-    /// Method transforms PreTokenizedString values into String
-    /// values.
-    pub fn prepare_for_store(&mut self) {
-        for field_value in &mut self.field_values {
-            if let Value::PreTokStr(pre_tokenized_text) = field_value.value() {
-                *field_value = FieldValue::new(
-                    field_value.field(),
-                    Value::Str(pre_tokenized_text.text.clone()), //< TODO somehow remove .clone()
-                );
-            }
-        }
-    }
 }
 
 impl BinarySerializable for Document {
@@ -195,7 +169,6 @@ impl BinarySerializable for Document {
 mod tests {
 
     use crate::schema::*;
-    use crate::tokenizer::{PreTokenizedString, Token};
 
     #[test]
     fn test_doc() {
@@ -205,38 +178,4 @@ mod tests {
         doc.add_text(text_field, "My title");
         assert_eq!(doc.field_values().len(), 1);
     }
-
-    #[test]
-    fn test_prepare_for_store() {
-        let mut schema_builder = Schema::builder();
-        let text_field = schema_builder.add_text_field("title", TEXT);
-        let mut doc = Document::default();
-
-        let pre_tokenized_text = PreTokenizedString {
-            text: String::from("A"),
-            tokens: vec![Token {
-                offset_from: 0,
-                offset_to: 1,
-                position: 0,
-                text: String::from("A"),
-                position_length: 1,
-            }],
-        };
-
-        doc.add_pre_tokenized_text(text_field, &pre_tokenized_text);
-        doc.add_text(text_field, "title");
-        doc.prepare_for_store();
-
-        assert_eq!(doc.field_values().len(), 2);
-
-        match doc.field_values()[0].value() {
-            Value::Str(ref text) => assert_eq!(text, "A"),
-            _ => panic!("Incorrect variant of Value"),
-        }
-
-        match doc.field_values()[1].value() {
-            Value::Str(ref text) => assert_eq!(text, "title"),
-            _ => panic!("Incorrect variant of Value"),
-        }
-    }
 }
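Both sides of the `PartialEq` hunk above implement order-insensitive equality by sorting before comparing; the left sorts vectors of references (no deep copy of the values), the right clones the field values first. A standalone sketch of the reference-sorting variant, with a simplified stand-in `FieldValue` rather than tantivy's type:

```rust
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct FieldValue {
    field: u32,
    value: String,
}

/// Order-insensitive comparison without cloning the values:
/// sort vectors of references, then compare element-wise.
/// (`&T` is `Ord` whenever `T` is, so sorting references works.)
fn unordered_eq(left: &[FieldValue], right: &[FieldValue]) -> bool {
    let mut left_refs: Vec<&FieldValue> = left.iter().collect();
    let mut right_refs: Vec<&FieldValue> = right.iter().collect();
    left_refs.sort();
    right_refs.sort();
    left_refs == right_refs
}

fn main() {
    let a = vec![
        FieldValue { field: 1, value: "b".into() },
        FieldValue { field: 0, value: "a".into() },
    ];
    let b = vec![
        FieldValue { field: 0, value: "a".into() },
        FieldValue { field: 1, value: "b".into() },
    ];
    assert!(unordered_eq(&a, &b));
}
```

As the source comment notes, either variant is "super slow" and only meant for tests; the reference version just avoids copying potentially large string payloads.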
--- a/src/schema/field.rs
+++ b/src/schema/field.rs
@@ -15,7 +15,6 @@ impl Field {
     }
 
     /// Returns a u32 identifying uniquely a field within a schema.
-    #[allow(clippy::trivially_copy_pass_by_ref)]
     pub fn field_id(&self) -> u32 {
         self.0
     }
--- a/src/schema/field_type.rs
+++ b/src/schema/field_type.rs
@@ -1,12 +1,11 @@
 use base64::decode;
 
+use crate::schema::{IntOptions, TextOptions};
+
 use crate::schema::Facet;
 use crate::schema::IndexRecordOption;
 use crate::schema::TextFieldIndexing;
 use crate::schema::Value;
-use crate::schema::{IntOptions, TextOptions};
-use crate::tokenizer::PreTokenizedString;
-use chrono::{FixedOffset, Utc};
 use serde_json::Value as JsonValue;
 
 /// Possible error that may occur while parsing a field value
@@ -125,20 +124,13 @@ impl FieldType {
     pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> {
         match *json {
             JsonValue::String(ref field_text) => match *self {
-                FieldType::Date(_) => {
-                    let dt_with_fixed_tz: chrono::DateTime<FixedOffset> =
-                        chrono::DateTime::parse_from_rfc3339(field_text).map_err(|err|
-                            ValueParsingError::TypeError(format!(
-                                "Failed to parse date from JSON. Expected rfc3339 format, got {}. {:?}",
-                                field_text, err
-                            ))
-                        )?;
-                    Ok(Value::Date(dt_with_fixed_tz.with_timezone(&Utc)))
-                }
                 FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
-                FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) => Err(
-                    ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
-                ),
+                FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {
+                    Err(ValueParsingError::TypeError(format!(
+                        "Expected an integer, got {:?}",
+                        json
+                    )))
+                }
                 FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
                 FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
                     ValueParsingError::InvalidBase64(format!(
@@ -177,28 +169,6 @@ impl FieldType {
                     Err(ValueParsingError::TypeError(msg))
                 }
             },
-            JsonValue::Object(_) => match *self {
-                FieldType::Str(_) => {
-                    if let Ok(tok_str_val) =
-                        serde_json::from_value::<PreTokenizedString>(json.clone())
-                    {
-                        Ok(Value::PreTokStr(tok_str_val))
-                    } else {
-                        let msg = format!(
-                            "Json value {:?} cannot be translated to PreTokenizedString.",
-                            json
-                        );
-                        Err(ValueParsingError::TypeError(msg))
-                    }
-                }
-                _ => {
-                    let msg = format!(
-                        "Json value not supported error {:?}. Expected {:?}",
-                        json, self
-                    );
-                    Err(ValueParsingError::TypeError(msg))
-                }
-            },
             _ => {
                 let msg = format!(
                     "Json value not supported error {:?}. Expected {:?}",
@@ -214,37 +184,7 @@ impl FieldType {
 mod tests {
     use super::FieldType;
     use crate::schema::field_type::ValueParsingError;
-    use crate::schema::TextOptions;
     use crate::schema::Value;
-    use crate::schema::{Schema, INDEXED};
-    use crate::tokenizer::{PreTokenizedString, Token};
-    use crate::{DateTime, Document};
-    use chrono::{NaiveDate, NaiveDateTime, NaiveTime, Utc};
-
-    #[test]
-    fn test_deserialize_json_date() {
-        let mut schema_builder = Schema::builder();
-        let date_field = schema_builder.add_date_field("date", INDEXED);
-        let schema = schema_builder.build();
-        let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
-        let doc = schema.parse_document(doc_json).unwrap();
-        let date = doc.get_first(date_field).unwrap();
-        assert_eq!(format!("{:?}", date), "Date(2019-10-12T05:20:50.520Z)");
-    }
-
-    #[test]
-    fn test_serialize_json_date() {
-        let mut doc = Document::new();
-        let mut schema_builder = Schema::builder();
-        let date_field = schema_builder.add_date_field("date", INDEXED);
-        let schema = schema_builder.build();
-        let naive_date = NaiveDate::from_ymd(1982, 9, 17);
-        let naive_time = NaiveTime::from_hms(13, 20, 00);
-        let date_time = DateTime::from_utc(NaiveDateTime::new(naive_date, naive_time), Utc);
-        doc.add_date(date_field, &date_time);
-        let doc_json = schema.to_json(&doc);
-        assert_eq!(doc_json, r#"{"date":["1982-09-17T13:20:00+00:00"]}"#);
-    }
 
     #[test]
     fn test_bytes_value_from_json() {
@@ -265,71 +205,4 @@ mod tests {
             _ => panic!("Expected parse failure for invalid base64"),
         }
     }
-
-    #[test]
-    fn test_pre_tok_str_value_from_json() {
-        let pre_tokenized_string_json = r#"{
-  "text": "The Old Man",
-  "tokens": [
-    {
-      "offset_from": 0,
-      "offset_to": 3,
-      "position": 0,
-      "text": "The",
-      "position_length": 1
-    },
-    {
-      "offset_from": 4,
-      "offset_to": 7,
-      "position": 1,
-      "text": "Old",
-      "position_length": 1
-    },
-    {
-      "offset_from": 8,
-      "offset_to": 11,
-      "position": 2,
-      "text": "Man",
-      "position_length": 1
-    }
-  ]
-}"#;
-
-        let expected_value = Value::PreTokStr(PreTokenizedString {
-            text: String::from("The Old Man"),
-            tokens: vec![
-                Token {
-                    offset_from: 0,
-                    offset_to: 3,
-                    position: 0,
-                    text: String::from("The"),
-                    position_length: 1,
-                },
-                Token {
-                    offset_from: 4,
-                    offset_to: 7,
-                    position: 1,
-                    text: String::from("Old"),
-                    position_length: 1,
-                },
-                Token {
-                    offset_from: 8,
-                    offset_to: 11,
-                    position: 2,
-                    text: String::from("Man"),
-                    position_length: 1,
-                },
-            ],
-        });
-
-        let deserialized_value = FieldType::Str(TextOptions::default())
-            .value_from_json(&serde_json::from_str(pre_tokenized_string_json).unwrap())
-            .unwrap();
-
-        assert_eq!(deserialized_value, expected_value);
-
-        let serialized_value_json = serde_json::to_string_pretty(&expected_value).unwrap();
-
-        assert_eq!(serialized_value_json, pre_tokenized_string_json);
-    }
 }
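The `FieldType::Date(_)` arm deleted above parses an RFC 3339 string into a fixed-offset `chrono` date and then normalizes it to UTC. The same two-step parse, extracted into a standalone sketch; the expected `Debug` output in the assertion is taken from the deleted `test_deserialize_json_date`:

```rust
use chrono::{DateTime, FixedOffset, Utc};

fn parse_rfc3339_to_utc(field_text: &str) -> Result<DateTime<Utc>, String> {
    // Step 1: parse, keeping whatever offset the input string carries.
    let dt_with_fixed_tz: DateTime<FixedOffset> = DateTime::parse_from_rfc3339(field_text)
        .map_err(|err| format!("expected rfc3339 format, got {}: {:?}", field_text, err))?;
    // Step 2: convert the same instant to UTC.
    Ok(dt_with_fixed_tz.with_timezone(&Utc))
}

fn main() {
    let date = parse_rfc3339_to_utc("2019-10-12T07:20:50.52+02:00").unwrap();
    // The +02:00 input becomes the equivalent UTC instant.
    assert_eq!(format!("{:?}", date), "2019-10-12T05:20:50.520Z");
}
```

On the right-hand side this arm is gone, so a date arriving as a JSON string is rejected with the generic "Expected an integer" error; dates are then only accepted in integer form.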
--- a/src/schema/flags.rs
+++ b/src/schema/flags.rs
@@ -53,7 +53,7 @@ where
     fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output {
         SchemaFlagList {
             head: head.head,
-            tail: self,
+            tail: self.clone(),
         }
     }
 }
--- a/src/schema/mod.rs
+++ b/src/schema/mod.rs
@@ -44,7 +44,7 @@ We can split the problem of generating a search result page into two phases :
 the search results page. (`doc_ids[] -> Document[]`)
 
 In the first phase, the ability to search for documents by the given field is determined by the
-[`IndexRecordOption`](enum.IndexRecordOption.html) of our
+[`TextIndexingOptions`](enum.TextIndexingOptions.html) of our
 [`TextOptions`](struct.TextOptions.html).
 
 The effect of each possible setting is described more in detail
--- a/src/schema/schema.rs
+++ b/src/schema/schema.rs
@@ -166,7 +166,7 @@ impl SchemaBuilder {
     }
 
     /// Adds a field entry to the schema in build.
-    pub fn add_field(&mut self, field_entry: FieldEntry) -> Field {
+    fn add_field(&mut self, field_entry: FieldEntry) -> Field {
         let field = Field::from_field_id(self.fields.len() as u32);
         let field_name = field_entry.name().to_string();
         self.fields.push(field_entry);
@@ -401,7 +401,6 @@ pub enum DocParsingError {
 mod tests {
 
     use crate::schema::field_type::ValueParsingError;
-    use crate::schema::int_options::Cardinality::SingleValue;
     use crate::schema::schema::DocParsingError::NotJSON;
     use crate::schema::*;
     use matches::{assert_matches, matches};
@@ -716,94 +715,4 @@ mod tests {
         assert_matches!(json_err, Err(NotJSON(_)));
         }
     }
-
-    #[test]
-    pub fn test_schema_add_field() {
-        let mut schema_builder = SchemaBuilder::default();
-        let id_options = TextOptions::default().set_stored().set_indexing_options(
-            TextFieldIndexing::default()
-                .set_tokenizer("raw")
-                .set_index_option(IndexRecordOption::Basic),
-        );
-        let timestamp_options = IntOptions::default()
-            .set_stored()
-            .set_indexed()
-            .set_fast(SingleValue);
-        schema_builder.add_text_field("_id", id_options);
-        schema_builder.add_date_field("_timestamp", timestamp_options);
-
-        let schema_content = r#"[
-  {
-    "name": "text",
-    "type": "text",
-    "options": {
-      "indexing": {
-        "record": "position",
-        "tokenizer": "default"
-      },
-      "stored": false
-    }
-  },
-  {
-    "name": "popularity",
-    "type": "i64",
-    "options": {
-      "indexed": false,
-      "fast": "single",
-      "stored": true
-    }
-  }
-]"#;
-        let tmp_schema: Schema =
-            serde_json::from_str(&schema_content).expect("error while reading json");
-        for (_field, field_entry) in tmp_schema.fields() {
-            schema_builder.add_field(field_entry.clone());
-        }
-
-        let schema = schema_builder.build();
-        let schema_json = serde_json::to_string_pretty(&schema).unwrap();
-        let expected = r#"[
-  {
-    "name": "_id",
-    "type": "text",
-    "options": {
-      "indexing": {
-        "record": "basic",
-        "tokenizer": "raw"
-      },
-      "stored": true
-    }
-  },
-  {
-    "name": "_timestamp",
-    "type": "date",
-    "options": {
-      "indexed": true,
-      "fast": "single",
-      "stored": true
-    }
-  },
-  {
-    "name": "text",
-    "type": "text",
-    "options": {
-      "indexing": {
-        "record": "position",
-        "tokenizer": "default"
-      },
-      "stored": false
-    }
-  },
-  {
-    "name": "popularity",
-    "type": "i64",
-    "options": {
-      "indexed": false,
-      "fast": "single",
-      "stored": true
-    }
-  }
-]"#;
-        assert_eq!(schema_json, expected);
-    }
 }
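The visibility flip on `SchemaBuilder::add_field` is exactly what the deleted `test_schema_add_field` exercised: with a public `add_field`, the `FieldEntry`s of an existing schema can be appended onto a fresh builder. A short sketch of that usage, assuming tantivy's API as shown in the deleted test (`fields()` yielding `(Field, &FieldEntry)` pairs); it only compiles against the left-hand side, where `add_field` is `pub`:

```rust
use tantivy::schema::{Schema, SchemaBuilder};

/// Appends every field of `donor` onto `builder`, then builds the
/// combined schema. Mirrors the loop in the deleted test.
fn merge_schema_into(mut builder: SchemaBuilder, donor: &Schema) -> Schema {
    for (_field, field_entry) in donor.fields() {
        builder.add_field(field_entry.clone());
    }
    builder.build()
}
```

Making `add_field` private again (right-hand side) forces callers back onto the typed helpers such as `add_text_field` and `add_date_field`, at the cost of this kind of schema merging.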
--- a/src/schema/value.rs
+++ b/src/schema/value.rs
@@ -1,5 +1,4 @@
 use crate::schema::Facet;
-use crate::tokenizer::PreTokenizedString;
 use crate::DateTime;
 use serde::de::Visitor;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -11,8 +10,6 @@ use std::{cmp::Ordering, fmt};
 pub enum Value {
     /// The str type is used for any text information.
     Str(String),
-    /// Pre-tokenized str type,
-    PreTokStr(PreTokenizedString),
     /// Unsigned 64-bits Integer `u64`
     U64(u64),
     /// Signed 64-bits Integer `i64`
@@ -32,7 +29,6 @@ impl Ord for Value {
     fn cmp(&self, other: &Self) -> Ordering {
         match (self, other) {
             (Value::Str(l), Value::Str(r)) => l.cmp(r),
-            (Value::PreTokStr(l), Value::PreTokStr(r)) => l.cmp(r),
             (Value::U64(l), Value::U64(r)) => l.cmp(r),
             (Value::I64(l), Value::I64(r)) => l.cmp(r),
             (Value::Date(l), Value::Date(r)) => l.cmp(r),
@@ -48,8 +44,6 @@ impl Ord for Value {
             }
             (Value::Str(_), _) => Ordering::Less,
             (_, Value::Str(_)) => Ordering::Greater,
-            (Value::PreTokStr(_), _) => Ordering::Less,
-            (_, Value::PreTokStr(_)) => Ordering::Greater,
             (Value::U64(_), _) => Ordering::Less,
             (_, Value::U64(_)) => Ordering::Greater,
             (Value::I64(_), _) => Ordering::Less,
@@ -71,11 +65,10 @@ impl Serialize for Value {
     {
         match *self {
             Value::Str(ref v) => serializer.serialize_str(v),
-            Value::PreTokStr(ref v) => v.serialize(serializer),
             Value::U64(u) => serializer.serialize_u64(u),
             Value::I64(u) => serializer.serialize_i64(u),
             Value::F64(u) => serializer.serialize_f64(u),
-            Value::Date(ref date) => serializer.serialize_str(&date.to_rfc3339()),
+            Value::Date(ref date) => serializer.serialize_i64(date.timestamp()),
             Value::Facet(ref facet) => facet.serialize(serializer),
             Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes),
         }
@@ -96,14 +89,14 @@ impl<'de> Deserialize<'de> for Value {
             formatter.write_str("a string or u32")
         }
 
-        fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
-            Ok(Value::I64(v))
-        }
-
         fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
             Ok(Value::U64(v))
         }
 
+        fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
+            Ok(Value::I64(v))
+        }
+
         fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> {
             Ok(Value::F64(v))
         }
@@ -131,15 +124,6 @@ impl Value {
         }
     }
 
-    /// Returns the tokenized text, provided the value is of the `PreTokStr` type.
-    /// (Returns None if the value is not of the `PreTokStr` type).
-    pub fn tokenized_text(&self) -> Option<&PreTokenizedString> {
-        match *self {
-            Value::PreTokStr(ref tok_text) => Some(tok_text),
-            _ => None,
-        }
-    }
-
     /// Returns the u64-value, provided the value is of the `U64` type.
     ///
     /// # Panics
@@ -209,8 +193,8 @@ impl From<f64> for Value {
     }
 }
 
-impl From<crate::DateTime> for Value {
-    fn from(date_time: crate::DateTime) -> Value {
+impl From<DateTime> for Value {
+    fn from(date_time: DateTime) -> Value {
         Value::Date(date_time)
     }
 }
@@ -233,17 +217,10 @@ impl From<Vec<u8>> for Value {
     }
 }
 
-impl From<PreTokenizedString> for Value {
-    fn from(pretokenized_string: PreTokenizedString) -> Value {
-        Value::PreTokStr(pretokenized_string)
-    }
-}
-
 mod binary_serialize {
     use super::Value;
     use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
     use crate::schema::Facet;
-    use crate::tokenizer::PreTokenizedString;
     use chrono::{TimeZone, Utc};
     use std::io::{self, Read, Write};
 
@@ -254,11 +231,6 @@ mod binary_serialize {
     const BYTES_CODE: u8 = 4;
     const DATE_CODE: u8 = 5;
     const F64_CODE: u8 = 6;
-    const EXT_CODE: u8 = 7;
-
-    // extended types
-
-    const TOK_STR_CODE: u8 = 0;
 
     impl BinarySerializable for Value {
         fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -267,18 +239,6 @@ mod binary_serialize {
                 TEXT_CODE.serialize(writer)?;
                 text.serialize(writer)
             }
-            Value::PreTokStr(ref tok_str) => {
-                EXT_CODE.serialize(writer)?;
-                TOK_STR_CODE.serialize(writer)?;
-                if let Ok(text) = serde_json::to_string(tok_str) {
-                    text.serialize(writer)
-                } else {
-                    Err(io::Error::new(
-                        io::ErrorKind::Other,
-                        "Failed to dump Value::PreTokStr(_) to json.",
-                    ))
-                }
-            }
             Value::U64(ref val) => {
                 U64_CODE.serialize(writer)?;
                 val.serialize(writer)
@@ -330,30 +290,6 @@ mod binary_serialize {
             }
             HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)),
             BYTES_CODE => Ok(Value::Bytes(Vec::<u8>::deserialize(reader)?)),
-            EXT_CODE => {
-                let ext_type_code = u8::deserialize(reader)?;
-                match ext_type_code {
-                    TOK_STR_CODE => {
-                        let str_val = String::deserialize(reader)?;
-                        if let Ok(value) = serde_json::from_str::<PreTokenizedString>(&str_val)
-                        {
-                            Ok(Value::PreTokStr(value))
-                        } else {
-                            Err(io::Error::new(
-                                io::ErrorKind::Other,
-                                "Failed to parse string data as Value::PreTokStr(_).",
-                            ))
-                        }
-                    }
-                    _ => Err(io::Error::new(
-                        io::ErrorKind::InvalidData,
-                        format!(
-                            "No extened field type is associated with code {:?}",
-                            ext_type_code
-                        ),
-                    )),
-                }
-            }
             _ => Err(io::Error::new(
                 io::ErrorKind::InvalidData,
                 format!("No field type is associated with code {:?}", type_code),
@@ -362,17 +298,3 @@ mod binary_serialize {
         }
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::Value;
-    use crate::DateTime;
-    use std::str::FromStr;
-
-    #[test]
-    fn test_serialize_date() {
-        let value = Value::Date(DateTime::from_str("1996-12-20T00:39:57+00:00").unwrap());
-        let serialized_value_json = serde_json::to_string_pretty(&value).unwrap();
-        assert_eq!(serialized_value_json, r#""1996-12-20T00:39:57+00:00""#);
-    }
-}
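The one behavioral change buried in these `value.rs` hunks is how `Value::Date` reaches JSON: an RFC 3339 string on the left versus a bare Unix timestamp on the right, which silently drops sub-second precision and the original offset. A hedged serde sketch of the two choices, using stand-in newtypes rather than tantivy's `Value`:

```rust
use chrono::{DateTime, TimeZone, Utc};
use serde::{Serialize, Serializer};

struct Rfc3339Date(DateTime<Utc>);
struct TimestampDate(DateTime<Utc>);

impl Serialize for Rfc3339Date {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Left-hand behaviour: a quoted, human-readable string.
        serializer.serialize_str(&self.0.to_rfc3339())
    }
}

impl Serialize for TimestampDate {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Right-hand behaviour: a bare integer of seconds since the epoch.
        serializer.serialize_i64(self.0.timestamp())
    }
}

fn main() {
    let date = Utc.timestamp(1_570_857_650, 0);
    println!("{}", serde_json::to_string(&Rfc3339Date(date)).unwrap());   // "2019-10-12T05:20:50+00:00"
    println!("{}", serde_json::to_string(&TimestampDate(date)).unwrap()); // 1570857650
}
```

The deleted `test_serialize_date` pins the left-hand format, which is presumably why it cannot survive on the timestamp-serializing side of the diff.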
--- a/src/snippet/mod.rs
+++ b/src/snippet/mod.rs
@@ -1,7 +1,8 @@
 use crate::query::Query;
 use crate::schema::Field;
 use crate::schema::Value;
-use crate::tokenizer::{TextAnalyzer, Token};
+use crate::tokenizer::BoxedTokenizer;
+use crate::tokenizer::{Token, TokenStream};
 use crate::Document;
 use crate::Result;
 use crate::Searcher;
@@ -141,7 +142,7 @@ impl Snippet {
 /// Fragments must be valid in the sense that `&text[fragment.start..fragment.stop]`\
 /// has to be a valid string.
 fn search_fragments<'a>(
-    tokenizer: &TextAnalyzer,
+    tokenizer: &BoxedTokenizer,
     text: &'a str,
     terms: &BTreeMap<String, f32>,
     max_num_chars: usize,
@@ -250,7 +251,7 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
 /// ```
 pub struct SnippetGenerator {
     terms_text: BTreeMap<String, f32>,
-    tokenizer: TextAnalyzer,
+    tokenizer: BoxedTokenizer,
     field: Field,
     max_num_chars: usize,
 }
@@ -330,8 +331,9 @@ mod tests {
     use std::collections::BTreeMap;
     use std::iter::Iterator;
 
-    const TEST_TEXT: &'static str = r#"Rust is a systems programming language sponsored by
-Mozilla which describes it as a "safe, concurrent, practical language", supporting functional and
+    const TEST_TEXT: &'static str =
+        r#"Rust is a systems programming language sponsored by Mozilla which
+describes it as a "safe, concurrent, practical language", supporting functional and
 imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?],
 but its designers intend it to provide better memory safety while still maintaining
 performance.
@@ -346,11 +348,12 @@ Survey in 2016, 2017, and 2018."#;
 
     #[test]
     fn test_snippet() {
+        let boxed_tokenizer = SimpleTokenizer.into();
         let terms = btreemap! {
             String::from("rust") => 1.0,
             String::from("language") => 0.9
         };
-        let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 100);
+        let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 100);
         assert_eq!(fragments.len(), 7);
         {
             let first = &fragments[0];
@@ -360,24 +363,25 @@ Survey in 2016, 2017, and 2018."#;
         let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
         assert_eq!(
             snippet.fragments,
-            "Rust is a systems programming language sponsored by\n\
-             Mozilla which describes it as a \"safe"
+            "Rust is a systems programming language sponsored by \
+             Mozilla which\ndescribes it as a \"safe"
         );
         assert_eq!(
             snippet.to_html(),
             "<b>Rust</b> is a systems programming <b>language</b> \
-             sponsored by\nMozilla which describes it as a &quot;safe"
+             sponsored by Mozilla which\ndescribes it as a &quot;safe"
         )
     }
 
     #[test]
     fn test_snippet_scored_fragment() {
+        let boxed_tokenizer = SimpleTokenizer.into();
         {
             let terms = btreemap! {
                 String::from("rust") =>1.0f32,
                 String::from("language") => 0.9f32
             };
-            let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
+            let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
             {
                 let first = &fragments[0];
                 assert_eq!(first.score, 1.0);
@@ -386,12 +390,13 @@ Survey in 2016, 2017, and 2018."#;
             let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
             assert_eq!(snippet.to_html(), "<b>Rust</b> is a systems")
         }
+        let boxed_tokenizer = SimpleTokenizer.into();
         {
             let terms = btreemap! {
                 String::from("rust") =>0.9f32,
                 String::from("language") => 1.0f32
             };
-            let fragments = search_fragments(&From::from(SimpleTokenizer), TEST_TEXT, &terms, 20);
+            let fragments = search_fragments(&boxed_tokenizer, TEST_TEXT, &terms, 20);
             //assert_eq!(fragments.len(), 7);
             {
                 let first = &fragments[0];
@@ -405,12 +410,14 @@ Survey in 2016, 2017, and 2018."#;
 
     #[test]
     fn test_snippet_in_second_fragment() {
+        let boxed_tokenizer = SimpleTokenizer.into();
+
         let text = "a b c d e f g";
 
         let mut terms = BTreeMap::new();
         terms.insert(String::from("c"), 1.0);
 
-        let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
+        let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
 
         assert_eq!(fragments.len(), 1);
         {
@@ -427,12 +434,14 @@ Survey in 2016, 2017, and 2018."#;
 
     #[test]
     fn test_snippet_with_term_at_the_end_of_fragment() {
+        let boxed_tokenizer = SimpleTokenizer.into();
+
         let text = "a b c d e f f g";
 
         let mut terms = BTreeMap::new();
         terms.insert(String::from("f"), 1.0);
 
-        let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
+        let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
 
         assert_eq!(fragments.len(), 2);
         {
@@ -449,13 +458,15 @@ Survey in 2016, 2017, and 2018."#;
 
     #[test]
     fn test_snippet_with_second_fragment_has_the_highest_score() {
+        let boxed_tokenizer = SimpleTokenizer.into();
+
         let text = "a b c d e f g";
 
         let mut terms = BTreeMap::new();
         terms.insert(String::from("f"), 1.0);
         terms.insert(String::from("a"), 0.9);
 
-        let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 7);
+        let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 7);
 
         assert_eq!(fragments.len(), 2);
         {
@@ -472,12 +483,14 @@ Survey in 2016, 2017, and 2018."#;
 
     #[test]
     fn test_snippet_with_term_not_in_text() {
+        let boxed_tokenizer = SimpleTokenizer.into();
+
         let text = "a b c d";
 
         let mut terms = BTreeMap::new();
         terms.insert(String::from("z"), 1.0);
 
-        let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
+        let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
 
         assert_eq!(fragments.len(), 0);
 
@@ -488,10 +501,12 @@ Survey in 2016, 2017, and 2018."#;
 
     #[test]
     fn test_snippet_with_no_terms() {
+        let boxed_tokenizer = SimpleTokenizer.into();
+
         let text = "a b c d";
 
         let terms = BTreeMap::new();
-        let fragments = search_fragments(&From::from(SimpleTokenizer), &text, &terms, 3);
+        let fragments = search_fragments(&boxed_tokenizer, &text, &terms, 3);
         assert_eq!(fragments.len(), 0);
 
         let snippet = select_best_fragment_combination(&fragments[..], &text);
--- a/src/store/compression_lz4.rs
+++ b/src/store/compression_lz4.rs
@@ -1,9 +1,6 @@
-use std::io::{self, Read, Write};
+extern crate lz4;
 
-/// Name of the compression scheme used in the doc store.
-///
-/// This name is appended to the version string of tantivy.
-pub const COMPRESSION: &'static str = "lz4";
+use std::io::{self, Read, Write};
 
 pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
     compressed.clear();
--- a/src/store/compression_snap.rs
+++ b/src/store/compression_snap.rs
@@ -2,11 +2,6 @@ use snap;
 
 use std::io::{self, Read, Write};
 
-/// Name of the compression scheme used in the doc store.
-///
-/// This name is appended to the version string of tantivy.
-pub const COMPRESSION: &str = "snappy";
-
 pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
     compressed.clear();
     let mut encoder = snap::Writer::new(compressed);
--- a/src/store/mod.rs
+++ b/src/store/mod.rs
@@ -42,16 +42,12 @@ pub use self::writer::StoreWriter;
 #[cfg(feature = "lz4")]
 mod compression_lz4;
 #[cfg(feature = "lz4")]
-pub use self::compression_lz4::COMPRESSION;
-#[cfg(feature = "lz4")]
-use self::compression_lz4::{compress, decompress};
+use self::compression_lz4::*;
 
 #[cfg(not(feature = "lz4"))]
 mod compression_snap;
 #[cfg(not(feature = "lz4"))]
-pub use self::compression_snap::COMPRESSION;
-#[cfg(not(feature = "lz4"))]
-use self::compression_snap::{compress, decompress};
+use self::compression_snap::*;
 
 #[cfg(test)]
 pub mod tests {
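The three store hunks together show a compile-time backend switch: mutually exclusive `cfg` attributes select either the lz4 or the snappy module, and the left side additionally exposes a `COMPRESSION` name tag that gets appended to tantivy's version string. A minimal sketch of the pattern, assuming a cargo feature named "lz4"; the module and function names here are illustrative stubs, not tantivy's code:

```rust
// Exactly one backend module is compiled into any given build.
#[cfg(feature = "lz4")]
mod backend_lz4 {
    pub const COMPRESSION: &str = "lz4";
    pub fn compress(data: &[u8]) -> Vec<u8> { /* real codec goes here */ data.to_vec() }
}

#[cfg(not(feature = "lz4"))]
mod backend_snap {
    pub const COMPRESSION: &str = "snappy";
    pub fn compress(data: &[u8]) -> Vec<u8> { /* real codec goes here */ data.to_vec() }
}

// The matching re-export means the rest of the crate can call
// `compress` and read `COMPRESSION` without knowing which backend won.
#[cfg(feature = "lz4")]
pub use backend_lz4::{compress, COMPRESSION};
#[cfg(not(feature = "lz4"))]
pub use backend_snap::{compress, COMPRESSION};
```

Dropping the `pub use ... COMPRESSION` re-export, as the right-hand side does, removes that version tag from the public surface while the glob import keeps `compress`/`decompress` available internally.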
--- a/src/tokenizer/alphanum_only.rs
+++ b/src/tokenizer/alphanum_only.rs
@@ -2,7 +2,9 @@
 //! ```rust
 //! use tantivy::tokenizer::*;
 //!
-//! let tokenizer = TextAnalyzer::from(RawTokenizer)
+//! # fn main() {
+//!
+//! let tokenizer = RawTokenizer
 //!     .filter(AlphaNumOnlyFilter);
 //!
 //! let mut stream = tokenizer.token_stream("hello there");
@@ -10,7 +12,7 @@
 //! // contains a space
 //! assert!(stream.next().is_none());
 //!
-//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
+//! let tokenizer = SimpleTokenizer
 //!     .filter(AlphaNumOnlyFilter);
 //!
 //! let mut stream = tokenizer.token_stream("hello there 💣");
@@ -18,31 +20,58 @@
 //! assert!(stream.next().is_some());
 //! // the "emoji" is dropped because its not an alphanum
 //! assert!(stream.next().is_none());
+//! # }
 //! ```
-use super::{BoxTokenStream, Token, TokenFilter, TokenStream};
+use super::{Token, TokenFilter, TokenStream};
 
 /// `TokenFilter` that removes all tokens that contain non
 /// ascii alphanumeric characters.
 #[derive(Clone)]
 pub struct AlphaNumOnlyFilter;
 
-pub struct AlphaNumOnlyFilterStream<'a> {
-    tail: BoxTokenStream<'a>,
+pub struct AlphaNumOnlyFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    tail: TailTokenStream,
 }
 
-impl<'a> AlphaNumOnlyFilterStream<'a> {
+impl<TailTokenStream> AlphaNumOnlyFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
     fn predicate(&self, token: &Token) -> bool {
         token.text.chars().all(|c| c.is_ascii_alphanumeric())
     }
-}
 
-impl TokenFilter for AlphaNumOnlyFilter {
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
-        BoxTokenStream::from(AlphaNumOnlyFilterStream { tail: token_stream })
+    fn wrap(tail: TailTokenStream) -> AlphaNumOnlyFilterStream<TailTokenStream> {
+        AlphaNumOnlyFilterStream { tail }
     }
 }
 
-impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> {
+impl<TailTokenStream> TokenFilter<TailTokenStream> for AlphaNumOnlyFilter
+where
+    TailTokenStream: TokenStream,
+{
+    type ResultTokenStream = AlphaNumOnlyFilterStream<TailTokenStream>;
+
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
+        AlphaNumOnlyFilterStream::wrap(token_stream)
+    }
+}
+
+impl<TailTokenStream> TokenStream for AlphaNumOnlyFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn token(&self) -> &Token {
+        self.tail.token()
+    }
+
+    fn token_mut(&mut self) -> &mut Token {
+        self.tail.token_mut()
+    }
+
     fn advance(&mut self) -> bool {
         while self.tail.advance() {
             if self.predicate(self.tail.token()) {
@@ -52,12 +81,4 @@ impl<'a> TokenStream for AlphaNumOnlyFilterStream<'a> {
 
         false
     }
-
-    fn token(&self) -> &Token {
-        self.tail.token()
-    }
-
-    fn token_mut(&mut self) -> &mut Token {
-        self.tail.token_mut()
-    }
 }
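This `alphanum_only.rs` hunk, and the tokenizer hunks that follow, are the heart of the dispatch refactor: on the left every filter wraps a type-erased `BoxTokenStream`, on the right each filter is generic over the concrete stream it wraps and names its output through an associated type. A self-contained toy version of the generic pattern; the trait and type names mimic the diff but are simplified stand-ins, not tantivy's real definitions:

```rust
struct Token {
    text: String,
}

trait TokenStream {
    fn advance(&mut self) -> bool;
    fn token(&self) -> &Token;
}

trait TokenFilter<Tail: TokenStream> {
    type ResultTokenStream: TokenStream;
    fn transform(&self, tail: Tail) -> Self::ResultTokenStream;
}

struct AlphaNumOnly;

struct AlphaNumOnlyStream<Tail: TokenStream> {
    tail: Tail,
}

impl<Tail: TokenStream> TokenFilter<Tail> for AlphaNumOnly {
    // The concrete stream type is known at compile time: each filter in a
    // chain nests its tail's type instead of boxing it.
    type ResultTokenStream = AlphaNumOnlyStream<Tail>;
    fn transform(&self, tail: Tail) -> Self::ResultTokenStream {
        AlphaNumOnlyStream { tail }
    }
}

impl<Tail: TokenStream> TokenStream for AlphaNumOnlyStream<Tail> {
    fn advance(&mut self) -> bool {
        // Skip tokens containing non ascii-alphanumeric characters.
        while self.tail.advance() {
            if self.tail.token().text.chars().all(|c| c.is_ascii_alphanumeric()) {
                return true;
            }
        }
        false
    }
    fn token(&self) -> &Token {
        self.tail.token()
    }
}

struct VecTokenStream {
    tokens: Vec<Token>,
    idx: usize,
}

impl TokenStream for VecTokenStream {
    fn advance(&mut self) -> bool {
        if self.idx < self.tokens.len() {
            self.idx += 1;
            true
        } else {
            false
        }
    }
    fn token(&self) -> &Token {
        &self.tokens[self.idx - 1]
    }
}

fn main() {
    let source = VecTokenStream {
        tokens: vec![Token { text: "hello".into() }, Token { text: "💣".into() }],
        idx: 0,
    };
    let mut filtered = AlphaNumOnly.transform(source);
    assert!(filtered.advance());
    assert_eq!(filtered.token().text, "hello");
    assert!(!filtered.advance()); // the emoji token is filtered out
}
```

The generic form monomorphizes the whole chain, so per-token calls are direct rather than virtual; the cost, visible in the snippet hunks above, is that heterogeneous tokenizers no longer share one type and need an explicit `BoxedTokenizer` wrapper wherever they are stored.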
--- a/src/tokenizer/ascii_folding_filter.rs
+++ b/src/tokenizer/ascii_folding_filter.rs
@@ -1,4 +1,4 @@
-use super::{BoxTokenStream, Token, TokenFilter, TokenStream};
+use super::{Token, TokenFilter, TokenStream};
 use std::mem;
 
 /// This class converts alphabetic, numeric, and symbolic Unicode characters
@@ -7,21 +7,26 @@ use std::mem;
 #[derive(Clone)]
 pub struct AsciiFoldingFilter;
 
-impl TokenFilter for AsciiFoldingFilter {
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
-        From::from(AsciiFoldingFilterTokenStream {
-            tail: token_stream,
-            buffer: String::with_capacity(100),
-        })
+impl<TailTokenStream> TokenFilter<TailTokenStream> for AsciiFoldingFilter
+where
+    TailTokenStream: TokenStream,
+{
+    type ResultTokenStream = AsciiFoldingFilterTokenStream<TailTokenStream>;
+
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
+        AsciiFoldingFilterTokenStream::wrap(token_stream)
     }
 }
 
-pub struct AsciiFoldingFilterTokenStream<'a> {
+pub struct AsciiFoldingFilterTokenStream<TailTokenStream> {
     buffer: String,
-    tail: BoxTokenStream<'a>,
+    tail: TailTokenStream,
 }
 
-impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> {
+impl<TailTokenStream> TokenStream for AsciiFoldingFilterTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
     fn advance(&mut self) -> bool {
         if !self.tail.advance() {
             return false;
@@ -43,6 +48,18 @@ impl<'a> TokenStream for AsciiFoldingFilterTokenStream<'a> {
     }
 }
 
+impl<TailTokenStream> AsciiFoldingFilterTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn wrap(tail: TailTokenStream) -> AsciiFoldingFilterTokenStream<TailTokenStream> {
+        AsciiFoldingFilterTokenStream {
+            tail,
+            buffer: String::with_capacity(100),
+        }
+    }
+}
+
 // Returns a string that represents the ascii folded version of
 // the character. If the `char` does not require ascii folding
 // (e.g. simple ASCII chars like `A`) or if the `char`
@@ -1544,7 +1561,8 @@ mod tests {
     use crate::tokenizer::AsciiFoldingFilter;
     use crate::tokenizer::RawTokenizer;
     use crate::tokenizer::SimpleTokenizer;
-    use crate::tokenizer::TextAnalyzer;
+    use crate::tokenizer::TokenStream;
+    use crate::tokenizer::Tokenizer;
     use std::iter;
 
     #[test]
@@ -1561,7 +1579,7 @@ mod tests {
 
     fn folding_helper(text: &str) -> Vec<String> {
         let mut tokens = Vec::new();
-        TextAnalyzer::from(SimpleTokenizer)
+        SimpleTokenizer
             .filter(AsciiFoldingFilter)
             .token_stream(text)
             .process(&mut |token| {
@@ -1571,9 +1589,7 @@ mod tests {
     }
 
     fn folding_using_raw_tokenizer_helper(text: &str) -> String {
-        let mut token_stream = TextAnalyzer::from(RawTokenizer)
-            .filter(AsciiFoldingFilter)
-            .token_stream(text);
+        let mut token_stream = RawTokenizer.filter(AsciiFoldingFilter).token_stream(text);
        token_stream.advance();
        token_stream.token().text.clone()
    }
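The pattern above (a zero-sized filter type, a generic `*TokenStream` wrapper, and a private `wrap` constructor) repeats across every filter touched by this branch. A minimal usage sketch, assuming the branch compiles as shown; note that `.filter(...)` is now called directly on the tokenizer, since the method has moved from `TextAnalyzer` into the `Tokenizer` trait itself:

    use tantivy::tokenizer::*;

    fn main() {
        // Mirrors the rewritten folding_helper test: SimpleTokenizer feeds
        // AsciiFoldingFilter, which maps accented characters to plain ASCII.
        let mut token_stream = SimpleTokenizer
            .filter(AsciiFoldingFilter)
            .token_stream("Ramón señor");
        while let Some(token) = token_stream.next() {
            println!("{}", token.text); // "Ramon", then "senor"
        }
    }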
--- a/src/tokenizer/facet_tokenizer.rs
+++ b/src/tokenizer/facet_tokenizer.rs
@@ -1,4 +1,4 @@
-use super::{BoxTokenStream, Token, TokenStream, Tokenizer};
+use super::{Token, TokenStream, Tokenizer};
 use crate::schema::FACET_SEP_BYTE;
 
 /// The `FacetTokenizer` process a `Facet` binary representation
@@ -25,14 +25,15 @@ pub struct FacetTokenStream<'a> {
     token: Token,
 }
 
-impl Tokenizer for FacetTokenizer {
-    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
+impl<'a> Tokenizer<'a> for FacetTokenizer {
+    type TokenStreamImpl = FacetTokenStream<'a>;
+
+    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
         FacetTokenStream {
             text,
             state: State::RootFacetNotEmitted, //< pos is the first char that has not been processed yet.
             token: Token::default(),
         }
-        .into()
     }
 }
 
@@ -83,7 +84,7 @@ mod tests {
 
     use super::FacetTokenizer;
     use crate::schema::Facet;
-    use crate::tokenizer::{Token, Tokenizer};
+    use crate::tokenizer::{Token, TokenStream, Tokenizer};
 
     #[test]
     fn test_facet_tokenizer() {
--- a/src/tokenizer/lower_caser.rs
+++ b/src/tokenizer/lower_caser.rs
@@ -1,23 +1,24 @@
 use super::{Token, TokenFilter, TokenStream};
-use crate::tokenizer::BoxTokenStream;
 use std::mem;
 
-impl TokenFilter for LowerCaser {
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
-        BoxTokenStream::from(LowerCaserTokenStream {
-            tail: token_stream,
-            buffer: String::with_capacity(100),
-        })
-    }
-}
-
 /// Token filter that lowercase terms.
 #[derive(Clone)]
 pub struct LowerCaser;
 
-pub struct LowerCaserTokenStream<'a> {
+impl<TailTokenStream> TokenFilter<TailTokenStream> for LowerCaser
+where
+    TailTokenStream: TokenStream,
+{
+    type ResultTokenStream = LowerCaserTokenStream<TailTokenStream>;
+
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
+        LowerCaserTokenStream::wrap(token_stream)
+    }
+}
+
+pub struct LowerCaserTokenStream<TailTokenStream> {
     buffer: String,
-    tail: BoxTokenStream<'a>,
+    tail: TailTokenStream,
 }
 
 // writes a lowercased version of text into output.
@@ -30,7 +31,18 @@ fn to_lowercase_unicode(text: &mut String, output: &mut String) {
     }
 }
 
-impl<'a> TokenStream for LowerCaserTokenStream<'a> {
+impl<TailTokenStream> TokenStream for LowerCaserTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn token(&self) -> &Token {
+        self.tail.token()
+    }
+
+    fn token_mut(&mut self) -> &mut Token {
+        self.tail.token_mut()
+    }
+
     fn advance(&mut self) -> bool {
         if !self.tail.advance() {
             return false;
@@ -44,19 +56,26 @@ impl<'a> TokenStream for LowerCaserTokenStream<'a> {
         }
         true
     }
-
-    fn token(&self) -> &Token {
-        self.tail.token()
-    }
-
-    fn token_mut(&mut self) -> &mut Token {
-        self.tail.token_mut()
-    }
 }
 
+impl<TailTokenStream> LowerCaserTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn wrap(tail: TailTokenStream) -> LowerCaserTokenStream<TailTokenStream> {
+        LowerCaserTokenStream {
+            tail,
+            buffer: String::with_capacity(100),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
-    use crate::tokenizer::{LowerCaser, SimpleTokenizer, TextAnalyzer};
+    use crate::tokenizer::LowerCaser;
+    use crate::tokenizer::SimpleTokenizer;
+    use crate::tokenizer::TokenStream;
+    use crate::tokenizer::Tokenizer;
 
     #[test]
     fn test_to_lower_case() {
@@ -68,9 +87,7 @@ mod tests {
 
     fn lowercase_helper(text: &str) -> Vec<String> {
         let mut tokens = vec![];
-        let mut token_stream = TextAnalyzer::from(SimpleTokenizer)
-            .filter(LowerCaser)
-            .token_stream(text);
+        let mut token_stream = SimpleTokenizer.filter(LowerCaser).token_stream(text);
         while token_stream.advance() {
             let token_text = token_stream.token().text.clone();
             tokens.push(token_text);
--- a/src/tokenizer/mod.rs
+++ b/src/tokenizer/mod.rs
@@ -7,6 +7,7 @@
 //! ```rust
 //! use tantivy::schema::*;
 //!
+//! # fn main() {
 //! let mut schema_builder = Schema::builder();
 //!
 //! let text_options = TextOptions::default()
@@ -30,6 +31,7 @@
 //! schema_builder.add_text_field("uuid", id_options);
 //!
 //! let schema = schema_builder.build();
+//! # }
 //! ```
 //!
 //! By default, `tantivy` offers the following tokenizers:
@@ -64,10 +66,12 @@
 //! ```rust
 //! use tantivy::tokenizer::*;
 //!
-//! let en_stem = TextAnalyzer::from(SimpleTokenizer)
+//! # fn main() {
+//! let en_stem = SimpleTokenizer
 //!     .filter(RemoveLongFilter::limit(40))
 //!     .filter(LowerCaser)
 //!     .filter(Stemmer::new(Language::English));
+//! # }
 //! ```
 //!
 //! Once your tokenizer is defined, you need to
@@ -77,12 +81,13 @@
 //! # use tantivy::schema::Schema;
 //! # use tantivy::tokenizer::*;
 //! # use tantivy::Index;
-//! #
-//! let custom_en_tokenizer = SimpleTokenizer;
+//! # fn main() {
+//! # let custom_en_tokenizer = SimpleTokenizer;
 //! # let schema = Schema::builder().build();
 //! let index = Index::create_in_ram(schema);
 //! index.tokenizers()
 //!      .register("custom_en", custom_en_tokenizer);
+//! # }
 //! ```
 //!
 //! If you built your schema programmatically, a complete example
@@ -97,6 +102,7 @@
 //! use tantivy::tokenizer::*;
 //! use tantivy::Index;
 //!
+//! # fn main() {
 //! let mut schema_builder = Schema::builder();
 //! let text_field_indexing = TextFieldIndexing::default()
 //!     .set_tokenizer("custom_en")
@@ -109,12 +115,14 @@
 //! let index = Index::create_in_ram(schema);
 //!
 //! // We need to register our tokenizer :
-//! let custom_en_tokenizer = TextAnalyzer::from(SimpleTokenizer)
+//! let custom_en_tokenizer = SimpleTokenizer
 //!     .filter(RemoveLongFilter::limit(40))
 //!     .filter(LowerCaser);
 //! index
 //!     .tokenizers()
 //!     .register("custom_en", custom_en_tokenizer);
+//! // ...
+//! # }
 //! ```
 //!
 mod alphanum_only;
@@ -128,7 +136,6 @@ mod simple_tokenizer;
 mod stemmer;
 mod stop_word_filter;
 mod token_stream_chain;
-mod tokenized_string;
 mod tokenizer;
 mod tokenizer_manager;
 
@@ -143,12 +150,9 @@ pub use self::simple_tokenizer::SimpleTokenizer;
 pub use self::stemmer::{Language, Stemmer};
 pub use self::stop_word_filter::StopWordFilter;
 pub(crate) use self::token_stream_chain::TokenStreamChain;
-pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
-pub use self::tokenizer::{
-    BoxTokenFilter, BoxTokenStream, TextAnalyzer, Token, TokenFilter, TokenStream, Tokenizer,
-};
+pub use self::tokenizer::BoxedTokenizer;
+pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
 
 pub use self::tokenizer_manager::TokenizerManager;
 
 /// Maximum authorized len (in bytes) for a token.
@@ -161,9 +165,9 @@ pub const MAX_TOKEN_LEN: usize = u16::max_value() as usize - 4;
 #[cfg(test)]
 pub mod tests {
     use super::{
-        Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, TokenizerManager,
+        Language, LowerCaser, RemoveLongFilter, SimpleTokenizer, Stemmer, Token, Tokenizer,
+        TokenizerManager,
     };
-    use crate::tokenizer::TextAnalyzer;
 
     /// This is a function that can be used in tests and doc tests
     /// to assert a token's correctness.
@@ -230,7 +234,7 @@ pub mod tests {
        let tokenizer_manager = TokenizerManager::default();
        tokenizer_manager.register(
            "el_stem",
-            TextAnalyzer::from(SimpleTokenizer)
+            SimpleTokenizer
                .filter(RemoveLongFilter::limit(40))
                .filter(LowerCaser)
                .filter(Stemmer::new(Language::Greek)),
--- a/src/tokenizer/ngram_tokenizer.rs
+++ b/src/tokenizer/ngram_tokenizer.rs
@@ -1,5 +1,4 @@
 use super::{Token, TokenStream, Tokenizer};
-use crate::tokenizer::BoxTokenStream;
 
 /// Tokenize the text by splitting words into n-grams of the given size(s)
 ///
@@ -32,7 +31,7 @@ use crate::tokenizer::BoxTokenStream;
 ///
 /// ```rust
 /// use tantivy::tokenizer::*;
-///
+/// # fn main() {
 /// let tokenizer = NgramTokenizer::new(2, 3, false);
 /// let mut stream = tokenizer.token_stream("hello");
 /// {
@@ -78,6 +77,7 @@ use crate::tokenizer::BoxTokenStream;
 ///     assert_eq!(token.offset_to, 5);
 /// }
 /// assert!(stream.next().is_none());
+/// # }
 /// ```
 #[derive(Clone)]
 pub struct NgramTokenizer {
@@ -130,9 +130,11 @@ pub struct NgramTokenStream<'a> {
     token: Token,
 }
 
-impl Tokenizer for NgramTokenizer {
-    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
-        From::from(NgramTokenStream {
+impl<'a> Tokenizer<'a> for NgramTokenizer {
+    type TokenStreamImpl = NgramTokenStream<'a>;
+
+    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
+        NgramTokenStream {
             ngram_charidx_iterator: StutteringIterator::new(
                 CodepointFrontiers::for_str(text),
                 self.min_gram,
@@ -141,7 +143,7 @@ impl Tokenizer for NgramTokenizer {
             prefix_only: self.prefix_only,
             text,
             token: Token::default(),
-        })
+        }
     }
 }
 
@@ -307,10 +309,10 @@ mod tests {
     use super::NgramTokenizer;
     use super::StutteringIterator;
     use crate::tokenizer::tests::assert_token;
-    use crate::tokenizer::tokenizer::Tokenizer;
-    use crate::tokenizer::{BoxTokenStream, Token};
+    use crate::tokenizer::tokenizer::{TokenStream, Tokenizer};
+    use crate::tokenizer::Token;
 
-    fn test_helper(mut tokenizer: BoxTokenStream) -> Vec<Token> {
+    fn test_helper<T: TokenStream>(mut tokenizer: T) -> Vec<Token> {
         let mut tokens: Vec<Token> = vec![];
         tokenizer.process(&mut |token: &Token| tokens.push(token.clone()));
         tokens
--- a/src/tokenizer/raw_tokenizer.rs
+++ b/src/tokenizer/raw_tokenizer.rs
@@ -1,5 +1,4 @@
 use super::{Token, TokenStream, Tokenizer};
-use crate::tokenizer::BoxTokenStream;
 
 /// For each value of the field, emit a single unprocessed token.
 #[derive(Clone)]
@@ -10,8 +9,10 @@ pub struct RawTokenStream {
     has_token: bool,
 }
 
-impl Tokenizer for RawTokenizer {
-    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
+impl<'a> Tokenizer<'a> for RawTokenizer {
+    type TokenStreamImpl = RawTokenStream;
+
+    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
         let token = Token {
             offset_from: 0,
             offset_to: text.len(),
@@ -23,7 +24,6 @@ impl Tokenizer for RawTokenizer {
             token,
             has_token: true,
         }
-        .into()
     }
 }
 
--- a/src/tokenizer/remove_long.rs
+++ b/src/tokenizer/remove_long.rs
@@ -2,7 +2,9 @@
 //! ```rust
 //! use tantivy::tokenizer::*;
 //!
-//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
+//! # fn main() {
+//!
+//! let tokenizer = SimpleTokenizer
 //!     .filter(RemoveLongFilter::limit(5));
 //!
 //! let mut stream = tokenizer.token_stream("toolong nice");
@@ -10,10 +12,10 @@
 //! // out of the token stream.
 //! assert_eq!(stream.next().unwrap().text, "nice");
 //! assert!(stream.next().is_none());
+//! # }
 //! ```
 //!
 use super::{Token, TokenFilter, TokenStream};
-use crate::tokenizer::BoxTokenStream;
 
 /// `RemoveLongFilter` removes tokens that are longer
 /// than a given number of bytes (in UTF-8 representation).
@@ -32,27 +34,56 @@ impl RemoveLongFilter {
     }
 }
 
-impl<'a> RemoveLongFilterStream<'a> {
+impl<TailTokenStream> RemoveLongFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
     fn predicate(&self, token: &Token) -> bool {
         token.text.len() < self.token_length_limit
     }
-}
 
-impl TokenFilter for RemoveLongFilter {
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
-        BoxTokenStream::from(RemoveLongFilterStream {
-            token_length_limit: self.length_limit,
-            tail: token_stream,
-        })
+    fn wrap(
+        token_length_limit: usize,
+        tail: TailTokenStream,
+    ) -> RemoveLongFilterStream<TailTokenStream> {
+        RemoveLongFilterStream {
+            token_length_limit,
+            tail,
+        }
     }
 }
 
-pub struct RemoveLongFilterStream<'a> {
-    token_length_limit: usize,
-    tail: BoxTokenStream<'a>,
+impl<TailTokenStream> TokenFilter<TailTokenStream> for RemoveLongFilter
+where
+    TailTokenStream: TokenStream,
+{
+    type ResultTokenStream = RemoveLongFilterStream<TailTokenStream>;
+
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
+        RemoveLongFilterStream::wrap(self.length_limit, token_stream)
+    }
 }
 
-impl<'a> TokenStream for RemoveLongFilterStream<'a> {
+pub struct RemoveLongFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    token_length_limit: usize,
+    tail: TailTokenStream,
+}
+
+impl<TailTokenStream> TokenStream for RemoveLongFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn token(&self) -> &Token {
+        self.tail.token()
+    }
+
+    fn token_mut(&mut self) -> &mut Token {
+        self.tail.token_mut()
+    }
+
     fn advance(&mut self) -> bool {
         while self.tail.advance() {
             if self.predicate(self.tail.token()) {
@@ -61,12 +92,4 @@ impl<'a> TokenStream for RemoveLongFilterStream<'a> {
             }
         }
         false
     }
-
-    fn token(&self) -> &Token {
-        self.tail.token()
-    }
-
-    fn token_mut(&mut self) -> &mut Token {
-        self.tail.token_mut()
-    }
 }
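The rewritten `RemoveLongFilterStream` doubles as a template for custom filters under the new trait: implement `TokenFilter<TailTokenStream>` on a `Clone` factory type and `TokenStream` on the wrapping stream. A hypothetical sketch; the `NonEmptyFilter` name and behavior are invented here for illustration and are not part of this change:

    use tantivy::tokenizer::{Token, TokenFilter, TokenStream};

    // Factory type: must be Clone under the new TokenFilter trait.
    #[derive(Clone)]
    pub struct NonEmptyFilter;

    pub struct NonEmptyFilterStream<TailTokenStream> {
        tail: TailTokenStream,
    }

    impl<TailTokenStream> TokenFilter<TailTokenStream> for NonEmptyFilter
    where
        TailTokenStream: TokenStream,
    {
        type ResultTokenStream = NonEmptyFilterStream<TailTokenStream>;

        fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
            NonEmptyFilterStream { tail: token_stream }
        }
    }

    impl<TailTokenStream> TokenStream for NonEmptyFilterStream<TailTokenStream>
    where
        TailTokenStream: TokenStream,
    {
        fn advance(&mut self) -> bool {
            // Skip tokens whose text is empty, mirroring the predicate loop
            // in RemoveLongFilterStream above.
            while self.tail.advance() {
                if !self.tail.token().text.is_empty() {
                    return true;
                }
            }
            false
        }

        fn token(&self) -> &Token {
            self.tail.token()
        }

        fn token_mut(&mut self) -> &mut Token {
            self.tail.token_mut()
        }
    }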
--- a/src/tokenizer/simple_tokenizer.rs
+++ b/src/tokenizer/simple_tokenizer.rs
@@ -1,4 +1,3 @@
-use super::BoxTokenStream;
 use super::{Token, TokenStream, Tokenizer};
 use std::str::CharIndices;
 
@@ -12,13 +11,15 @@ pub struct SimpleTokenStream<'a> {
     token: Token,
 }
 
-impl Tokenizer for SimpleTokenizer {
-    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
-        BoxTokenStream::from(SimpleTokenStream {
+impl<'a> Tokenizer<'a> for SimpleTokenizer {
+    type TokenStreamImpl = SimpleTokenStream<'a>;
+
+    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
+        SimpleTokenStream {
             text,
             chars: text.char_indices(),
             token: Token::default(),
-        })
+        }
     }
 }
 
--- a/src/tokenizer/stemmer.rs
+++ b/src/tokenizer/stemmer.rs
@@ -1,5 +1,4 @@
 use super::{Token, TokenFilter, TokenStream};
-use crate::tokenizer::BoxTokenStream;
 use rust_stemmers::{self, Algorithm};
 
 /// Available stemmer languages.
@@ -16,7 +15,6 @@ pub enum Language {
     Greek,
     Hungarian,
     Italian,
-    Norwegian,
     Portuguese,
     Romanian,
     Russian,
@@ -40,7 +38,6 @@ impl Language {
             Greek => Algorithm::Greek,
             Hungarian => Algorithm::Hungarian,
             Italian => Algorithm::Italian,
-            Norwegian => Algorithm::Norwegian,
             Portuguese => Algorithm::Portuguese,
             Romanian => Algorithm::Romanian,
             Russian => Algorithm::Russian,
@@ -76,22 +73,38 @@ impl Default for Stemmer {
     }
 }
 
-impl TokenFilter for Stemmer {
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
+impl<TailTokenStream> TokenFilter<TailTokenStream> for Stemmer
+where
+    TailTokenStream: TokenStream,
+{
+    type ResultTokenStream = StemmerTokenStream<TailTokenStream>;
+
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
         let inner_stemmer = rust_stemmers::Stemmer::create(self.stemmer_algorithm);
-        BoxTokenStream::from(StemmerTokenStream {
-            tail: token_stream,
-            stemmer: inner_stemmer,
-        })
+        StemmerTokenStream::wrap(inner_stemmer, token_stream)
     }
 }
 
-pub struct StemmerTokenStream<'a> {
-    tail: BoxTokenStream<'a>,
+pub struct StemmerTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    tail: TailTokenStream,
     stemmer: rust_stemmers::Stemmer,
 }
 
-impl<'a> TokenStream for StemmerTokenStream<'a> {
+impl<TailTokenStream> TokenStream for StemmerTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn token(&self) -> &Token {
+        self.tail.token()
+    }
+
+    fn token_mut(&mut self) -> &mut Token {
+        self.tail.token_mut()
+    }
+
     fn advance(&mut self) -> bool {
         if !self.tail.advance() {
             return false;
@@ -102,12 +115,16 @@ impl<'a> TokenStream for StemmerTokenStream<'a> {
         self.token_mut().text.push_str(&stemmed_str);
         true
     }
-
-    fn token(&self) -> &Token {
-        self.tail.token()
-    }
-
-    fn token_mut(&mut self) -> &mut Token {
-        self.tail.token_mut()
-    }
 }
+
+impl<TailTokenStream> StemmerTokenStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn wrap(
+        stemmer: rust_stemmers::Stemmer,
+        tail: TailTokenStream,
+    ) -> StemmerTokenStream<TailTokenStream> {
+        StemmerTokenStream { tail, stemmer }
+    }
+}
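Note that, besides the trait migration, this hunk also drops `Norwegian` from the `Language` enum and its `Algorithm` mapping. Day-to-day stemmer usage only changes in the chaining syntax; a minimal sketch, assuming the branch builds as shown (the expected output is illustrative of snowball stemming, not asserted by this diff):

    use tantivy::tokenizer::*;

    fn main() {
        let mut stream = SimpleTokenizer
            .filter(Stemmer::new(Language::English))
            .token_stream("dancing dances");
        while let Some(token) = stream.next() {
            // The English snowball stemmer should reduce both tokens to "danc".
            println!("{}", token.text);
        }
    }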
--- a/src/tokenizer/stop_word_filter.rs
+++ b/src/tokenizer/stop_word_filter.rs
@@ -2,16 +2,17 @@
 //! ```rust
 //! use tantivy::tokenizer::*;
 //!
-//! let tokenizer = TextAnalyzer::from(SimpleTokenizer)
+//! # fn main() {
+//! let tokenizer = SimpleTokenizer
 //!     .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
 //!
 //! let mut stream = tokenizer.token_stream("the fox is crafty");
 //! assert_eq!(stream.next().unwrap().text, "fox");
 //! assert_eq!(stream.next().unwrap().text, "crafty");
 //! assert!(stream.next().is_none());
+//! # }
 //! ```
 use super::{Token, TokenFilter, TokenStream};
-use crate::tokenizer::BoxTokenStream;
 use fnv::FnvHasher;
 use std::collections::HashSet;
 use std::hash::BuildHasherDefault;
@@ -45,31 +46,57 @@ impl StopWordFilter {
             "there", "these", "they", "this", "to", "was", "will", "with",
         ];
 
-        StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect())
+        StopWordFilter::remove(words.iter().map(|s| s.to_string()).collect())
     }
 }
 
-pub struct StopWordFilterStream<'a> {
+pub struct StopWordFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
     words: StopWordHashSet,
-    tail: BoxTokenStream<'a>,
+    tail: TailTokenStream,
 }
 
-impl TokenFilter for StopWordFilter {
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a> {
-        BoxTokenStream::from(StopWordFilterStream {
-            words: self.words.clone(),
-            tail: token_stream,
-        })
+impl<TailTokenStream> TokenFilter<TailTokenStream> for StopWordFilter
+where
+    TailTokenStream: TokenStream,
+{
+    type ResultTokenStream = StopWordFilterStream<TailTokenStream>;
+
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream {
+        StopWordFilterStream::wrap(self.words.clone(), token_stream)
     }
 }
 
-impl<'a> StopWordFilterStream<'a> {
+impl<TailTokenStream> StopWordFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
     fn predicate(&self, token: &Token) -> bool {
         !self.words.contains(&token.text)
     }
+
+    fn wrap(
+        words: StopWordHashSet,
+        tail: TailTokenStream,
+    ) -> StopWordFilterStream<TailTokenStream> {
+        StopWordFilterStream { words, tail }
+    }
 }
 
-impl<'a> TokenStream for StopWordFilterStream<'a> {
+impl<TailTokenStream> TokenStream for StopWordFilterStream<TailTokenStream>
+where
+    TailTokenStream: TokenStream,
+{
+    fn token(&self) -> &Token {
+        self.tail.token()
+    }
+
+    fn token_mut(&mut self) -> &mut Token {
+        self.tail.token_mut()
+    }
+
     fn advance(&mut self) -> bool {
         while self.tail.advance() {
             if self.predicate(self.tail.token()) {
@@ -78,14 +105,6 @@ impl<'a> TokenStream for StopWordFilterStream<'a> {
             }
         }
         false
     }
-
-    fn token(&self) -> &Token {
-        self.tail.token()
-    }
-
-    fn token_mut(&mut self) -> &mut Token {
-        self.tail.token_mut()
-    }
 }
 
 impl Default for StopWordFilter {
--- a/src/tokenizer/token_stream_chain.rs
+++ b/src/tokenizer/token_stream_chain.rs
@@ -1,21 +1,23 @@
-use crate::tokenizer::{BoxTokenStream, Token, TokenStream};
-use std::ops::DerefMut;
+use crate::tokenizer::{Token, TokenStream};
 
 const POSITION_GAP: usize = 2;
 
-pub(crate) struct TokenStreamChain<'a> {
+pub(crate) struct TokenStreamChain<TTokenStream: TokenStream> {
     offsets: Vec<usize>,
-    token_streams: Vec<BoxTokenStream<'a>>,
+    token_streams: Vec<TTokenStream>,
     position_shift: usize,
     stream_idx: usize,
     token: Token,
 }
 
-impl<'a> TokenStreamChain<'a> {
+impl<'a, TTokenStream> TokenStreamChain<TTokenStream>
+where
+    TTokenStream: TokenStream,
+{
     pub fn new(
         offsets: Vec<usize>,
-        token_streams: Vec<BoxTokenStream<'a>>,
-    ) -> TokenStreamChain<'a> {
+        token_streams: Vec<TTokenStream>,
+    ) -> TokenStreamChain<TTokenStream> {
         TokenStreamChain {
             offsets,
             stream_idx: 0,
@@ -26,10 +28,13 @@ impl<'a> TokenStreamChain<'a> {
     }
 }
 
-impl<'a> TokenStream for TokenStreamChain<'a> {
+impl<'a, TTokenStream> TokenStream for TokenStreamChain<TTokenStream>
+where
+    TTokenStream: TokenStream,
+{
     fn advance(&mut self) -> bool {
         while self.stream_idx < self.token_streams.len() {
-            let token_stream = self.token_streams[self.stream_idx].deref_mut();
+            let token_stream = &mut self.token_streams[self.stream_idx];
             if token_stream.advance() {
                 let token = token_stream.token();
                 let offset_offset = self.offsets[self.stream_idx];
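`TokenStreamChain` is crate-private, so the `POSITION_GAP` of 2 is only observable through `BoxedTokenizer::token_stream_texts` (rewritten in the `tokenizer.rs` diff below), which chains one stream per field value. A sketch of the expected behavior, assuming the API lands as in this diff:

    use tantivy::tokenizer::*;

    fn main() {
        let tokenizer: BoxedTokenizer = SimpleTokenizer.into();
        // "hello"/"world" take positions 0 and 1; the second text then resumes
        // after an artificial gap of 2, so "a"/"b" should land at positions 3 and 4.
        let mut stream = tokenizer.token_stream_texts(&["hello world", "a b"]);
        while let Some(token) = stream.next() {
            println!("{} @ position {}", token.text, token.position);
        }
    }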
--- a/src/tokenizer/tokenized_string.rs
+++ /dev/null
@@ -1,190 +0,0 @@
-use crate::tokenizer::{BoxTokenStream, Token, TokenStream, TokenStreamChain};
-use std::cmp::Ordering;
-
-/// Struct representing pre-tokenized text
-#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
-pub struct PreTokenizedString {
-    /// Original text
-    pub text: String,
-    /// Tokens derived from the text
-    pub tokens: Vec<Token>,
-}
-
-impl Ord for PreTokenizedString {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.text.cmp(&other.text)
-    }
-}
-
-impl PartialOrd for PreTokenizedString {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-/// TokenStream implementation which wraps PreTokenizedString
-pub struct PreTokenizedStream {
-    tokenized_string: PreTokenizedString,
-    current_token: i64,
-}
-
-impl From<PreTokenizedString> for PreTokenizedStream {
-    fn from(s: PreTokenizedString) -> PreTokenizedStream {
-        PreTokenizedStream {
-            tokenized_string: s,
-            current_token: -1,
-        }
-    }
-}
-
-impl PreTokenizedStream {
-    /// Creates a TokenStream from PreTokenizedString array
-    pub fn chain_tokenized_strings<'a>(
-        tok_strings: &'a [&'a PreTokenizedString],
-    ) -> BoxTokenStream {
-        if tok_strings.len() == 1 {
-            PreTokenizedStream::from((*tok_strings[0]).clone()).into()
-        } else {
-            let mut offsets = vec![];
-            let mut total_offset = 0;
-            for &tok_string in tok_strings {
-                offsets.push(total_offset);
-                if let Some(last_token) = tok_string.tokens.last() {
-                    total_offset += last_token.offset_to;
-                }
-            }
-            // TODO remove the string cloning.
-            let token_streams: Vec<BoxTokenStream<'static>> = tok_strings
-                .iter()
-                .map(|&tok_string| PreTokenizedStream::from((*tok_string).clone()).into())
-                .collect();
-            TokenStreamChain::new(offsets, token_streams).into()
-        }
-    }
-}
-
-impl TokenStream for PreTokenizedStream {
-    fn advance(&mut self) -> bool {
-        self.current_token += 1;
-        self.current_token < self.tokenized_string.tokens.len() as i64
-    }
-
-    fn token(&self) -> &Token {
-        assert!(
-            self.current_token >= 0,
-            "TokenStream not initialized. You should call advance() at least once."
-        );
-        &self.tokenized_string.tokens[self.current_token as usize]
-    }
-
-    fn token_mut(&mut self) -> &mut Token {
-        assert!(
-            self.current_token >= 0,
-            "TokenStream not initialized. You should call advance() at least once."
-        );
-        &mut self.tokenized_string.tokens[self.current_token as usize]
-    }
-}
-
-#[cfg(test)]
-mod tests {
-
-    use super::*;
-    use crate::tokenizer::Token;
-
-    #[test]
-    fn test_tokenized_stream() {
-        let tok_text = PreTokenizedString {
-            text: String::from("A a"),
-            tokens: vec![
-                Token {
-                    offset_from: 0,
-                    offset_to: 1,
-                    position: 0,
-                    text: String::from("A"),
-                    position_length: 1,
-                },
-                Token {
-                    offset_from: 2,
-                    offset_to: 3,
-                    position: 1,
-                    text: String::from("a"),
-                    position_length: 1,
-                },
-            ],
-        };
-
-        let mut token_stream = PreTokenizedStream::from(tok_text.clone());
-
-        for expected_token in tok_text.tokens {
-            assert!(token_stream.advance());
-            assert_eq!(token_stream.token(), &expected_token);
-        }
-        assert!(!token_stream.advance());
-    }
-
-    #[test]
-    fn test_chain_tokenized_strings() {
-        let tok_text = PreTokenizedString {
-            text: String::from("A a"),
-            tokens: vec![
-                Token {
-                    offset_from: 0,
-                    offset_to: 1,
-                    position: 0,
-                    text: String::from("A"),
-                    position_length: 1,
-                },
-                Token {
-                    offset_from: 2,
-                    offset_to: 3,
-                    position: 1,
-                    text: String::from("a"),
-                    position_length: 1,
-                },
-            ],
-        };
-
-        let chain_parts = vec![&tok_text, &tok_text];
-
-        let mut token_stream = PreTokenizedStream::chain_tokenized_strings(&chain_parts[..]);
-
-        let expected_tokens = vec![
-            Token {
-                offset_from: 0,
-                offset_to: 1,
-                position: 0,
-                text: String::from("A"),
-                position_length: 1,
-            },
-            Token {
-                offset_from: 2,
-                offset_to: 3,
-                position: 1,
-                text: String::from("a"),
-                position_length: 1,
-            },
-            Token {
-                offset_from: 3,
-                offset_to: 4,
-                position: 3,
-                text: String::from("A"),
-                position_length: 1,
-            },
-            Token {
-                offset_from: 5,
-                offset_to: 6,
-                position: 4,
-                text: String::from("a"),
-                position_length: 1,
-            },
-        ];
-
-        for expected_token in expected_tokens {
-            assert!(token_stream.advance());
-            assert_eq!(token_stream.token(), &expected_token);
-        }
-        assert!(!token_stream.advance());
-    }
-}
--- a/src/tokenizer/tokenizer.rs
+++ b/src/tokenizer/tokenizer.rs
@@ -2,10 +2,9 @@ use crate::tokenizer::TokenStreamChain;
 /// The tokenizer module contains all of the tools used to process
 /// text in `tantivy`.
 use std::borrow::{Borrow, BorrowMut};
-use std::ops::{Deref, DerefMut};
 
 /// Token
-#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
+#[derive(Debug, Clone)]
 pub struct Token {
     /// Offset (byte index) of the first character of the token.
     /// Offsets shall not be modified by token filters.
@@ -34,31 +33,20 @@ impl Default for Token {
     }
 }
 
-/// `TextAnalyzer` tokenizes an input text into tokens and modifies the resulting `TokenStream`.
+/// `Tokenizer` are in charge of splitting text into a stream of token
+/// before indexing.
 ///
-/// It simply wraps a `Tokenizer` and a list of `TokenFilter` that are applied sequentially.
-pub struct TextAnalyzer {
-    tokenizer: Box<dyn Tokenizer>,
-    token_filters: Vec<BoxTokenFilter>,
-}
-
-impl<T: Tokenizer> From<T> for TextAnalyzer {
-    fn from(tokenizer: T) -> Self {
-        TextAnalyzer::new(tokenizer, Vec::new())
-    }
-}
-
-impl TextAnalyzer {
-    /// Creates a new `TextAnalyzer` given a tokenizer and a vector of `BoxTokenFilter`.
-    ///
-    /// When creating a `TextAnalyzer` from a `Tokenizer` alone, prefer using
-    /// `TextAnalyzer::from(tokenizer)`.
-    pub fn new<T: Tokenizer>(tokenizer: T, token_filters: Vec<BoxTokenFilter>) -> TextAnalyzer {
-        TextAnalyzer {
-            tokenizer: Box::new(tokenizer),
-            token_filters,
-        }
-    }
+/// See the [module documentation](./index.html) for more detail.
+///
+/// # Warning
+///
+/// This API may change to use associated types.
+pub trait Tokenizer<'a>: Sized + Clone {
+    /// Type associated to the resulting tokenstream tokenstream.
+    type TokenStreamImpl: TokenStream;
+
+    /// Creates a token stream for a given `str`.
+    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl;
 
     /// Appends a token filter to the current tokenizer.
     ///
@@ -70,26 +58,92 @@ impl TextAnalyzer {
     /// ```rust
     /// use tantivy::tokenizer::*;
     ///
-    /// let en_stem = TextAnalyzer::from(SimpleTokenizer)
+    /// # fn main() {
+    /// let en_stem = SimpleTokenizer
     ///     .filter(RemoveLongFilter::limit(40))
     ///     .filter(LowerCaser)
    ///     .filter(Stemmer::default());
+    /// # }
     /// ```
     ///
-    pub fn filter<F: Into<BoxTokenFilter>>(mut self, token_filter: F) -> Self {
-        self.token_filters.push(token_filter.into());
-        self
+    fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
+    where
+        NewFilter: TokenFilter<<Self as Tokenizer<'a>>::TokenStreamImpl>,
+    {
+        ChainTokenizer {
+            head: new_filter,
+            tail: self,
+        }
+    }
+}
+
+/// A boxed tokenizer
+trait BoxedTokenizerTrait: Send + Sync {
+    /// Tokenize a `&str`
+    fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a>;
+
+    /// Tokenize an array`&str`
+    ///
+    /// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
+    /// one concatenated `&str`, with an artificial position gap of `2` between the different fields
+    /// to prevent accidental `PhraseQuery` to match accross two terms.
+    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b>;
+
+    /// Return a boxed clone of the tokenizer
+    fn boxed_clone(&self) -> BoxedTokenizer;
+}
+
+/// A boxed tokenizer
+pub struct BoxedTokenizer(Box<dyn BoxedTokenizerTrait>);
+
+impl<T> From<T> for BoxedTokenizer
+where
+    T: 'static + Send + Sync + for<'a> Tokenizer<'a>,
+{
+    fn from(tokenizer: T) -> BoxedTokenizer {
+        BoxedTokenizer(Box::new(BoxableTokenizer(tokenizer)))
+    }
+}
+
+impl BoxedTokenizer {
+    /// Tokenize a `&str`
+    pub fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
+        self.0.token_stream(text)
     }
 
     /// Tokenize an array`&str`
     ///
-    /// The resulting `BoxTokenStream` is equivalent to what would be obtained if the &str were
+    /// The resulting `TokenStream` is equivalent to what would be obtained if the &str were
     /// one concatenated `&str`, with an artificial position gap of `2` between the different fields
     /// to prevent accidental `PhraseQuery` to match accross two terms.
-    pub fn token_stream_texts<'a>(&self, texts: &'a [&'a str]) -> BoxTokenStream<'a> {
+    pub fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
+        self.0.token_stream_texts(texts)
+    }
+}
+
+impl Clone for BoxedTokenizer {
+    fn clone(&self) -> BoxedTokenizer {
+        self.0.boxed_clone()
+    }
+}
+
+#[derive(Clone)]
+struct BoxableTokenizer<A>(A)
+where
+    A: for<'a> Tokenizer<'a> + Send + Sync;
+
+impl<A> BoxedTokenizerTrait for BoxableTokenizer<A>
+where
+    A: 'static + Send + Sync + for<'a> Tokenizer<'a>,
+{
+    fn token_stream<'a>(&self, text: &'a str) -> Box<dyn TokenStream + 'a> {
+        Box::new(self.0.token_stream(text))
+    }
+
+    fn token_stream_texts<'b>(&self, texts: &'b [&'b str]) -> Box<dyn TokenStream + 'b> {
         assert!(!texts.is_empty());
         if texts.len() == 1 {
-            self.token_stream(texts[0])
+            Box::new(self.0.token_stream(texts[0]))
         } else {
             let mut offsets = vec![];
             let mut total_offset = 0;
@@ -97,124 +151,34 @@ impl TextAnalyzer {
                 offsets.push(total_offset);
                 total_offset += text.len();
             }
-            let token_streams: Vec<BoxTokenStream<'a>> = texts
-                .iter()
-                .cloned()
-                .map(|text| self.token_stream(text))
-                .collect();
-            From::from(TokenStreamChain::new(offsets, token_streams))
+            let token_streams: Vec<_> =
+                texts.iter().map(|text| self.0.token_stream(text)).collect();
+            Box::new(TokenStreamChain::new(offsets, token_streams))
         }
     }
 
-    /// Creates a token stream for a given `str`.
-    pub fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
-        let mut token_stream = self.tokenizer.token_stream(text);
-        for token_filter in &self.token_filters {
-            token_stream = token_filter.transform(token_stream);
-        }
-        token_stream
-    }
-}
-
-impl Clone for TextAnalyzer {
-    fn clone(&self) -> Self {
-        TextAnalyzer {
-            tokenizer: self.tokenizer.box_clone(),
-            token_filters: self
-                .token_filters
-                .iter()
-                .map(|token_filter| token_filter.box_clone())
-                .collect(),
-        }
-    }
-}
-
-/// `Tokenizer` are in charge of splitting text into a stream of token
-/// before indexing.
-///
-/// See the [module documentation](./index.html) for more detail.
-///
-/// # Warning
-///
-/// This API may change to use associated types.
-pub trait Tokenizer: 'static + Send + Sync + TokenizerClone {
-    /// Creates a token stream for a given `str`.
-    fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a>;
-}
-
-pub trait TokenizerClone {
-    fn box_clone(&self) -> Box<dyn Tokenizer>;
-}
-
-impl<T: Tokenizer + Clone> TokenizerClone for T {
-    fn box_clone(&self) -> Box<dyn Tokenizer> {
-        Box::new(self.clone())
+    fn boxed_clone(&self) -> BoxedTokenizer {
+        self.0.clone().into()
     }
 }
 
-impl<'a> TokenStream for Box<dyn TokenStream + 'a> {
+impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
     fn advance(&mut self) -> bool {
         let token_stream: &mut dyn TokenStream = self.borrow_mut();
         token_stream.advance()
     }
 
-    fn token<'b>(&'b self) -> &'b Token {
-        let token_stream: &'b (dyn TokenStream + 'a) = self.borrow();
+    fn token(&self) -> &Token {
+        let token_stream: &dyn TokenStream = self.borrow();
         token_stream.token()
     }
 
-    fn token_mut<'b>(&'b mut self) -> &'b mut Token {
-        let token_stream: &'b mut (dyn TokenStream + 'a) = self.borrow_mut();
+    fn token_mut(&mut self) -> &mut Token {
        let token_stream: &mut dyn TokenStream = self.borrow_mut();
         token_stream.token_mut()
     }
 }
 
-/// Simple wrapper of `Box<dyn TokenStream + 'a>`.
-///
-/// See `TokenStream` for more information.
-pub struct BoxTokenStream<'a>(Box<dyn TokenStream + 'a>);
-
-impl<'a, T> From<T> for BoxTokenStream<'a>
-where
-    T: TokenStream + 'a,
-{
-    fn from(token_stream: T) -> BoxTokenStream<'a> {
-        BoxTokenStream(Box::new(token_stream))
-    }
-}
-
-impl<'a> Deref for BoxTokenStream<'a> {
-    type Target = dyn TokenStream + 'a;
-
-    fn deref(&self) -> &Self::Target {
-        &*self.0
-    }
-}
-impl<'a> DerefMut for BoxTokenStream<'a> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut *self.0
-    }
-}
-
-/// Simple wrapper of `Box<dyn TokenFilter + 'a>`.
-///
-/// See `TokenStream` for more information.
-pub struct BoxTokenFilter(Box<dyn TokenFilter>);
-
-impl Deref for BoxTokenFilter {
-    type Target = dyn TokenFilter;
-
-    fn deref(&self) -> &dyn TokenFilter {
-        &*self.0
-    }
-}
-
-impl<T: TokenFilter> From<T> for BoxTokenFilter {
-    fn from(tokenizer: T) -> BoxTokenFilter {
-        BoxTokenFilter(Box::new(tokenizer))
-    }
-}
-
 /// `TokenStream` is the result of the tokenization.
 ///
 /// It consists consumable stream of `Token`s.
@@ -224,7 +188,8 @@ impl<T: TokenFilter> From<T> for BoxTokenFilter {
 /// ```
 /// use tantivy::tokenizer::*;
 ///
-/// let tokenizer = TextAnalyzer::from(SimpleTokenizer)
+/// # fn main() {
+/// let tokenizer = SimpleTokenizer
 ///     .filter(RemoveLongFilter::limit(40))
 ///     .filter(LowerCaser);
 /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
@@ -242,6 +207,7 @@ impl<T: TokenFilter> From<T> for BoxTokenFilter {
 /// assert_eq!(token.offset_to, 12);
 /// assert_eq!(token.position, 1);
 /// }
+/// # }
 /// ```
 ///
 pub trait TokenStream {
@@ -261,15 +227,17 @@ pub trait TokenStream {
     /// and `.token()`.
     ///
     /// ```
-    /// use tantivy::tokenizer::*;
-    ///
-    /// let tokenizer = TextAnalyzer::from(SimpleTokenizer)
-    ///     .filter(RemoveLongFilter::limit(40))
-    ///     .filter(LowerCaser);
+    /// # use tantivy::tokenizer::*;
+    /// #
+    /// # fn main() {
+    /// # let tokenizer = SimpleTokenizer
+    /// #     .filter(RemoveLongFilter::limit(40))
+    /// #     .filter(LowerCaser);
     /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
     /// while let Some(token) = token_stream.next() {
     ///     println!("Token {:?}", token.text);
     /// }
+    /// # }
     /// ```
     fn next(&mut self) -> Option<&Token> {
         if self.advance() {
@@ -281,8 +249,6 @@ pub trait TokenStream {
 
     /// Helper function to consume the entire `TokenStream`
     /// and push the tokens to a sink function.
-    ///
-    /// Remove this.
     fn process(&mut self, sink: &mut dyn FnMut(&Token)) -> u32 {
         let mut num_tokens_pushed = 0u32;
         while self.advance() {
@@ -293,20 +259,33 @@ pub trait TokenStream {
     }
 }
 
-pub trait TokenFilterClone {
-    fn box_clone(&self) -> BoxTokenFilter;
+#[derive(Clone)]
+pub struct ChainTokenizer<HeadTokenFilterFactory, TailTokenizer> {
+    head: HeadTokenFilterFactory,
+    tail: TailTokenizer,
+}
+
+impl<'a, HeadTokenFilterFactory, TailTokenizer> Tokenizer<'a>
+    for ChainTokenizer<HeadTokenFilterFactory, TailTokenizer>
+where
+    HeadTokenFilterFactory: TokenFilter<TailTokenizer::TokenStreamImpl>,
+    TailTokenizer: Tokenizer<'a>,
+{
+    type TokenStreamImpl = HeadTokenFilterFactory::ResultTokenStream;
+
+    fn token_stream(&self, text: &'a str) -> Self::TokenStreamImpl {
+        let tail_token_stream = self.tail.token_stream(text);
+        self.head.transform(tail_token_stream)
+    }
 }
 
 /// Trait for the pluggable components of `Tokenizer`s.
-pub trait TokenFilter: 'static + Send + Sync + TokenFilterClone {
-    /// Wraps a token stream and returns the modified one.
-    fn transform<'a>(&self, token_stream: BoxTokenStream<'a>) -> BoxTokenStream<'a>;
-}
+pub trait TokenFilter<TailTokenStream: TokenStream>: Clone {
+    /// The resulting `TokenStream` type.
+    type ResultTokenStream: TokenStream;
 
-impl<T: TokenFilter + Clone> TokenFilterClone for T {
-    fn box_clone(&self) -> BoxTokenFilter {
-        BoxTokenFilter::from(self.clone())
-    }
+    /// Wraps a token stream and returns the modified one.
+    fn transform(&self, token_stream: TailTokenStream) -> Self::ResultTokenStream;
 }
 
 #[cfg(test)]
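Taken together, these `tokenizer.rs` changes split the old `TextAnalyzer` in two: a compile-time `ChainTokenizer` built by `.filter(...)`, and a type-erased `BoxedTokenizer` for places that must store heterogeneous tokenizers. A minimal sketch of the erased side, assuming the `From` and `Clone` impls land as shown above:

    use tantivy::tokenizer::*;

    fn main() {
        // Any 'static + Send + Sync tokenizer chain converts into a BoxedTokenizer;
        // Clone is routed through the object-safe boxed_clone method.
        let boxed: BoxedTokenizer = SimpleTokenizer
            .filter(RemoveLongFilter::limit(40))
            .filter(LowerCaser)
            .into();
        let copy = boxed.clone();
        let mut stream = copy.token_stream("Hello, happy tax payer");
        while let Some(token) = stream.next() {
            println!("{}", token.text);
        }
    }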
--- a/src/tokenizer/tokenizer_manager.rs
+++ b/src/tokenizer/tokenizer_manager.rs
@@ -1,10 +1,11 @@
 use crate::tokenizer::stemmer::Language;
-use crate::tokenizer::tokenizer::TextAnalyzer;
+use crate::tokenizer::BoxedTokenizer;
 use crate::tokenizer::LowerCaser;
 use crate::tokenizer::RawTokenizer;
 use crate::tokenizer::RemoveLongFilter;
 use crate::tokenizer::SimpleTokenizer;
 use crate::tokenizer::Stemmer;
+use crate::tokenizer::Tokenizer;
 use std::collections::HashMap;
 use std::sync::{Arc, RwLock};
 
@@ -22,16 +23,16 @@ use std::sync::{Arc, RwLock};
 /// search engine.
 #[derive(Clone)]
 pub struct TokenizerManager {
-    tokenizers: Arc<RwLock<HashMap<String, TextAnalyzer>>>,
+    tokenizers: Arc<RwLock<HashMap<String, BoxedTokenizer>>>,
 }
 
 impl TokenizerManager {
     /// Registers a new tokenizer associated with a given name.
-    pub fn register<T>(&self, tokenizer_name: &str, tokenizer: T)
+    pub fn register<A>(&self, tokenizer_name: &str, tokenizer: A)
     where
-        TextAnalyzer: From<T>,
+        A: Into<BoxedTokenizer>,
     {
-        let boxed_tokenizer: TextAnalyzer = TextAnalyzer::from(tokenizer);
+        let boxed_tokenizer = tokenizer.into();
         self.tokenizers
             .write()
             .expect("Acquiring the lock should never fail")
@@ -39,7 +40,7 @@ impl TokenizerManager {
     }
 
     /// Accessing a tokenizer given its name.
-    pub fn get(&self, tokenizer_name: &str) -> Option<TextAnalyzer> {
+    pub fn get(&self, tokenizer_name: &str) -> Option<BoxedTokenizer> {
         self.tokenizers
             .read()
             .expect("Acquiring the lock should never fail")
@@ -61,13 +62,13 @@ impl Default for TokenizerManager {
         manager.register("raw", RawTokenizer);
         manager.register(
             "default",
-            TextAnalyzer::from(SimpleTokenizer)
+            SimpleTokenizer
                 .filter(RemoveLongFilter::limit(40))
                 .filter(LowerCaser),
         );
         manager.register(
             "en_stem",
-            TextAnalyzer::from(SimpleTokenizer)
+            SimpleTokenizer
                 .filter(RemoveLongFilter::limit(40))
                 .filter(LowerCaser)
                 .filter(Stemmer::new(Language::English)),
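With the manager now storing `BoxedTokenizer` values, registration accepts anything convertible via `Into<BoxedTokenizer>`. A short sketch, assuming the built-in registrations above compile:

    use tantivy::tokenizer::*;

    fn main() {
        let manager = TokenizerManager::default();
        manager.register("custom_en", SimpleTokenizer.filter(LowerCaser));
        // get() hands back the type-erased BoxedTokenizer.
        let tokenizer = manager.get("custom_en").expect("just registered");
        let mut stream = tokenizer.token_stream("HELLO");
        assert_eq!(stream.next().unwrap().text, "hello");
    }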
--- a/tests/failpoints/mod.rs
+++ b/tests/failpoints/mod.rs
@@ -28,11 +28,11 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
     // The initial 1*off is there to allow for the removal of the
     // lock file.
     fail::cfg("RAMDirectory::delete", "1*off->1*return").unwrap();
-    assert!(managed_directory.garbage_collect(Default::default).is_ok());
+    managed_directory.garbage_collect(Default::default);
     assert!(managed_directory.exists(test_path));
 
     // running the gc a second time should remove the file.
-    assert!(managed_directory.garbage_collect(Default::default).is_ok());
+    managed_directory.garbage_collect(Default::default);
     assert!(
         !managed_directory.exists(test_path),
         "The file should have been deleted"