Compare commits


2 Commits

Author        SHA1        Message                    Date
Paul Masurel  9fd23f3abf  Fixing bench compilation   2019-10-04 16:36:17 +09:00
Paul Masurel  c030990d00  fmt                        2019-10-02 09:50:20 +09:00
89 changed files with 1526 additions and 3620 deletions

.github/FUNDING.yml vendored

@@ -1,12 +0,0 @@
# These are supported funding model platforms
github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']


@@ -1,17 +1,3 @@
-Tantivy 0.11.3
-=======================
-- Fixed DateTime as a fast field (#735)
-Tantivy 0.11.2
-=======================
-- The future returned by `IndexWriter::merge` does not borrow `self` mutably anymore (#732)
-- Exposing a constructor for `WatchHandle` (#731)
-Tantivy 0.11.1
-=====================
-- Bug fix #729
 Tantivy 0.11.0
 =====================
@@ -23,19 +9,13 @@ Tantivy 0.11.0
 - API change around `Box<BoxableTokenizer>`. See detail in #629
 - Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock)
 - Add footer with some metadata to index files. #605 (@fdb-hiroshima)
-- Add a method to check the compatibility of the footer in the index with the running version of tantivy (@petr-tik)
-- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
-- Added handling of pre-tokenized text fields (#642), which will enable users to
-  load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
-- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)
 ## How to update?
-- The index format is changed. You are required to reindex your data to use tantivy 0.11.
 - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
 - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
   an error and handling the `Result` is required.
-- `tantivy::version()` now returns a `Version` object. This object implements `ToString()`
 Tantivy 0.10.2
 =====================
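The deleted "How to update?" entry about `tantivy::version()` says it returns a `Version` object implementing `ToString`. A minimal, hedged sketch of what that means for a caller (nothing beyond that changelog entry is assumed):

```rust
// Sketch only: per the changelog entry above, `tantivy::version()` returns a
// `Version` object rather than a plain string, and that object implements `ToString`.
fn log_tantivy_version() {
    let version = tantivy::version();
    println!("using tantivy {}", version.to_string());
}
```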


@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.11.3"
+version = "0.11.0"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -13,7 +13,7 @@ keywords = ["search", "information", "retrieval"]
 edition = "2018"
 [dependencies]
-base64 = "0.11.0"
+base64 = "0.10.0"
 byteorder = "1.0"
 crc32fast = "1.2.0"
 once_cell = "1.0"
@@ -33,25 +33,27 @@ fs2={version="0.4", optional=true}
 itertools = "0.8"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 notify = {version="4", optional=true}
-uuid = { version = "0.8", features = ["v4", "serde"] }
+bit-set = "0.5"
+uuid = { version = "0.7.2", features = ["v4", "serde"] }
 crossbeam = "0.7"
-futures = {version = "0.3", features=["thread-pool"] }
+futures = "0.1"
+futures-cpupool = "0.1"
 owning_ref = "0.4"
 stable_deref_trait = "1.0.0"
-rust-stemmers = "1.2"
+rust-stemmers = "1.1"
 downcast-rs = { version="1.0" }
-tantivy-query-grammar = { version="0.11", path="./query-grammar" }
+tantivy-query-grammar = { path="./query-grammar" }
 bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
-census = "0.4"
+census = "0.2"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
 fail = "0.3"
+scoped-pool = "1.0"
 murmurhash32 = "0.2"
 chrono = "0.4"
-smallvec = "1.0"
-rayon = "1"
+smallvec = "0.6"
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"
@@ -62,10 +64,6 @@ maplit = "1"
 matches = "0.1.8"
 time = "0.1.42"
-[dev-dependencies.fail]
-version = "0.3"
-features = ["failpoints"]
 [profile.release]
 opt-level = 3
 debug = false
@@ -89,6 +87,10 @@ members = ["query-grammar"]
 [badges]
 travis-ci = { repository = "tantivy-search/tantivy" }
+[dev-dependencies.fail]
+version = "0.3"
+features = ["failpoints"]
 # Following the "fail" crate best practises, we isolate
 # tests that define specific behavior in fail check points
 # in a different binary.


@@ -21,9 +21,9 @@
 [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
-**Tantivy** is a **full text search engine library** written in Rust.
+**Tantivy** is a **full text search engine library** written in rust.
-It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
+It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
 an off-the-shelf search engine server, but rather a crate that can be used
 to build such a search engine.
@@ -31,7 +31,7 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 # Benchmark
-Tantivy is typically faster than Lucene, but the results depend on
+Tantivy is typically faster than Lucene, but the results will depend on
 the nature of the queries in your workload.
 The following [benchmark](https://tantivy-search.github.io/bench/) break downs
@@ -40,19 +40,19 @@ performance for different type of queries / collection.
 # Features
 - Full-text search
-- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter))
+- Configurable tokenizer. (stemming available for 17 latin languages. Third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)
 - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
 - Tiny startup time (<10ms), perfect for command line tools
-- BM25 scoring (the same as Lucene)
+- BM25 scoring (the same as lucene)
-- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`)
+- Natural query language `(michael AND jackson) OR "king of pop"`
-- Phrase queries search (e.g. `"michael jackson"`)
+- Phrase queries search (`"michael jackson"`)
 - Incremental indexing
 - Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
 - Mmap directory
-- SIMD integer compression when the platform/CPU includes the SSE2 instruction set
+- SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
-- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene)
+- Single valued and multivalued u64, i64 and f64 fast fields (equivalent of doc values in Lucene)
 - `&[u8]` fast fields
-- Text, i64, u64, f64, dates, and hierarchical facet fields
+- Text, i64, u64, f64, dates and hierarchical facet fields
 - LZ4 compressed document store
 - Range queries
 - Faceted search
@@ -61,42 +61,43 @@ performance for different type of queries / collection.
 # Non-features
-- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a
+- Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
 library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
-are within the scope of Tantivy.
+are within the scope of tantivy.
 # Supported OS and compiler
-Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows.
+Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
 # Getting started
-- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
+- [tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
-- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
-index documents, and search via the CLI or a small server with a REST API.
-It walks you through getting a wikipedia search engine up and running in a few minutes.
-- [Reference doc for the last released version](https://docs.rs/tantivy/)
+- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
+`tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
+index documents and search via the CLI or a small server with a REST API.
+It will walk you through getting a wikipedia search engine up and running in a few minutes.
+- [reference doc for the last released version](https://docs.rs/tantivy/)
 # How can I support this project?
 There are many ways to support this project.
-- Use Tantivy and tell us about your experience on [Gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
+- Use tantivy and tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
 - Report bugs
 - Write a blog post
 - Help with documentation by asking questions or submitting PRs
-- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy))
+- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
-- Talk about Tantivy around you
+- Talk about tantivy around you
 - Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
 # Contributing code
-We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
+We use the GitHub Pull Request workflow - reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
 ## Clone and build locally
-Tantivy compiles on stable Rust but requires `Rust >= 1.27`.
+Tantivy compiles on stable rust but requires `Rust >= 1.27`.
-To check out and run tests, you can simply run:
+To check out and run tests, you can simply run :
 ```bash
 git clone https://github.com/tantivy-search/tantivy.git
@@ -107,7 +108,7 @@ To check out and run tests, you can simply run:
 ## Run tests
 Some tests will not run with just `cargo test` because of `fail-rs`.
-To run the tests exhaustively, run `./run-tests.sh`.
+To run the tests exhaustively, run `./run-tests.sh`
 ## Debug
@@ -115,13 +116,13 @@ You might find it useful to step through the programme with a debugger.
 ### A failing test
-Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`:
+Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that `target/` dir exists. Use this bash script to find the most name of the most recent debug build of tantivy and run it under rust-gdb.
 ```bash
 find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
 ```
-Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this:
+Now that you are in rust-gdb, you can set breakpoints on lines and methods that match your source-code and run the debug executable with flags that you normally pass to `cargo test` to like this
 ```bash
 $gdb run --test-threads 1 --test $NAME_OF_TEST
@@ -129,7 +130,7 @@ $gdb run --test-threads 1 --test $NAME_OF_TEST
 ### An example
-By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs:
+By default, rustc compiles everything in the `examples/` dir in debug mode. This makes it easy for you to make examples to reproduce bugs.
 ```bash
 rust-gdb target/debug/examples/$EXAMPLE_NAME
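The "Getting started" section above points to the basic_search example. As a quick, hedged sketch of the index-and-search flow it refers to (field names are illustrative; the calls mirror the collector doc-comments that appear later in this diff):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // A single stored, indexed text field is enough for a minimal example.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    // Index a couple of documents in RAM.
    let index = Index::create_in_ram(schema.clone());
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(title => "The Diary of Muadib"));
    index_writer.add_document(doc!(title => "The Name of the Wind"));
    index_writer.commit()?;

    // Parse a query in the natural query language and collect the top hits.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
    for (_score, doc_address) in searcher.search(&query, &TopDocs::with_limit(10))? {
        let retrieved = searcher.doc(doc_address)?;
        println!("{}", schema.to_json(&retrieved));
    }
    Ok(())
}
```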


@@ -13,100 +13,63 @@
// --- // ---
// Importing tantivy... // Importing tantivy...
use tantivy::collector::FacetCollector; use tantivy::collector::FacetCollector;
use tantivy::query::{AllQuery, TermQuery}; use tantivy::query::AllQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::{doc, Index}; use tantivy::{doc, Index};
use tempfile::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
// Let's create a temporary directory for the sake of this example // Let's create a temporary directory for the
// sake of this example
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let name = schema_builder.add_text_field("felin_name", TEXT | STORED); schema_builder.add_text_field("name", TEXT | STORED);
// this is our faceted field: its scientific classification
let classification = schema_builder.add_facet_field("classification"); // this is our faceted field
schema_builder.add_facet_field("tags");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer(30_000_000)?; let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
let name = schema.get_field("name").unwrap();
let tags = schema.get_field("tags").unwrap();
// For convenience, tantivy also comes with a macro to // For convenience, tantivy also comes with a macro to
// reduce the boilerplate above. // reduce the boilerplate above.
index_writer.add_document(doc!( index_writer.add_document(doc!(
name => "Cat", name => "the ditch",
classification => Facet::from("/Felidae/Felinae/Felis") tags => Facet::from("/pools/north")
)); ));
index_writer.add_document(doc!( index_writer.add_document(doc!(
name => "Canada lynx", name => "little stacey",
classification => Facet::from("/Felidae/Felinae/Lynx") tags => Facet::from("/pools/south")
));
index_writer.add_document(doc!(
name => "Cheetah",
classification => Facet::from("/Felidae/Felinae/Acinonyx")
));
index_writer.add_document(doc!(
name => "Tiger",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Lion",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Jaguar",
classification => Facet::from("/Felidae/Pantherinae/Panthera")
));
index_writer.add_document(doc!(
name => "Sunda clouded leopard",
classification => Facet::from("/Felidae/Pantherinae/Neofelis")
));
index_writer.add_document(doc!(
name => "Fossa",
classification => Facet::from("/Eupleridae/Cryptoprocta")
)); ));
index_writer.commit()?; index_writer.commit()?;
let reader = index.reader()?; let reader = index.reader()?;
let searcher = reader.searcher(); let searcher = reader.searcher();
{
let mut facet_collector = FacetCollector::for_field(classification);
facet_collector.add_facet("/Felidae");
let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
// This lists all of the facet counts, right below "/Felidae".
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae").collect();
assert_eq!(
facets,
vec![
(&Facet::from("/Felidae/Felinae"), 3),
(&Facet::from("/Felidae/Pantherinae"), 4),
]
);
}
// Facets are also searchable. let mut facet_collector = FacetCollector::for_field(tags);
// facet_collector.add_facet("/pools");
// For instance a common UI pattern is to allow the user someone to click on a facet link
// (e.g: `Pantherinae`) to drill down and filter the current result set with this subfacet.
//
// The search would then look as follows.
// Check the reference doc for different ways to create a `Facet` object. let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap();
{
let facet = Facet::from_text("/Felidae/Pantherinae"); // This lists all of the facet counts
let facet_term = Term::from_facet(classification, &facet); let facets: Vec<(&Facet, u64)> = facet_counts.get("/pools").collect();
let facet_term_query = TermQuery::new(facet_term, IndexRecordOption::Basic); assert_eq!(
let mut facet_collector = FacetCollector::for_field(classification); facets,
facet_collector.add_facet("/Felidae/Pantherinae"); vec![
let facet_counts = searcher.search(&facet_term_query, &facet_collector)?; (&Facet::from("/pools/north"), 1),
let facets: Vec<(&Facet, u64)> = facet_counts.get("/Felidae/Pantherinae").collect(); (&Facet::from("/pools/south"), 1),
assert_eq!( ]
facets, );
vec![
(&Facet::from("/Felidae/Pantherinae/Neofelis"), 1),
(&Facet::from("/Felidae/Pantherinae/Panthera"), 3),
]
);
}
Ok(()) Ok(())
} }
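Since the two versions of this example are interleaved line by line above, here is a consolidated, hedged sketch of the workflow shown in the left-hand (felid) version: count facets under a root, then drill down with a facet term query. Documents are abbreviated; the calls themselves are taken from the example.

```rust
use tantivy::collector::FacetCollector;
use tantivy::query::{AllQuery, TermQuery};
use tantivy::schema::{Facet, IndexRecordOption, Schema, STORED, TEXT};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let name = schema_builder.add_text_field("felin_name", TEXT | STORED);
    // The facet field stores a path-like value such as "/Felidae/Felinae/Felis".
    let classification = schema_builder.add_facet_field("classification");
    let index = Index::create_in_ram(schema_builder.build());

    let mut index_writer = index.writer(30_000_000)?;
    index_writer.add_document(doc!(
        name => "Cat",
        classification => Facet::from("/Felidae/Felinae/Felis")
    ));
    index_writer.add_document(doc!(
        name => "Tiger",
        classification => Facet::from("/Felidae/Pantherinae/Panthera")
    ));
    index_writer.commit()?;
    let searcher = index.reader()?.searcher();

    // Count documents per immediate sub-facet of "/Felidae".
    let mut facet_collector = FacetCollector::for_field(classification);
    facet_collector.add_facet("/Felidae");
    let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
    for (facet, count) in facet_counts.get("/Felidae") {
        println!("{:?}: {}", facet, count);
    }

    // Drill down: facets are indexed terms, so a TermQuery restricts the result set.
    let facet = Facet::from_text("/Felidae/Pantherinae");
    let drill_down = TermQuery::new(Term::from_facet(classification, &facet), IndexRecordOption::Basic);
    let mut sub_collector = FacetCollector::for_field(classification);
    sub_collector.add_facet("/Felidae/Pantherinae");
    let sub_counts = searcher.search(&drill_down, &sub_collector)?;
    let _subfacets: Vec<(&Facet, u64)> = sub_counts.get("/Felidae/Pantherinae").collect();
    Ok(())
}
```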


@@ -1,140 +0,0 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes you might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;
fn pre_tokenize_text(text: &str) -> Vec<Token> {
let mut token_stream = SimpleTokenizer.token_stream(text);
let mut tokens = vec![];
while token_stream.advance() {
tokens.push(token_stream.token().clone());
}
tokens
}
fn main() -> tantivy::Result<()> {
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
// We can create a document manually, by setting the fields
// one by one in a Document object.
let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();
let title_text = "The Old Man and the Sea";
let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";
// Content of our first document
// We create `PreTokenizedString` which contains original text and vector of tokens
let title_tok = PreTokenizedString {
text: String::from(title_text),
tokens: pre_tokenize_text(title_text),
};
println!(
"Original text: \"{}\" and tokens: {:?}",
title_tok.text, title_tok.tokens
);
let body_tok = PreTokenizedString {
text: String::from(body_text),
tokens: pre_tokenize_text(body_text),
};
// Now lets create a document and add our `PreTokenizedString`
let old_man_doc = doc!(title => title_tok, body => body_tok);
// ... now let's just add it to the IndexWriter
index_writer.add_document(old_man_doc);
// Pretokenized text can also be fed as JSON
let short_man_json = r#"{
"title":[{
"text":"The Old Man",
"tokens":[
{"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
{"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
{"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
]
}]
}"#;
let short_man_doc = schema.parse_document(&short_man_json)?;
index_writer.add_document(short_man_doc);
// Let's commit changes
index_writer.commit()?;
// ... and now is the time to query our index
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;
let searcher = reader.searcher();
// We want to get documents with token "Man", we will use TermQuery to do it
// Using PreTokenizedString means the tokens are stored as is avoiding stemming
// and lowercasing, which preserves full words in their original form
let query = TermQuery::new(
Term::from_field_text(title, "Man"),
IndexRecordOption::Basic,
);
let (top_docs, count) = searcher
.search(&query, &(TopDocs::with_limit(2), Count))
.unwrap();
assert_eq!(count, 2);
// Now let's print out the results.
// Note that the tokens are not stored along with the original text
// in the document store
for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc));
}
// In contrast to the previous query, when we search for the "man" term we
// should get no results, as it's not one of the indexed tokens. SimpleTokenizer
// only splits text on whitespace / punctuation.
let query = TermQuery::new(
Term::from_field_text(title, "man"),
IndexRecordOption::Basic,
);
let (_top_docs, count) = searcher
.search(&query, &(TopDocs::with_limit(2), Count))
.unwrap();
assert_eq!(count, 0);
Ok(())
}


@@ -1,3 +0,0 @@
# Tantivy Query Grammar
This crate is used by tantivy to parse queries.


@@ -2,7 +2,7 @@ use std::fmt;
 use std::fmt::Write;
 /// Defines whether a term in a query must be present,
-/// should be present or must be not present.
+/// should be present or must not be present.
 #[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
 pub enum Occur {
     /// For a given document to be considered for scoring,
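The `Occur` enum documented here is what `BooleanQuery` clauses are tagged with. A hedged sketch of how the three variants are typically combined (this assumes the `From<Vec<(Occur, Box<dyn Query>)>>` conversion that tantivy's `BooleanQuery` has provided in this era; field set-up is omitted):

```rust
use tantivy::query::{BooleanQuery, Occur, Query, TermQuery};
use tantivy::schema::{Field, IndexRecordOption};
use tantivy::Term;

// Sketch: "must contain `jackson`, must not contain `cow`" over a text field.
fn jackson_but_not_cow(title: Field) -> BooleanQuery {
    let must: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(title, "jackson"),
        IndexRecordOption::Basic,
    ));
    let must_not: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(title, "cow"),
        IndexRecordOption::Basic,
    ));
    BooleanQuery::from(vec![(Occur::Must, must), (Occur::MustNot, must_not)])
}
```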


@@ -13,29 +13,44 @@ use crate::SegmentReader;
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index}; /// use tantivy::{doc, Index, Result};
/// ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let mut index_writer = index.writer(3_000_000).unwrap(); /// let reader = index.reader()?;
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// let searcher = reader.searcher();
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let reader = index.reader().unwrap(); /// {
/// let searcher = reader.searcher(); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// // Here comes the important part /// assert_eq!(count, 2);
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// }
/// let query = query_parser.parse_query("diary").unwrap();
/// let count = searcher.search(&query, &Count).unwrap();
/// ///
/// assert_eq!(count, 2); /// Ok(())
/// }
/// ``` /// ```
pub struct Count; pub struct Count;
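The doc-test above is rendered with both of its versions interleaved. Consolidated, the usage it demonstrates is simply: parse a query, then pass the `Count` collector to `Searcher::search` to get the number of matching documents (a sketch; `index`, `searcher` and `title` are assumed to be set up as in the doc-test):

```rust
use tantivy::collector::Count;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{Index, Searcher};

// Returns how many documents match "diary" in the `title` field.
fn count_diary_docs(index: &Index, searcher: &Searcher, title: Field) -> tantivy::Result<usize> {
    let query = QueryParser::for_index(index, vec![title]).parse_query("diary")?;
    searcher.search(&query, &Count)
}
```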


@@ -86,6 +86,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// use tantivy::schema::{Facet, Schema, TEXT}; /// use tantivy::schema::{Facet, Schema, TEXT};
/// use tantivy::{doc, Index, Result}; /// use tantivy::{doc, Index, Result};
/// ///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// ///
@@ -126,7 +127,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang"); /// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category"); /// facet_collector.add_facet("/category");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
@@ -142,7 +143,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -157,8 +158,8 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ]); /// ]);
/// } /// }
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector)?; /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
@@ -171,7 +172,6 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// # assert!(example().is_ok());
/// ``` /// ```
pub struct FacetCollector { pub struct FacetCollector {
field: Field, field: Field,
@@ -452,11 +452,9 @@ impl FacetCounts {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{FacetCollector, FacetCounts}; use super::{FacetCollector, FacetCounts};
use crate::collector::Count;
use crate::core::Index; use crate::core::Index;
use crate::query::{AllQuery, QueryParser, TermQuery}; use crate::query::AllQuery;
use crate::schema::{Document, Facet, Field, IndexRecordOption, Schema}; use crate::schema::{Document, Facet, Field, Schema};
use crate::Term;
use rand::distributions::Uniform; use rand::distributions::Uniform;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
@@ -517,7 +515,7 @@ mod tests {
#[should_panic(expected = "Tried to add a facet which is a descendant of \ #[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")] an already added facet.")]
fn test_misused_facet_collector() { fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe")); facet_collector.add_facet(Facet::from("/country/europe"));
} }
@@ -546,59 +544,9 @@ mod tests {
assert_eq!(facets[0].1, 1); assert_eq!(facets[0].1, 1);
} }
#[test]
fn test_doc_search_by_facet() {
let mut schema_builder = Schema::builder();
let facet_field = schema_builder.add_facet_field("facet");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/A"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/B"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/A/C/A"),
));
index_writer.add_document(doc!(
facet_field => Facet::from_text(&"/D/C/A"),
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 4);
let count_facet = |facet_str: &str| {
let term = Term::from_facet(facet_field, &Facet::from_text(facet_str));
searcher
.search(&TermQuery::new(term, IndexRecordOption::Basic), &Count)
.unwrap()
};
assert_eq!(count_facet("/"), 4);
assert_eq!(count_facet("/A"), 3);
assert_eq!(count_facet("/A/B"), 1);
assert_eq!(count_facet("/A/C"), 1);
assert_eq!(count_facet("/A/C/A"), 1);
assert_eq!(count_facet("/C/A"), 0);
{
let query_parser = QueryParser::for_index(&index, vec![]);
{
let query = query_parser.parse_query("facet:/A/B").unwrap();
assert_eq!(1, searcher.search(&query, &Count).unwrap());
}
{
let query = query_parser.parse_query("facet:/A").unwrap();
assert_eq!(3, searcher.search(&query, &Count).unwrap());
}
}
}
#[test] #[test]
fn test_non_used_facet_collector() { fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope")); facet_collector.add_facet(Facet::from("/countryeurope"));
} }


@@ -108,35 +108,49 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// use tantivy::collector::{Count, TopDocs, MultiCollector}; /// use tantivy::collector::{Count, TopDocs, MultiCollector};
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index}; /// use tantivy::{doc, Index, Result};
/// ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let mut index_writer = index.writer(3_000_000).unwrap(); /// let reader = index.reader()?;
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// let searcher = reader.searcher();
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let reader = index.reader().unwrap(); /// let mut collectors = MultiCollector::new();
/// let searcher = reader.searcher(); /// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?;
/// let mut multi_fruit = searcher.search(&query, &collectors)?;
/// ///
/// let mut collectors = MultiCollector::new(); /// let count = count_handle.extract(&mut multi_fruit);
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2)); /// let top_docs = top_docs_handle.extract(&mut multi_fruit);
/// let count_handle = collectors.add_collector(Count);
/// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary").unwrap();
/// let mut multi_fruit = searcher.search(&query, &collectors).unwrap();
/// ///
/// let count = count_handle.extract(&mut multi_fruit); /// # assert_eq!(count, 2);
/// let top_docs = top_docs_handle.extract(&mut multi_fruit); /// # assert_eq!(top_docs.len(), 2);
/// ///
/// assert_eq!(count, 2); /// Ok(())
/// assert_eq!(top_docs.len(), 2); /// }
/// ``` /// ```
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[derive(Default)] #[derive(Default)]
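Consolidating the interleaved doc-test above, the `MultiCollector` flow is: register each collector to get a handle, run a single search, then let each handle extract its own fruit. A hedged sketch using the same collectors as the doc-test:

```rust
use tantivy::collector::{Count, MultiCollector, TopDocs};
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{Index, Searcher};

// One query, two results computed in a single pass: a count and the top documents.
fn count_and_top_docs(index: &Index, searcher: &Searcher, title: Field) -> tantivy::Result<usize> {
    let mut collectors = MultiCollector::new();
    let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
    let count_handle = collectors.add_collector(Count);

    let query = QueryParser::for_index(index, vec![title]).parse_query("diary")?;
    let mut multi_fruit = searcher.search(&query, &collectors)?;

    // Each handle pulls its own piece out of the combined fruit.
    let top_docs = top_docs_handle.extract(&mut multi_fruit);
    let count = count_handle.extract(&mut multi_fruit);
    assert!(top_docs.len() <= count);
    Ok(count)
}
```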


@@ -12,9 +12,6 @@ use std::collections::BinaryHeap;
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed. /// default Rust heap is a max heap, whereas a min heap is needed.
/// ///
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
/// address is used.
///
/// WARNING: equality is not what you would expect here. /// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, and regardless of whether `doc` /// Two elements are equal if their feature is equal, and regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this /// is equal. This should be perfectly fine for this usage, but let's make sure this
@@ -24,37 +21,29 @@ struct ComparableDoc<T, D> {
doc: D, doc: D,
} }
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> { impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> { impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
#[inline] #[inline]
fn cmp(&self, other: &Self) -> Ordering { fn cmp(&self, other: &Self) -> Ordering {
// Reversed to make BinaryHeap work as a min-heap other
let by_feature = other
.feature .feature
.partial_cmp(&self.feature) .partial_cmp(&self.feature)
.unwrap_or(Ordering::Equal); .unwrap_or_else(|| Ordering::Equal)
let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
// In case of a tie on the feature, we sort by ascending
// `DocAddress` in order to ensure a stable sorting of the
// documents.
by_feature.then_with(lazy_by_doc_address)
} }
} }
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> { impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal self.cmp(other) == Ordering::Equal
} }
} }
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {} impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
pub(crate) struct TopCollector<T> { pub(crate) struct TopCollector<T> {
limit: usize, limit: usize,
@@ -225,94 +214,4 @@ mod tests {
] ]
); );
} }
#[test]
fn test_top_segment_collector_stable_ordering_for_equal_feature() {
// given that the documents are collected in ascending doc id order,
// when harvesting we have to guarantee stable sorting in case of a tie
// on the score
let doc_ids_collection = [4, 5, 6];
let score = 3.14;
let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
for id in &doc_ids_collection {
top_collector_limit_2.collect(*id, score);
}
let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
for id in &doc_ids_collection {
top_collector_limit_3.collect(*id, score);
}
assert_eq!(
top_collector_limit_2.harvest(),
top_collector_limit_3.harvest()[..2].to_vec(),
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::TopSegmentCollector;
use test::Bencher;
#[bench]
fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 400);
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
let mut score = 1.0;
for i in 0..100 {
score += 1.0;
top_collector.collect(i, score);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
} }
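One side of the hunks above (the left-hand column) carries a document-address tie-break that guarantees stable ordering of equally scored documents. Consolidated from those interleaved lines for readability:

```rust
use std::cmp::Ordering;

// Reconstructed from the left-hand column above. The pair is ordered so that a
// max-heap (std::collections::BinaryHeap) over ComparableDoc behaves as a min-heap
// on `feature`, with the doc address as a tie-breaker for stable sorting.
struct ComparableDoc<T, D> {
    feature: T,
    doc: D,
}

impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed comparison on the feature: the heap's "greatest" element is the
        // lowest-scoring document, which is the one to evict first.
        let by_feature = other
            .feature
            .partial_cmp(&self.feature)
            .unwrap_or(Ordering::Equal);
        // On a feature tie, sort by ascending doc address so the final ordering is stable.
        let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
        by_feature.then_with(lazy_by_doc_address)
    }
}

impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}

impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {}
```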


@@ -15,43 +15,54 @@ use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
use std::fmt; use std::fmt;
/// The `TopDocs` collector keeps track of the top `K` documents /// The Top Score Collector keeps track of the K documents
/// sorted by their score. /// sorted by their score.
/// ///
/// The implementation is based on a `BinaryHeap`. /// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents /// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`. /// is `O(n log K)`.
/// ///
/// This collector guarantees a stable sorting in case of a tie on the
/// document score. As such, it is suitable to implement pagination.
///
/// ```rust /// ```rust
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, DocAddress, Index}; /// use tantivy::{doc, DocAddress, Index, Result};
/// ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
/// ///
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); /// let reader = index.reader()?;
/// index_writer.add_document(doc!(title => "The Name of the Wind")); /// let searcher = reader.searcher();
/// index_writer.add_document(doc!(title => "The Diary of Muadib"));
/// index_writer.add_document(doc!(title => "A Dairy Cow"));
/// index_writer.add_document(doc!(title => "The Diary of a Young Girl"));
/// assert!(index_writer.commit().is_ok());
/// ///
/// let reader = index.reader().unwrap(); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let searcher = reader.searcher(); /// let query = query_parser.parse_query("diary")?;
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2))?;
/// ///
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1)));
/// let query = query_parser.parse_query("diary").unwrap(); /// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3)));
/// let top_docs = searcher.search(&query, &TopDocs::with_limit(2)).unwrap();
/// ///
/// assert_eq!(&top_docs[0], &(0.7261542, DocAddress(0, 1))); /// Ok(())
/// assert_eq!(&top_docs[1], &(0.6099695, DocAddress(0, 3))); /// }
/// ``` /// ```
pub struct TopDocs(TopCollector<Score>); pub struct TopDocs(TopCollector<Score>);
@@ -88,12 +99,15 @@ impl TopDocs {
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!(title => "The Name of the Wind", rating => 92u64)); /// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind",
/// # rating => 92u64,
/// # ));
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64)); /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64)); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64)); /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # assert!(index_writer.commit().is_ok()); /// # index_writer.commit()?;
/// # let reader = index.reader().unwrap(); /// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?; /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?; /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
@@ -185,33 +199,27 @@ impl TopDocs {
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::schema::Field; /// use tantivy::schema::Field;
/// ///
/// fn create_schema() -> Schema { /// # fn create_schema() -> Schema {
/// let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
/// schema_builder.add_text_field("product_name", TEXT); /// # schema_builder.add_text_field("product_name", TEXT);
/// schema_builder.add_u64_field("popularity", FAST); /// # schema_builder.add_u64_field("popularity", FAST);
/// schema_builder.build() /// # schema_builder.build()
/// } /// # }
/// /// #
/// fn create_index() -> tantivy::Result<Index> { /// # fn main() -> tantivy::Result<()> {
/// let schema = create_schema(); /// # let schema = create_schema();
/// let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?; /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// let product_name = index.schema().get_field("product_name").unwrap(); /// # let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// #
/// index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// index_writer.commit()?;
/// Ok(index)
/// }
///
/// let index = create_index().unwrap();
/// let product_name = index.schema().get_field("product_name").unwrap();
/// let popularity: Field = index.schema().get_field("popularity").unwrap(); /// let popularity: Field = index.schema().get_field("popularity").unwrap();
/// /// # index_writer.add_document(doc!(product_name => "The Diary of Muadib", popularity => 1u64));
/// let user_query_str = "diary"; /// # index_writer.add_document(doc!(product_name => "A Dairy Cow", popularity => 10u64));
/// let query_parser = QueryParser::for_index(&index, vec![product_name]); /// # index_writer.add_document(doc!(product_name => "The Diary of a Young Girl", popularity => 15u64));
/// let query = query_parser.parse_query(user_query_str).unwrap(); /// # index_writer.commit()?;
/// // ...
/// # let user_query = "diary";
/// # let query = QueryParser::for_index(&index, vec![product_name]).parse_query(user_query)?;
/// ///
/// // This is where we build our collector with our custom score. /// // This is where we build our collector with our custom score.
/// let top_docs_by_custom_score = TopDocs /// let top_docs_by_custom_score = TopDocs
@@ -238,12 +246,15 @@ impl TopDocs {
/// popularity_boost_score * original_score /// popularity_boost_score * original_score
/// } /// }
/// }); /// });
/// let reader = index.reader().unwrap(); /// # let reader = index.reader()?;
/// let searcher = reader.searcher(); /// # let searcher = reader.searcher();
/// // ... and here are our documents. Note this is a simple vec. /// // ... and here are our documents. Note this is a simple vec.
/// // The `Score` in the pair is our tweaked score. /// // The `Score` in the pair is our tweaked score.
/// let resulting_docs: Vec<(Score, DocAddress)> = /// let resulting_docs: Vec<(Score, DocAddress)> =
/// searcher.search(&query, &top_docs_by_custom_score).unwrap(); /// searcher.search(&*query, &top_docs_by_custom_score)?;
///
/// # Ok(())
/// # }
/// ``` /// ```
/// ///
/// # See also /// # See also
@@ -417,13 +428,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
mod tests { mod tests {
use super::TopDocs; use super::TopDocs;
use crate::collector::Collector; use crate::collector::Collector;
use crate::query::{AllQuery, Query, QueryParser}; use crate::query::{Query, QueryParser};
use crate::schema::{Field, Schema, FAST, STORED, TEXT}; use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use crate::IndexWriter; use crate::IndexWriter;
use crate::Score; use crate::Score;
use itertools::Itertools;
fn make_index() -> Index { fn make_index() -> Index {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -484,29 +494,6 @@ mod tests {
); );
} }
#[test]
fn test_top_collector_stable_sorting() {
let index = make_index();
// using AllQuery to get a constant score
let searcher = index.reader().unwrap().searcher();
let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
// precondition for the test to be meaningful: we did get documents
// with the same score
assert!(page_1.iter().map(|result| result.0).all_equal());
assert!(page_2.iter().map(|result| result.0).all_equal());
// sanity check since we're relying on make_index()
assert_eq!(page_1.len(), 2);
assert_eq!(page_2.len(), 3);
assert_eq!(page_1, &page_2[..page_1.len()]);
}
#[test] #[test]
#[should_panic] #[should_panic]
fn test_top_0() { fn test_top_0() {
@@ -564,7 +551,7 @@ mod tests {
)); ));
}); });
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field::from_field_id(2)); let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field(2));
let segment_reader = searcher.segment_reader(0u32); let segment_reader = searcher.segment_reader(0u32);
top_collector top_collector
.for_segment(0, segment_reader) .for_segment(0, segment_reader)
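The test retained above exercises `order_by_u64_field`. As a hedged, caller-side sketch of that collector (field and function names are illustrative; the fruit type matches what the doc-comment earlier in this file asserts):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::Query;
use tantivy::schema::Field;
use tantivy::{DocAddress, Searcher};

// Top 4 documents ordered by a u64 fast field (e.g. a "rating" field) instead of BM25 score.
fn top_by_rating(
    searcher: &Searcher,
    query: &dyn Query,
    rating: Field,
) -> tantivy::Result<Vec<(u64, DocAddress)>> {
    let collector = TopDocs::with_limit(4).order_by_u64_field(rating);
    searcher.search(query, &collector)
}
```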


@@ -186,7 +186,7 @@ mod test {
     use super::{CompositeFile, CompositeWrite};
     use crate::common::BinarySerializable;
     use crate::common::VInt;
-    use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory};
+    use crate::directory::{Directory, RAMDirectory};
     use crate::schema::Field;
     use std::io::Write;
     use std::path::Path;
@@ -199,13 +199,13 @@
         let w = directory.open_write(path).unwrap();
         let mut composite_write = CompositeWrite::wrap(w);
         {
-            let mut write_0 = composite_write.for_field(Field::from_field_id(0u32));
+            let mut write_0 = composite_write.for_field(Field(0u32));
             VInt(32431123u64).serialize(&mut write_0).unwrap();
             write_0.flush().unwrap();
         }
         {
-            let mut write_4 = composite_write.for_field(Field::from_field_id(4u32));
+            let mut write_4 = composite_write.for_field(Field(4u32));
             VInt(2).serialize(&mut write_4).unwrap();
             write_4.flush().unwrap();
         }
@@ -215,18 +215,14 @@
         let r = directory.open_read(path).unwrap();
         let composite_file = CompositeFile::open(&r).unwrap();
         {
-            let file0 = composite_file
-                .open_read(Field::from_field_id(0u32))
-                .unwrap();
+            let file0 = composite_file.open_read(Field(0u32)).unwrap();
             let mut file0_buf = file0.as_slice();
             let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
             assert_eq!(file0_buf.len(), 0);
             assert_eq!(payload_0, 32431123u64);
         }
         {
-            let file4 = composite_file
-                .open_read(Field::from_field_id(4u32))
-                .unwrap();
+            let file4 = composite_file.open_read(Field(4u32)).unwrap();
             let mut file4_buf = file4.as_slice();
             let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
             assert_eq!(file4_buf.len(), 0);


@@ -1,6 +1,6 @@
use crate::Result; use crate::Result;
use crossbeam::channel; use crossbeam::channel;
use rayon::{ThreadPool, ThreadPoolBuilder}; use scoped_pool::{Pool, ThreadConfig};
/// Search executor whether search request are single thread or multithread. /// Search executor whether search request are single thread or multithread.
/// ///
@@ -11,7 +11,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
/// used by the client. Second, we may stop using rayon in the future. /// used by the client. Second, we may stop using rayon in the future.
pub enum Executor { pub enum Executor {
SingleThread, SingleThread,
ThreadPool(ThreadPool), ThreadPool(Pool),
} }
impl Executor { impl Executor {
@@ -21,12 +21,10 @@ impl Executor {
} }
// Creates an Executor that dispatches the tasks in a thread pool. // Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> { pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
let pool = ThreadPoolBuilder::new() let thread_config = ThreadConfig::new().prefix(prefix);
.num_threads(num_threads) let pool = Pool::with_thread_config(num_threads, thread_config);
.thread_name(move |num| format!("{}{}", prefix, num)) Executor::ThreadPool(pool)
.build()?;
Ok(Executor::ThreadPool(pool))
} }
// Perform a map in the thread pool. // Perform a map in the thread pool.
@@ -50,9 +48,9 @@ impl Executor {
let num_fruits = args_with_indices.len(); let num_fruits = args_with_indices.len();
let fruit_receiver = { let fruit_receiver = {
let (fruit_sender, fruit_receiver) = channel::unbounded(); let (fruit_sender, fruit_receiver) = channel::unbounded();
pool.scope(|scope| { pool.scoped(|scope| {
for arg_with_idx in args_with_indices { for arg_with_idx in args_with_indices {
scope.spawn(|_| { scope.execute(|| {
let (idx, arg) = arg_with_idx; let (idx, arg) = arg_with_idx;
let fruit = f(arg); let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) { if let Err(err) = fruit_sender.send((idx, fruit)) {
@@ -105,7 +103,6 @@ mod tests {
#[should_panic] //< unfortunately the panic message is not propagated #[should_panic] //< unfortunately the panic message is not propagated
fn test_panic_propagates_multi_thread() { fn test_panic_propagates_multi_thread() {
let _result: Vec<usize> = Executor::multi_thread(1, "search-test") let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
.unwrap()
.map( .map(
|_| { |_| {
panic!("panic should propagate"); panic!("panic should propagate");
@@ -129,7 +126,6 @@ mod tests {
#[test] #[test]
fn test_map_multithread() { fn test_map_multithread() {
let result: Vec<usize> = Executor::multi_thread(3, "search-test") let result: Vec<usize> = Executor::multi_thread(3, "search-test")
.unwrap()
.map(|i| Ok(i * 2), 0..10) .map(|i| Ok(i * 2), 0..10)
.unwrap(); .unwrap();
assert_eq!(result.len(), 10); assert_eq!(result.len(), 10);
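The hunk above swaps the thread-pool backend behind `Executor::multi_thread`. A rough sketch of the pattern both variants rely on (a named pool, one scoped task per argument, and a channel that collects results back in submission order) could look like the following; `collect_doubled` and the thread-name prefix are illustrative names, not tantivy APIs:

```rust
// Hedged sketch of the thread-pool + channel pattern used by the Executor above.
use crossbeam::channel;
use rayon::ThreadPoolBuilder;

fn collect_doubled(num_threads: usize, prefix: &'static str) -> Vec<usize> {
    let pool = ThreadPoolBuilder::new()
        .num_threads(num_threads)
        .thread_name(move |i| format!("{}{}", prefix, i))
        .build()
        .expect("building the thread pool should not fail here");
    let (tx, rx) = channel::unbounded();
    pool.scope(|scope| {
        for idx in 0..10usize {
            let tx = tx.clone();
            // Send (index, result) so the caller can restore submission order.
            scope.spawn(move |_| {
                let _ = tx.send((idx, idx * 2));
            });
        }
    });
    drop(tx); // close the channel so the collecting iterator below terminates
    let mut results: Vec<(usize, usize)> = rx.iter().collect();
    results.sort_by_key(|(idx, _)| *idx);
    results.into_iter().map(|(_, fruit)| fruit).collect()
}

fn main() {
    let expected: Vec<usize> = (0..10usize).map(|i| i * 2).collect();
    assert_eq!(collect_doubled(3, "search-test-"), expected);
}
```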

View File

@@ -73,16 +73,15 @@ impl Index {
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads. /// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> { pub fn set_multithread_executor(&mut self, num_threads: usize) {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?); self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
Ok(())
} }
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads. /// by a thread pool with a given number of threads.
pub fn set_default_multithread_executor(&mut self) -> Result<()> { pub fn set_default_multithread_executor(&mut self) {
let default_num_threads = num_cpus::get(); let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads) self.set_multithread_executor(default_num_threads);
} }
/// Creates a new index using the `RAMDirectory`. /// Creates a new index using the `RAMDirectory`.
@@ -104,21 +103,23 @@ impl Index {
if Index::exists(&mmap_directory) { if Index::exists(&mmap_directory) {
return Err(TantivyError::IndexAlreadyExists); return Err(TantivyError::IndexAlreadyExists);
} }
Index::create(mmap_directory, schema) Index::create(mmap_directory, schema)
} }
/// Opens or creates a new index in the provided directory /// Opens or creates a new index in the provided directory
pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> { pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
if !Index::exists(&dir) { if Index::exists(&dir) {
return Index::create(dir, schema); let index = Index::open(dir)?;
} if index.schema() == schema {
let index = Index::open(dir)?; Ok(index)
if index.schema() == schema { } else {
Ok(index) Err(TantivyError::SchemaError(
"An index exists but the schema does not match.".to_string(),
))
}
} else { } else {
Err(TantivyError::SchemaError( Index::create(dir, schema)
"An index exists but the schema does not match.".to_string(),
))
} }
} }
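The `open_or_create` logic above either creates a fresh index or reopens an existing one, and fails with a `SchemaError` when the on-disk schema differs. A minimal usage sketch, assuming the 0.11-era public API (`Index::open_or_create`, `RAMDirectory`, `Index::reader`):

```rust
// Hedged usage sketch; field name and schema are illustrative.
use tantivy::directory::RAMDirectory;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let dir = RAMDirectory::default();
    // First call: the directory is empty, so the index is created.
    let _index = Index::open_or_create(dir.clone(), schema.clone())?;
    // Second call: the index already exists and the schema matches, so it is reopened.
    let index = Index::open_or_create(dir, schema)?;
    // Opening with a *different* schema would instead return the SchemaError above.
    let reader = index.reader()?;
    assert_eq!(reader.searcher().num_docs(), 0);
    Ok(())
}
```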
@@ -338,7 +339,7 @@ impl Index {
/// Creates a new segment. /// Creates a new segment.
pub fn new_segment(&self) -> Segment { pub fn new_segment(&self) -> Segment {
let mut segment_meta = self let segment_meta = self
.inventory .inventory
.new_segment_meta(SegmentId::generate_random(), 0); .new_segment_meta(SegmentId::generate_random(), 0);
self.segment(segment_meta) self.segment(segment_meta)
@@ -386,9 +387,12 @@ mod tests {
use crate::directory::RAMDirectory; use crate::directory::RAMDirectory;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::{Schema, INDEXED, TEXT}; use crate::schema::{Schema, INDEXED, TEXT};
use crate::Index;
use crate::IndexReader; use crate::IndexReader;
use crate::IndexWriter;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::{Directory, Index}; use std::thread;
use std::time::Duration;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
@@ -466,14 +470,14 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &index, &reader); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_specific { mod mmap_specific {
use super::*; use super::*;
use crate::Directory;
use std::path::PathBuf; use std::path::PathBuf;
use tempfile::TempDir; use tempfile::TempDir;
@@ -484,20 +488,22 @@ mod tests {
let tempdir = TempDir::new().unwrap(); let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path()); let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap(); let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::OnCommit) .reload_policy(ReloadPolicy::OnCommit)
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &index, &reader); test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
#[test] #[test]
fn test_index_manual_policy_mmap() { fn test_index_manual_policy_mmap() {
let schema = throw_away_schema(); let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap(); let field = schema.get_field("num_likes").unwrap();
let mut index = Index::create_from_tempdir(schema).unwrap(); let index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap(); writer.commit().unwrap();
let reader = index let reader = index
@@ -507,12 +513,8 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = index.directory_mut().watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap(); writer.commit().unwrap();
assert!(receiver.recv().is_ok()); thread::sleep(Duration::from_millis(500));
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
reader.reload().unwrap(); reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 1); assert_eq!(reader.searcher().num_docs(), 1);
@@ -532,26 +534,39 @@ mod tests {
.try_into() .try_into()
.unwrap(); .unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &write_index, &reader); let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
} }
} }
fn test_index_on_commit_reload_policy_aux(field: Field, index: &Index, reader: &IndexReader) { fn test_index_on_commit_reload_policy_aux(
let mut reader_index = reader.index(); field: Field,
let (sender, receiver) = crossbeam::channel::unbounded(); writer: &mut IndexWriter,
let _watch_handle = reader_index.directory_mut().watch(Box::new(move || { reader: &IndexReader,
let _ = sender.send(()); ) {
}));
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert_eq!(reader.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64)); writer.add_document(doc!(field=>1u64));
writer.commit().unwrap(); writer.commit().unwrap();
assert!(receiver.recv().is_ok()); let mut count = 0;
assert_eq!(reader.searcher().num_docs(), 1); for _ in 0..100 {
count = reader.searcher().num_docs();
if count > 0 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 1);
writer.add_document(doc!(field=>2u64)); writer.add_document(doc!(field=>2u64));
writer.commit().unwrap(); writer.commit().unwrap();
assert!(receiver.recv().is_ok()); let mut count = 0;
assert_eq!(reader.searcher().num_docs(), 2); for _ in 0..10 {
count = reader.searcher().num_docs();
if count > 1 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 2);
} }
// This test will not pass on windows, because windows // This test will not pass on windows, because windows
@@ -568,13 +583,9 @@ mod tests {
for i in 0u64..8_000u64 { for i in 0u64..8_000u64 {
writer.add_document(doc!(field => i)); writer.add_document(doc!(field => i));
} }
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle = directory.watch(Box::new(move || {
let _ = sender.send(());
}));
writer.commit().unwrap(); writer.commit().unwrap();
let mem_right_after_commit = directory.total_mem_usage(); let mem_right_after_commit = directory.total_mem_usage();
assert!(receiver.recv().is_ok()); thread::sleep(Duration::from_millis(1_000));
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
@@ -588,11 +599,6 @@ mod tests {
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 8_000); assert_eq!(searcher.num_docs(), 8_000);
assert!( assert!(mem_right_after_merge_finished < mem_right_after_commit);
mem_right_after_merge_finished < mem_right_after_commit,
"(mem after merge){} is expected < (mem before merge){}",
mem_right_after_merge_finished,
mem_right_after_commit
);
} }
} }

View File

@@ -35,7 +35,6 @@ impl SegmentMetaInventory {
segment_id, segment_id,
max_doc, max_doc,
deletes: None, deletes: None,
bundled: false,
}; };
SegmentMeta::from(self.inventory.track(inner)) SegmentMeta::from(self.inventory.track(inner))
} }
@@ -82,19 +81,6 @@ impl SegmentMeta {
self.tracked.segment_id self.tracked.segment_id
} }
pub fn with_bundled(self) -> SegmentMeta {
SegmentMeta::from(self.tracked.map(|inner| InnerSegmentMeta {
segment_id: inner.segment_id,
max_doc: inner.max_doc,
deletes: inner.deletes.clone(),
bundled: true,
}))
}
pub fn is_bundled(&self) -> bool {
self.tracked.bundled
}
/// Returns the number of deleted documents. /// Returns the number of deleted documents.
pub fn num_deleted_docs(&self) -> u32 { pub fn num_deleted_docs(&self) -> u32 {
self.tracked self.tracked
@@ -121,12 +107,8 @@ impl SegmentMeta {
/// It just joins the segment id with the extension /// It just joins the segment id with the extension
/// associated to a segment component. /// associated to a segment component.
pub fn relative_path(&self, component: SegmentComponent) -> PathBuf { pub fn relative_path(&self, component: SegmentComponent) -> PathBuf {
let suffix = self.suffix(component); let mut path = self.id().uuid_string();
self.relative_path_from_suffix(&suffix) path.push_str(&*match component {
}
fn suffix(&self, component: SegmentComponent) -> String {
match component {
SegmentComponent::POSTINGS => ".idx".to_string(), SegmentComponent::POSTINGS => ".idx".to_string(),
SegmentComponent::POSITIONS => ".pos".to_string(), SegmentComponent::POSITIONS => ".pos".to_string(),
SegmentComponent::POSITIONSSKIP => ".posidx".to_string(), SegmentComponent::POSITIONSSKIP => ".posidx".to_string(),
@@ -135,17 +117,7 @@ impl SegmentMeta {
SegmentComponent::FASTFIELDS => ".fast".to_string(), SegmentComponent::FASTFIELDS => ".fast".to_string(),
SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(), SegmentComponent::FIELDNORMS => ".fieldnorm".to_string(),
SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)), SegmentComponent::DELETE => format!(".{}.del", self.delete_opstamp().unwrap_or(0)),
} });
}
/// Returns the relative path of a component of our segment.
///
/// It just joins the segment id with the extension
/// associated to a segment component.
pub fn relative_path_from_suffix(&self, suffix: &str) -> PathBuf {
let mut path = self.id().uuid_string();
path.push_str(".");
path.push_str(&suffix);
PathBuf::from(path) PathBuf::from(path)
} }
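Both versions of `relative_path` above build a segment file name by joining the segment id with a per-component suffix. A plain-std sketch of that naming scheme (the UUID below is made up):

```rust
// Sketch of the naming scheme above; plain std code, not tantivy types.
use std::path::PathBuf;

fn relative_path(segment_uuid: &str, suffix: &str) -> PathBuf {
    let mut name = segment_uuid.to_string();
    name.push_str(suffix);
    PathBuf::from(name)
}

fn main() {
    // A made-up segment id; real ids are the segment's UUID in simple (dashless) form.
    let segment_id = "8a9f4c310e5d4d99b4c3c1a1f2b6d7e8";
    assert_eq!(
        relative_path(segment_id, ".idx"),
        PathBuf::from("8a9f4c310e5d4d99b4c3c1a1f2b6d7e8.idx")
    );
    assert_eq!(
        relative_path(segment_id, ".0.del"),
        PathBuf::from("8a9f4c310e5d4d99b4c3c1a1f2b6d7e8.0.del")
    );
}
```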
@@ -178,22 +150,6 @@ impl SegmentMeta {
self.num_deleted_docs() > 0 self.num_deleted_docs() > 0
} }
/// Updates the max_doc value from the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
assert_eq!(self.tracked.max_doc, 0);
assert!(self.tracked.deletes.is_none());
let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
segment_id: inner_meta.segment_id,
max_doc,
deletes: None,
bundled: inner_meta.bundled,
});
SegmentMeta { tracked }
}
#[doc(hidden)] #[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
let delete_meta = DeleteMeta { let delete_meta = DeleteMeta {
@@ -204,7 +160,6 @@ impl SegmentMeta {
segment_id: inner_meta.segment_id, segment_id: inner_meta.segment_id,
max_doc: inner_meta.max_doc, max_doc: inner_meta.max_doc,
deletes: Some(delete_meta), deletes: Some(delete_meta),
bundled: inner_meta.bundled,
}); });
SegmentMeta { tracked } SegmentMeta { tracked }
} }
@@ -215,7 +170,6 @@ struct InnerSegmentMeta {
segment_id: SegmentId, segment_id: SegmentId,
max_doc: u32, max_doc: u32,
deletes: Option<DeleteMeta>, deletes: Option<DeleteMeta>,
bundled: bool,
} }
impl InnerSegmentMeta { impl InnerSegmentMeta {
@@ -331,9 +285,6 @@ mod tests {
payload: None, payload: None,
}; };
let json = serde_json::ser::to_string(&index_metas).expect("serialization failed"); let json = serde_json::ser::to_string(&index_metas).expect("serialization failed");
assert_eq!( assert_eq!(json, r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#);
json,
r#"{"segments":[],"schema":[{"name":"text","type":"text","options":{"indexing":{"record":"position","tokenizer":"default"},"stored":false}}],"opstamp":0}"#
);
} }
} }

View File

@@ -4,12 +4,14 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::directory::error::{OpenReadError, OpenWriteError}; use crate::directory::error::{OpenReadError, OpenWriteError};
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::{ReadOnlyDirectory, ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::result;
/// A segment is a piece of the index. /// A segment is a piece of the index.
#[derive(Clone)] #[derive(Clone)]
@@ -48,17 +50,6 @@ impl Segment {
&self.meta &self.meta
} }
/// Updates the max_doc value from the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_max_doc(max_doc),
}
}
#[doc(hidden)] #[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment { Segment {
@@ -81,30 +72,23 @@ impl Segment {
} }
/// Open one of the component files for a *regular* read. /// Open one of the component files for a *regular* read.
pub fn open_read(&self, component: SegmentComponent) -> Result<ReadOnlySource, OpenReadError> { pub fn open_read(
&self,
component: SegmentComponent,
) -> result::Result<ReadOnlySource, OpenReadError> {
let path = self.relative_path(component); let path = self.relative_path(component);
let source = self.index.directory().open_read(&path)?; let source = self.index.directory().open_read(&path)?;
Ok(source) Ok(source)
} }
/// Open one of the component files for *regular* write. /// Open one of the component files for *regular* write.
pub fn open_write(&mut self, component: SegmentComponent) -> Result<WritePtr, OpenWriteError> { pub fn open_write(
let path = self.relative_path(component);
self.index.directory_mut().open_write(&path)
}
pub fn open_bundle_writer(&mut self) -> Result<WritePtr, OpenWriteError> {
let path = self.meta.relative_path_from_suffix("bundle");
self.index.directory_mut().open_write(&path)
}
pub(crate) fn open_write_in_directory(
&mut self, &mut self,
component: SegmentComponent, component: SegmentComponent,
directory: &mut dyn Directory, ) -> result::Result<WritePtr, OpenWriteError> {
) -> Result<WritePtr, OpenWriteError> {
let path = self.relative_path(component); let path = self.relative_path(component);
directory.open_write(&path) let write = self.index.directory_mut().open_write(&path)?;
Ok(write)
} }
} }
@@ -114,5 +98,5 @@ pub trait SerializableSegment {
/// ///
/// # Returns /// # Returns
/// The number of documents in the segment. /// The number of documents in the segment.
fn write(&self, serializer: SegmentSerializer) -> crate::Result<u32>; fn write(&self, serializer: SegmentSerializer) -> Result<u32>;
} }

View File

@@ -76,7 +76,7 @@ impl SegmentId {
} }
/// Error type used when parsing a `SegmentId` from a string fails. /// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::Error); pub struct SegmentIdParseError(uuid::parser::ParseError);
impl Error for SegmentIdParseError {} impl Error for SegmentIdParseError {}
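The change above tracks the uuid crate's error type rename; `SegmentIdParseError` simply wraps the error returned when the UUID string cannot be parsed. A small sketch of where that error comes from, assuming the `uuid` crate's `parse_str` (the sample ids are made up):

```rust
// Hedged sketch: the parse error wrapped by SegmentIdParseError comes from Uuid parsing.
// In uuid 0.8+ the error type is `uuid::Error`; in uuid 0.7 it was `uuid::parser::ParseError`.
use uuid::Uuid;

fn main() {
    assert!(Uuid::parse_str("af8e85b6c0a64327b79cbb3ca2e2b4c4").is_ok());
    let err = Uuid::parse_str("not-a-segment-id").unwrap_err();
    println!("parse failed: {}", err);
}
```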

View File

@@ -1,97 +0,0 @@
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::OpenReadError;
use crate::directory::ReadOnlySource;
use crate::error::DataCorruption;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
#[derive(Clone)]
struct BundleDirectory {
source_map: Arc<HashMap<PathBuf, ReadOnlySource>>,
}
impl BundleDirectory {
pub fn from_source(source: ReadOnlySource) -> Result<BundleDirectory, DataCorruption> {
let mut index_offset_buf = [0u8; 8];
let (body_idx, footer_offset) = source.split_from_end(8);
index_offset_buf.copy_from_slice(footer_offset.as_slice());
let offset = u64::from_le_bytes(index_offset_buf);
let (body_source, idx_source) = body_idx.split(offset as usize);
let idx: HashMap<PathBuf, (u64, u64)> = serde_json::from_slice(idx_source.as_slice())
.map_err(|err| {
let msg = format!("Failed to read index from bundle. {:?}", err);
DataCorruption::comment_only(msg)
})?;
let source_map: HashMap<PathBuf, ReadOnlySource> = idx
.into_iter()
.map(|(path, (start, stop))| {
let source = body_source.slice(start as usize, stop as usize);
(path, source)
})
.collect();
Ok(BundleDirectory {
source_map: Arc::new(source_map),
})
}
}
impl ReadOnlyDirectory for BundleDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
self.source_map
.get(path)
.cloned()
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))
}
fn exists(&self, path: &Path) -> bool {
self.source_map.contains_key(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let source = self
.source_map
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(path.to_path_buf()))?;
Ok(source.as_slice().to_vec())
}
}
#[cfg(test)]
mod tests {
use super::BundleDirectory;
use crate::directory::{RAMDirectory, ReadOnlyDirectory, TerminatingWrite};
use crate::Directory;
use std::io::Write;
use std::path::Path;
#[test]
fn test_bundle_directory() {
let mut ram_directory = RAMDirectory::default();
let test_path_atomic = Path::new("testpath_atomic");
let test_path_wrt = Path::new("testpath_wrt");
assert!(ram_directory
.atomic_write(test_path_atomic, b"titi")
.is_ok());
{
let mut test_wrt = ram_directory.open_write(test_path_wrt).unwrap();
assert!(test_wrt.write_all(b"toto").is_ok());
assert!(test_wrt.terminate().is_ok());
}
let mut dest_directory = RAMDirectory::default();
let bundle_path = Path::new("bundle");
let mut wrt = dest_directory.open_write(bundle_path).unwrap();
assert!(ram_directory.serialize_bundle(&mut wrt).is_ok());
assert!(wrt.terminate().is_ok());
let source = dest_directory.open_read(bundle_path).unwrap();
let bundle_directory = BundleDirectory::from_source(source).unwrap();
assert_eq!(
&bundle_directory.atomic_read(test_path_atomic).unwrap()[..],
b"titi"
);
assert_eq!(
&bundle_directory.open_read(test_path_wrt).unwrap()[..],
b"toto"
);
}
}
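The `from_source` code above implies a simple bundle layout: concatenated file bodies, then a JSON index mapping each path to its `(start, stop)` byte range, then the offset of that index as a little-endian `u64` in the last 8 bytes. A self-contained sketch that writes and re-reads that layout (plain `std` plus `serde_json`; the file name and bytes are illustrative):

```rust
// Sketch of the bundle layout parsed by `from_source` above.
use std::collections::HashMap;
use std::convert::TryInto;

fn main() {
    // One bundled file: "testpath_atomic" with body b"titi" stored at offsets [0, 4).
    let body: Vec<u8> = b"titi".to_vec();
    let mut index: HashMap<String, (u64, u64)> = HashMap::new();
    index.insert("testpath_atomic".to_string(), (0, body.len() as u64));

    let mut bundle = body.clone();
    let index_offset = bundle.len() as u64;
    bundle.extend_from_slice(&serde_json::to_vec(&index).unwrap());
    bundle.extend_from_slice(&index_offset.to_le_bytes());

    // Reading it back follows the same steps as `from_source`:
    let (rest, footer) = bundle.split_at(bundle.len() - 8);
    let offset = u64::from_le_bytes(footer.try_into().unwrap()) as usize;
    let (bodies, idx_json) = rest.split_at(offset);
    let idx: HashMap<String, (u64, u64)> = serde_json::from_slice(idx_json).unwrap();
    let (start, stop) = idx["testpath_atomic"];
    assert_eq!(&bodies[start as usize..stop as usize], &b"titi"[..]);
}
```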

View File

@@ -100,30 +100,6 @@ fn retry_policy(is_blocking: bool) -> RetryPolicy {
} }
} }
pub trait ReadOnlyDirectory {
/// Opens a virtual file for read.
///
/// Once a virtual file is open, its data may not
/// change.
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files created with [Directory::open_write].
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Reads the full content of a file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files created with [Directory::atomic_write].
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
}
/// Write-once read many (WORM) abstraction for where /// Write-once read many (WORM) abstraction for where
/// tantivy's data should be stored. /// tantivy's data should be stored.
/// ///
@@ -134,9 +110,18 @@ pub trait ReadOnlyDirectory {
/// - The [`RAMDirectory`](struct.RAMDirectory.html), which /// - The [`RAMDirectory`](struct.RAMDirectory.html), which
/// should be used mostly for tests. /// should be used mostly for tests.
/// ///
pub trait Directory: pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
DirectoryClone + ReadOnlyDirectory + fmt::Debug + Send + Sync + 'static /// Opens a virtual file for read.
{ ///
/// Once a virtual file is open, its data may not
/// change.
///
/// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files created with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file /// Removes a file
/// ///
/// Removing a file will not affect an eventual /// Removing a file will not affect an eventual
@@ -146,6 +131,9 @@ pub trait Directory:
/// `DeleteError::DoesNotExist`. /// `DeleteError::DoesNotExist`.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError>; fn delete(&self, path: &Path) -> result::Result<(), DeleteError>;
/// Returns true iff the file exists
fn exists(&self, path: &Path) -> bool;
/// Opens a writer for the *virtual file* associated with /// Opens a writer for the *virtual file* associated with
/// a Path. /// a Path.
/// ///
@@ -167,6 +155,14 @@ pub trait Directory:
/// The file may not previously exist. /// The file may not previously exist.
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>; fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError>;
/// Reads the full content of a file that has been written using
/// atomic_write.
///
/// This should only be used for small files.
///
/// You should only use this to read files created with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data. /// Atomically replace the content of a file with data.
/// ///
/// This call ensures that reads can never *observe* /// This call ensures that reads can never *observe*
@@ -201,7 +197,7 @@ pub trait Directory:
/// Registers a callback that will be called whenever a change on the `meta.json` /// Registers a callback that will be called whenever a change on the `meta.json`
/// using the `atomic_write` API is detected. /// using the `atomic_write` API is detected.
/// ///
/// The behavior when using `.watch()` on a file using [Directory::open_write] is, on the other /// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other
/// hand, undefined. /// hand, undefined.
/// ///
/// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is /// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is

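As a short usage sketch of the `atomic_write`/`atomic_read` pair documented above, assuming the variant where both methods live directly on `Directory` (file name and payload are illustrative):

```rust
// Hedged usage sketch of atomic_write / atomic_read on a RAMDirectory.
use std::path::Path;
use tantivy::directory::RAMDirectory;
use tantivy::Directory;

fn main() {
    let mut dir = RAMDirectory::default();
    let path = Path::new("some_meta.json");
    dir.atomic_write(path, br#"{"segments":[]}"#)
        .expect("atomic_write failed");
    let bytes = dir.atomic_read(path).expect("atomic_read failed");
    assert_eq!(bytes, br#"{"segments":[]}"#.to_vec());
}
```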
View File

@@ -1,4 +1,3 @@
use crate::Version;
use std::error::Error as StdError; use std::error::Error as StdError;
use std::fmt; use std::fmt;
use std::io; use std::io;
@@ -157,65 +156,6 @@ impl StdError for OpenWriteError {
} }
} }
/// Type of index incompatibility between the library and the index found on disk
/// Used to catch and provide a hint to solve this incompatibility issue
pub enum Incompatibility {
/// This library cannot decompress the index found on disk
CompressionMismatch {
/// Compression algorithm used by the current version of tantivy
library_compression_format: String,
/// Compression algorithm that was used to serialise the index
index_compression_format: String,
},
/// The index format found on disk isn't supported by this version of the library
IndexMismatch {
/// Version used by the library
library_version: Version,
/// Version the index was built with
index_version: Version,
},
}
impl fmt::Debug for Incompatibility {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
match self {
Incompatibility::CompressionMismatch {
library_compression_format,
index_compression_format,
} => {
let err = format!(
"Library was compiled with {:?} compression, index was compressed with {:?}",
library_compression_format, index_compression_format
);
let advice = format!(
"Change the feature flag to {:?} and rebuild the library",
index_compression_format
);
write!(f, "{}. {}", err, advice)?;
}
Incompatibility::IndexMismatch {
library_version,
index_version,
} => {
let err = format!(
"Library version: {}, index version: {}",
library_version.index_format_version, index_version.index_format_version
);
// TODO make a more useful error message
// include the version range that supports this index_format_version
let advice = format!(
"Change tantivy to a version compatible with index format {} (e.g. {}.{}.x) \
and rebuild your project.",
index_version.index_format_version, index_version.major, index_version.minor
);
write!(f, "{}. {}", err, advice)?;
}
}
Ok(())
}
}
/// Error that may occur when accessing a file read /// Error that may occur when accessing a file read
#[derive(Debug)] #[derive(Debug)]
pub enum OpenReadError { pub enum OpenReadError {
@@ -224,8 +164,6 @@ pub enum OpenReadError {
/// Any kind of IO error that happens when /// Any kind of IO error that happens when
/// interacting with the underlying IO device. /// interacting with the underlying IO device.
IOError(IOError), IOError(IOError),
/// This library doesn't support the index version found on disk
IncompatibleIndex(Incompatibility),
} }
impl From<IOError> for OpenReadError { impl From<IOError> for OpenReadError {
@@ -245,9 +183,19 @@ impl fmt::Display for OpenReadError {
"an io error occurred while opening a file for reading: '{}'", "an io error occurred while opening a file for reading: '{}'",
err err
), ),
OpenReadError::IncompatibleIndex(ref footer) => { }
write!(f, "Incompatible index format: {:?}", footer) }
} }
impl StdError for OpenReadError {
fn description(&self) -> &str {
"error occurred while opening a file for reading"
}
fn cause(&self) -> Option<&dyn StdError> {
match *self {
OpenReadError::FileDoesNotExist(_) => None,
OpenReadError::IOError(ref err) => Some(err),
} }
} }
} }
@@ -268,12 +216,6 @@ impl From<IOError> for DeleteError {
} }
} }
impl From<Incompatibility> for OpenReadError {
fn from(incompatibility: Incompatibility) -> Self {
OpenReadError::IncompatibleIndex(incompatibility)
}
}
impl fmt::Display for DeleteError { impl fmt::Display for DeleteError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self { match *self {

View File

@@ -1,175 +1,159 @@
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt};
use crate::directory::error::Incompatibility;
use crate::directory::read_only_source::ReadOnlySource; use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite}; use crate::directory::{AntiCallToken, TerminatingWrite};
use crate::Version; use byteorder::{ByteOrder, LittleEndian};
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use crc32fast::Hasher; use crc32fast::Hasher;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
type CrcHashU32 = u32; const COMMON_FOOTER_SIZE: usize = 4 * 5;
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct Footer { pub struct Footer {
pub version: Version, pub tantivy_version: (u32, u32, u32),
pub meta: String, pub meta: String,
pub versioned_footer: VersionedFooter, pub versioned_footer: VersionedFooter,
} }
/// Serialises the footer to a byte-array
/// - versioned_footer_len : 4 bytes
/// - versioned_footer: variable bytes
/// - meta_len: 4 bytes
/// - meta: variable bytes
/// - version_len: 4 bytes
/// - version json: variable bytes
impl BinarySerializable for Footer {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
BinarySerializable::serialize(&self.versioned_footer, writer)?;
BinarySerializable::serialize(&self.meta, writer)?;
let version_string =
serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?;
BinarySerializable::serialize(&version_string, writer)?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let versioned_footer = VersionedFooter::deserialize(reader)?;
let meta = String::deserialize(reader)?;
let version_json = String::deserialize(reader)?;
let version = serde_json::from_str(&version_json)?;
Ok(Footer {
version,
meta,
versioned_footer,
})
}
}
impl Footer { impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self { pub fn new(versioned_footer: VersionedFooter) -> Self {
let version = crate::VERSION.clone(); let tantivy_version = (
let meta = version.to_string(); env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer { Footer {
version, tantivy_version,
meta, meta: format!(
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer, versioned_footer,
} }
} }
pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> { pub fn to_bytes(&self) -> Vec<u8> {
let mut counting_write = CountingWriter::wrap(&mut write); let mut res = self.versioned_footer.to_bytes();
self.serialize(&mut counting_write)?; res.extend_from_slice(self.meta.as_bytes());
let written_len = counting_write.written_bytes(); let len = res.len();
write.write_u32::<LittleEndian>(written_len as u32)?; res.resize(len + COMMON_FOOTER_SIZE, 0);
Ok(()) let mut common_footer = &mut res[len..];
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
} }
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> { pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
if source.len() < 4 { let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::UnexpectedEof, io::ErrorKind::UnexpectedEof,
format!( format!(
"File corrupted. The file is smaller than 4 bytes (len={}).", "File corrupted. The footer len is {}, while the entire file len is {}",
source.len() size, len
), ),
)); ));
} }
let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES); let footer = &data[len - size as usize..];
let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize; let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let body_len = body_footer.len() - footer_len; let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let (body, footer_data) = body_footer.split(body_len); let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let mut cursor = footer_data.as_slice(); let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
let footer = Footer::deserialize(&mut cursor)?; Ok(Footer {
Ok((footer, body)) tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
} }
/// Confirms that the index will be read correctly by this version of tantivy pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
/// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory let footer = Footer::from_bytes(source.as_slice())?;
pub fn is_compatible(&self) -> Result<(), Incompatibility> { let reader = source.slice_to(source.as_slice().len() - footer.size());
let library_version = crate::version(); Ok((footer, reader))
match &self.versioned_footer { }
VersionedFooter::V1 {
crc32: _crc, pub fn size(&self) -> usize {
store_compression: compression, self.versioned_footer.size() as usize + self.meta.len() + 20
} => {
if &library_version.store_compression != compression {
return Err(Incompatibility::CompressionMismatch {
library_compression_format: library_version.store_compression.to_string(),
index_compression_format: compression.to_string(),
});
}
Ok(())
}
VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch {
library_version: library_version.clone(),
index_version: self.version.clone(),
}),
}
} }
} }
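The `to_bytes`/`from_bytes` pair above ends every footer with a 20-byte common section: five little-endian `u32` values holding the meta length, the three tantivy version numbers, and the total footer length that a reader uses to locate the footer from the end of the file. A hedged sketch of just that layout (the meta string and lengths are examples):

```rust
// Sketch of the 20-byte common footer written by `to_bytes` above.
use byteorder::{ByteOrder, LittleEndian};

fn main() {
    let meta = b"tantivy 0.11.0, index v0";
    let versioned_footer_len: usize = 8; // V0: [version u32 | crc u32]
    let total_footer_len = versioned_footer_len + meta.len() + 20;

    let mut common_footer = [0u8; 20];
    LittleEndian::write_u32(&mut common_footer[0..4], meta.len() as u32);
    LittleEndian::write_u32(&mut common_footer[4..8], 0);   // major
    LittleEndian::write_u32(&mut common_footer[8..12], 11); // minor
    LittleEndian::write_u32(&mut common_footer[12..16], 0); // patch
    LittleEndian::write_u32(&mut common_footer[16..20], total_footer_len as u32);

    // A reader starts from the last 4 bytes to find the whole footer, as in `from_bytes`.
    assert_eq!(LittleEndian::read_u32(&common_footer[16..]), 52);
}
```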
/// Footer that includes a crc32 hash that enables us to checksum files in the index
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter { pub enum VersionedFooter {
UnknownVersion, UnknownVersion { version: u32, size: u32 },
V1 { V0(u32), // crc
crc32: CrcHashU32,
store_compression: String,
},
}
impl BinarySerializable for VersionedFooter {
fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut buf = Vec::new();
match self {
VersionedFooter::V1 {
crc32,
store_compression: compression,
} => {
// Serializes a valid `VersionedFooter` or panics if the version is unknown
// [ version | crc_hash | compression_mode ]
// [ 0..4 | 4..8 | variable ]
BinarySerializable::serialize(&1u32, &mut buf)?;
BinarySerializable::serialize(crc32, &mut buf)?;
BinarySerializable::serialize(compression, &mut buf)?;
}
VersionedFooter::UnknownVersion => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Cannot serialize an unknown versioned footer ",
));
}
}
BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?;
writer.write_all(&buf[..])?;
Ok(())
}
fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let len = VInt::deserialize(reader)?.0 as usize;
let mut buf = vec![0u8; len];
reader.read_exact(&mut buf[..])?;
let mut cursor = &buf[..];
let version = u32::deserialize(&mut cursor)?;
if version == 1 {
let crc32 = u32::deserialize(&mut cursor)?;
let compression = String::deserialize(&mut cursor)?;
Ok(VersionedFooter::V1 {
crc32,
store_compression: compression,
})
} else {
Ok(VersionedFooter::UnknownVersion)
}
}
} }
impl VersionedFooter { impl VersionedFooter {
pub fn crc(&self) -> Option<CrcHashU32> { pub fn to_bytes(&self) -> Vec<u8> {
match self { match self {
VersionedFooter::V1 { crc32, .. } => Some(*crc32), VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
}
}
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
assert!(footer.len() >= 4);
let version = LittleEndian::read_u32(footer);
match version {
0 => {
if footer.len() == 8 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
} else {
Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The versioned footer len is {}, while it should be 8",
footer.len()
),
))
}
}
version => Ok(VersionedFooter::UnknownVersion {
version,
size: footer.len() as u32,
}),
}
}
pub fn size(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 8,
VersionedFooter::UnknownVersion { size, .. } => *size,
}
}
pub fn version(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 0,
VersionedFooter::UnknownVersion { version, .. } => *version,
}
}
pub fn crc(&self) -> Option<u32> {
match self {
VersionedFooter::V0(crc) => Some(*crc),
VersionedFooter::UnknownVersion { .. } => None, VersionedFooter::UnknownVersion { .. } => None,
} }
} }
@@ -205,135 +189,25 @@ impl<W: TerminatingWrite> Write for FooterProxy<W> {
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> { impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> { fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc32 = self.hasher.take().unwrap().finalize(); let crc = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V1 {
crc32, let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
store_compression: crate::store::COMPRESSION.to_string(),
});
let mut writer = self.writer.take().unwrap(); let mut writer = self.writer.take().unwrap();
footer.append_footer(&mut writer)?; writer.write_all(&footer)?;
writer.terminate() writer.terminate()
} }
} }
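`FooterProxy` above hashes every byte it forwards and appends the checksum when the writer terminates. A minimal sketch of that hash-as-you-write idea with `crc32fast`; the wrapper type is illustrative, not the tantivy one:

```rust
// Minimal sketch of the hash-as-you-write idea used by FooterProxy.
use crc32fast::Hasher;
use std::io::{self, Write};

struct ChecksummingWriter<W: Write> {
    inner: W,
    hasher: Hasher,
}

impl<W: Write> Write for ChecksummingWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        // Hash exactly the bytes that reached the inner writer.
        self.hasher.update(&buf[..written]);
        Ok(written)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

fn main() -> io::Result<()> {
    let mut wrt = ChecksummingWriter { inner: Vec::new(), hasher: Hasher::new() };
    wrt.write_all(b"some segment bytes")?;
    // Clone before finalize, since finalize consumes the hasher.
    let crc: u32 = wrt.hasher.clone().finalize();
    println!("crc32 of the payload: {:#010x}", crc);
    Ok(())
}
```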
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::CrcHashU32;
use super::FooterProxy;
use crate::common::BinarySerializable;
use crate::directory::footer::{Footer, VersionedFooter}; use crate::directory::footer::{Footer, VersionedFooter};
use crate::directory::TerminatingWrite;
use byteorder::{ByteOrder, LittleEndian};
use regex::Regex;
#[test]
fn test_versioned_footer() {
let mut vec = Vec::new();
let footer_proxy = FooterProxy::new(&mut vec);
assert!(footer_proxy.terminate().is_ok());
assert_eq!(vec.len(), 167);
let footer = Footer::deserialize(&mut &vec[..]).unwrap();
if let VersionedFooter::V1 {
crc32: _,
store_compression,
} = footer.versioned_footer
{
assert_eq!(store_compression, crate::store::COMPRESSION);
} else {
panic!("Versioned footer should be V1.");
}
assert_eq!(&footer.version, crate::version());
}
#[test] #[test]
fn test_serialize_deserialize_footer() { fn test_serialize_deserialize_footer() {
let mut buffer = Vec::new(); let crc = 123456;
let crc32 = 123456u32; let footer = Footer::new(VersionedFooter::V0(crc));
let footer: Footer = Footer::new(VersionedFooter::V1 { let footer_bytes = footer.to_bytes();
crc32,
store_compression: "lz4".to_string(),
});
footer.serialize(&mut buffer).unwrap();
let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap();
assert_eq!(footer_deser, footer);
}
#[test] assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
fn footer_length() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let mut buf = Vec::new();
versioned_footer.serialize(&mut buf).unwrap();
assert_eq!(buf.len(), 13);
let footer = Footer::new(versioned_footer);
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
assert!(regex_ptn.is_match(&footer.meta));
}
#[test]
fn versioned_footer_from_bytes() {
let v_footer_bytes = vec![
// versionned footer length
12 | 128,
// index format version
1,
0,
0,
0,
// crc 32
12,
35,
89,
18,
// compression format
3 | 128,
b'l',
b'z',
b'4',
];
let mut cursor = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap();
assert!(cursor.is_empty());
let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32;
let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 {
crc32: expected_crc,
store_compression: "lz4".to_string(),
};
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buffer = Vec::new();
assert!(versioned_footer.serialize(&mut buffer).is_ok());
assert_eq!(&v_footer_bytes[..], &buffer[..]);
}
#[test]
fn versioned_footer_panic() {
let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8];
let mut b = &v_footer_bytes[..];
let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap();
assert!(b.is_empty());
let expected_versioned_footer = VersionedFooter::UnknownVersion;
assert_eq!(versioned_footer, expected_versioned_footer);
let mut buf = Vec::new();
assert!(versioned_footer.serialize(&mut buf).is_err());
}
#[test]
#[cfg(not(feature = "lz4"))]
fn compression_mismatch() {
let crc32 = 1111111u32;
let versioned_footer = VersionedFooter::V1 {
crc32,
store_compression: "lz4".to_string(),
};
let footer = Footer::new(versioned_footer);
let res = footer.is_compatible();
assert!(res.is_err());
} }
} }

View File

@@ -2,15 +2,13 @@ use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy}; use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::GarbageCollectionResult;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
use crate::directory::{ReadOnlySource, WritePtr}; use crate::directory::{ReadOnlySource, WritePtr};
use crate::directory::{WatchCallback, WatchHandle}; use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
use crate::Result;
use crate::directory::directory::ReadOnlyDirectory;
use crc32fast::Hasher; use crc32fast::Hasher;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
@@ -66,7 +64,7 @@ fn save_managed_paths(
impl ManagedDirectory { impl ManagedDirectory {
/// Wraps a directory as managed directory. /// Wraps a directory as managed directory.
pub fn wrap<Dir: Directory>(directory: Dir) -> crate::Result<ManagedDirectory> { pub fn wrap<Dir: Directory>(directory: Dir) -> Result<ManagedDirectory> {
match directory.atomic_read(&MANAGED_FILEPATH) { match directory.atomic_read(&MANAGED_FILEPATH) {
Ok(data) => { Ok(data) => {
let managed_files_json = String::from_utf8_lossy(&data); let managed_files_json = String::from_utf8_lossy(&data);
@@ -89,11 +87,6 @@ impl ManagedDirectory {
meta_informations: Arc::default(), meta_informations: Arc::default(),
}), }),
Err(OpenReadError::IOError(e)) => Err(From::from(e)), Err(OpenReadError::IOError(e)) => Err(From::from(e)),
Err(OpenReadError::IncompatibleIndex(incompatibility)) => {
// For the moment, this should never happen: `meta.json`
// does not have any footer and cannot detect incompatibility.
Err(crate::TantivyError::IncompatibleIndex(incompatibility))
}
} }
} }
@@ -111,10 +104,7 @@ impl ManagedDirectory {
/// If a file cannot be deleted (for permission reasons for instance) /// If a file cannot be deleted (for permission reasons for instance)
/// an error is simply logged, and the file remains in the list of managed /// an error is simply logged, and the file remains in the list of managed
/// files. /// files.
pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>( pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
&mut self,
get_living_files: L,
) -> crate::Result<GarbageCollectionResult> {
info!("Garbage collect"); info!("Garbage collect");
let mut files_to_delete = vec![]; let mut files_to_delete = vec![];
@@ -140,25 +130,19 @@ impl ManagedDirectory {
// 2) writer change meta.json (for instance after a merge or a commit) // 2) writer change meta.json (for instance after a merge or a commit)
// 3) gc kicks in. // 3) gc kicks in.
// 4) gc removes a file that was useful for process B, before process B opened it. // 4) gc removes a file that was useful for process B, before process B opened it.
match self.acquire_lock(&META_LOCK) { if let Ok(_meta_lock) = self.acquire_lock(&META_LOCK) {
Ok(_meta_lock) => { let living_files = get_living_files();
let living_files = get_living_files(); for managed_path in &meta_informations_rlock.managed_paths {
for managed_path in &meta_informations_rlock.managed_paths { if !living_files.contains(managed_path) {
if !living_files.contains(managed_path) { files_to_delete.push(managed_path.clone());
files_to_delete.push(managed_path.clone());
}
} }
} }
Err(err) => { } else {
error!("Failed to acquire lock for GC"); error!("Failed to acquire lock for GC");
return Err(crate::Error::from(err));
}
} }
} }
let mut failed_to_delete_files = vec![];
let mut deleted_files = vec![]; let mut deleted_files = vec![];
for file_to_delete in files_to_delete { for file_to_delete in files_to_delete {
match self.delete(&file_to_delete) { match self.delete(&file_to_delete) {
Ok(_) => { Ok(_) => {
@@ -168,10 +152,9 @@ impl ManagedDirectory {
Err(file_error) => { Err(file_error) => {
match file_error { match file_error {
DeleteError::FileDoesNotExist(_) => { DeleteError::FileDoesNotExist(_) => {
deleted_files.push(file_to_delete.clone()); deleted_files.push(file_to_delete);
} }
DeleteError::IOError(_) => { DeleteError::IOError(_) => {
failed_to_delete_files.push(file_to_delete.clone());
if !cfg!(target_os = "windows") { if !cfg!(target_os = "windows") {
// On windows, delete is expected to fail if the file // On windows, delete is expected to fail if the file
// is mmapped. // is mmapped.
@@ -194,13 +177,10 @@ impl ManagedDirectory {
for delete_file in &deleted_files { for delete_file in &deleted_files {
managed_paths_write.remove(delete_file); managed_paths_write.remove(delete_file);
} }
save_managed_paths(self.directory.as_mut(), &meta_informations_wlock)?; if save_managed_paths(self.directory.as_mut(), &meta_informations_wlock).is_err() {
error!("Failed to save the list of managed files.");
}
} }
Ok(GarbageCollectionResult {
deleted_files,
failed_to_delete_files,
})
} }
/// Registers a file as managed /// Registers a file as managed
@@ -265,6 +245,13 @@ impl ManagedDirectory {
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
}
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
@@ -282,10 +269,18 @@ impl Directory for ManagedDirectory {
self.directory.atomic_write(path, data) self.directory.atomic_write(path, data)
} }
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
self.directory.delete(path) self.directory.delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> { fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock) self.directory.acquire_lock(lock)
} }
@@ -295,24 +290,6 @@ impl Directory for ManagedDirectory {
} }
} }
impl ReadOnlyDirectory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?;
let (footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
footer.is_compatible()?;
Ok(reader)
}
fn exists(&self, path: &Path) -> bool {
self.directory.exists(path)
}
fn atomic_read(&self, path: &Path) -> result::Result<Vec<u8>, OpenReadError> {
self.directory.atomic_read(path)
}
}
impl Clone for ManagedDirectory { impl Clone for ManagedDirectory {
fn clone(&self) -> ManagedDirectory { fn clone(&self) -> ManagedDirectory {
ManagedDirectory { ManagedDirectory {
@@ -326,9 +303,7 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests_mmap_specific {
use crate::directory::{ use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite};
Directory, ManagedDirectory, MmapDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;
@@ -352,8 +327,9 @@ mod tests_mmap_specific {
.unwrap(); .unwrap();
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(managed_directory.exists(test_path2)); assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect(); let living_files: HashSet<PathBuf> =
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); [test_path1.to_owned()].into_iter().cloned().collect();
managed_directory.garbage_collect(|| living_files);
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
} }
@@ -363,7 +339,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = HashSet::new(); let living_files: HashSet<PathBuf> = HashSet::new();
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
} }
@@ -385,9 +361,7 @@ mod tests_mmap_specific {
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap(); let _mmap_read = managed_directory.open_read(test_path1).unwrap();
assert!(managed_directory managed_directory.garbage_collect(|| living_files.clone());
.garbage_collect(|| living_files.clone())
.is_ok());
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
// On Windows, gc should try and fail the file as it is mmapped. // On Windows, gc should try and fail the file as it is mmapped.
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
@@ -395,7 +369,7 @@ mod tests_mmap_specific {
drop(_mmap_read); drop(_mmap_read);
// The file should still be in the list of managed file and // The file should still be in the list of managed file and
// eventually be deleted once mmap is released. // eventually be deleted once mmap is released.
assert!(managed_directory.garbage_collect(|| living_files).is_ok()); managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
} else { } else {
assert!(!managed_directory.exists(test_path1)); assert!(!managed_directory.exists(test_path1));
@@ -420,8 +394,6 @@ mod tests_mmap_specific {
write.write_all(&[3u8, 4u8, 5u8]).unwrap(); write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap(); write.terminate().unwrap();
let read_source = managed_directory.open_read(test_path2).unwrap();
assert_eq!(read_source.as_slice(), &[3u8, 4u8, 5u8]);
assert!(managed_directory.list_damaged().unwrap().is_empty()); assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone(); let mut corrupted_path = tempdir_path.clone();

View File

@@ -6,7 +6,6 @@ use self::notify::RawEvent;
use self::notify::RecursiveMode; use self::notify::RecursiveMode;
use self::notify::Watcher; use self::notify::Watcher;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::LockError; use crate::directory::error::LockError;
use crate::directory::error::{ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError, DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
@@ -132,13 +131,14 @@ impl MmapCache {
} }
self.cache.remove(full_path); self.cache.remove(full_path);
self.counters.miss += 1; self.counters.miss += 1;
let mmap_opt = open_mmap(full_path)?; Ok(if let Some(mmap) = open_mmap(full_path)? {
Ok(mmap_opt.map(|mmap| {
let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap)); let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
let mmap_weak = Arc::downgrade(&mmap_arc); let mmap_weak = Arc::downgrade(&mmap_arc);
self.cache.insert(full_path.to_owned(), mmap_weak); self.cache.insert(full_path.to_owned(), mmap_weak);
mmap_arc Some(mmap_arc)
})) } else {
None
})
} }
} }
@@ -174,7 +174,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point. // We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() { if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH { if filename == *META_FILEPATH {
let _ = watcher_router_clone.broadcast(); watcher_router_clone.broadcast();
} }
} }
} }
@@ -408,6 +408,24 @@ impl TerminatingWrite for SafeFileWriter {
} }
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
/// Any entry associated to the path in the mmap will be /// Any entry associated to the path in the mmap will be
/// removed before the file is deleted. /// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
@@ -426,6 +444,11 @@ impl Directory for MmapDirectory {
} }
} }
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
debug!("Open Write {:?}", path); debug!("Open Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -456,6 +479,25 @@ impl Directory for MmapDirectory {
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
debug!("Atomic Write {:?}", path); debug!("Atomic Write {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
@@ -489,50 +531,6 @@ impl Directory for MmapDirectory {
} }
} }
impl ReadOnlyDirectory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while reading {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
Ok(mmap_cache
.get_mmap(&full_path)?
.map(ReadOnlySource::from)
.unwrap_or_else(ReadOnlySource::empty))
}
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path);
let mut buffer = Vec::new();
match File::open(&full_path) {
Ok(mut file) => {
file.read_to_end(&mut buffer)
.map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(buffer)
}
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(OpenReadError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -540,15 +538,16 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use crate::indexer::LogMergePolicy;
use crate::schema::{Schema, SchemaBuilder, TEXT}; use crate::schema::{Schema, SchemaBuilder, TEXT};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use std::fs; use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
#[test] #[test]
fn test_open_non_existent_path() { fn test_open_non_existant_path() {
assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err()); assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
} }
@@ -641,18 +640,13 @@ mod tests {
let tmp_dir = tempfile::TempDir::new().unwrap(); let tmp_dir = tempfile::TempDir::new().unwrap();
let tmp_dirpath = tmp_dir.path().to_owned(); let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap(); let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join(*META_FILEPATH); let tmp_file = tmp_dirpath.join("coucou");
let _handle = watch_wrapper.watch(Box::new(move || { let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
})); }));
let (sender, receiver) = crossbeam::channel::unbounded();
let _handle2 = watch_wrapper.watch(Box::new(move || {
let _ = sender.send(());
}));
assert_eq!(counter.load(Ordering::SeqCst), 0); assert_eq!(counter.load(Ordering::SeqCst), 0);
fs::write(&tmp_file, b"whateverwilldo").unwrap(); fs::write(&tmp_file, b"whateverwilldo").unwrap();
assert!(receiver.recv().is_ok()); thread::sleep(Duration::new(0, 1_000u32));
assert!(counter.load(Ordering::SeqCst) >= 1);
} }
#[test] #[test]
@@ -661,42 +655,34 @@ mod tests {
let mut schema_builder: SchemaBuilder = Schema::builder(); let mut schema_builder: SchemaBuilder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
{ {
let index = Index::create(mmap_directory.clone(), schema).unwrap(); let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let mut log_merge_policy = LogMergePolicy::default(); for _num_commits in 0..16 {
log_merge_policy.set_min_merge_size(3);
index_writer.set_merge_policy(Box::new(log_merge_policy));
for _num_commits in 0..10 {
for _ in 0..10 { for _ in 0..10 {
index_writer.add_document(doc!(text_field=>"abc")); index_writer.add_document(doc!(text_field=>"abc"));
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
let reader = index let reader = index
.reader_builder() .reader_builder()
.reload_policy(ReloadPolicy::Manual) .reload_policy(ReloadPolicy::Manual)
.try_into() .try_into()
.unwrap(); .unwrap();
for _ in 0..30 {
for _ in 0..4 {
index_writer.add_document(doc!(text_field=>"abc")); index_writer.add_document(doc!(text_field=>"abc"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
} }
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
let num_segments = reader.searcher().segment_readers().len(); let num_segments = reader.searcher().segment_readers().len();
assert!(num_segments <= 4); assert_eq!(num_segments, 4);
assert_eq!( assert_eq!(
num_segments * 7, num_segments * 7,
mmap_directory.get_cache_info().mmapped.len() mmap_directory.get_cache_info().mmapped.len()
); );
} }
assert!(mmap_directory.get_cache_info().mmapped.is_empty()); assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
} }
} }

View File

@@ -7,7 +7,6 @@ WORM directory abstraction.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
mod mmap_directory; mod mmap_directory;
mod bundle_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod footer; mod footer;
@@ -20,26 +19,13 @@ mod watch_event_router;
pub mod error; pub mod error;
pub use self::directory::DirectoryLock; pub use self::directory::DirectoryLock;
pub use self::directory::{Directory, DirectoryClone, ReadOnlyDirectory}; pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub use self::watch_event_router::{WatchCallback, WatchCallbackList, WatchHandle}; pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{self, BufWriter, Write};
use std::path::PathBuf;
/// Outcome of the Garbage collection
pub struct GarbageCollectionResult {
/// List of files that were deleted in this cycle
pub deleted_files: Vec<PathBuf>,
/// List of files that were scheduled to be deleted in this cycle,
/// but whose deletion did not work. This typically happens on Windows,
/// as deleting a memory-mapped file is forbidden.
///
/// If a searcher is still held, a file cannot be deleted.
/// This is not considered a bug; the file will simply be deleted
/// in the next GC.
pub failed_to_delete_files: Vec<PathBuf>,
}
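A hedged sketch of how a caller might consume the struct above; the `report_gc` helper is hypothetical and only illustrates the meaning of the two lists:

```rust
// Hypothetical helper summarizing a GC outcome. `deleted_files` were removed;
// `failed_to_delete_files` are typically still memory-mapped (e.g. on Windows)
// and will simply be retried by the next GC.
fn report_gc(gc: &GarbageCollectionResult) {
    println!("garbage collection removed {} files", gc.deleted_files.len());
    for path in &gc.failed_to_delete_files {
        println!("could not delete yet, will retry: {}", path.display());
    }
}
```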
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
@@ -47,9 +33,6 @@ pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory; pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly /// Struct used to prevent calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
///
/// The point is that while the type is public, it cannot be built by anyone
/// outside of this module.
pub struct AntiCallToken(()); pub struct AntiCallToken(());
/// Trait used to indicate when no more writes need to be done on a writer /// Trait used to indicate when no more writes need to be done on a writer
@@ -80,13 +63,6 @@ impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
} }
} }
#[cfg(test)]
impl<'a> TerminatingWrite for &'a mut Vec<u8> {
fn terminate_ref(&mut self, _a: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write

View File

@@ -1,6 +1,4 @@
use crate::common::CountingWriter;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::directory::ReadOnlyDirectory;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken; use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
@@ -117,22 +115,6 @@ impl InnerDirectory {
fn total_mem_usage(&self) -> usize { fn total_mem_usage(&self) -> usize {
self.fs.values().map(|f| f.len()).sum() self.fs.values().map(|f| f.len()).sum()
} }
fn serialize_bundle(&self, wrt: &mut WritePtr) -> io::Result<()> {
let mut counting_writer = CountingWriter::wrap(wrt);
let mut file_index: HashMap<PathBuf, (u64, u64)> = HashMap::default();
for (path, source) in &self.fs {
let start = counting_writer.written_bytes();
counting_writer.write_all(source.as_slice())?;
let stop = counting_writer.written_bytes();
file_index.insert(path.to_path_buf(), (start, stop));
}
let index_offset = counting_writer.written_bytes();
serde_json::to_writer(&mut counting_writer, &file_index)?;
let index_offset_buffer = index_offset.to_le_bytes();
counting_writer.write_all(&index_offset_buffer[..])?;
Ok(())
}
} }
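The bundle layout written by `serialize_bundle` above is: the file bodies back to back, a JSON index mapping each path to its `(start, stop)` byte range, and finally the index offset as a little-endian `u64`. A sketch of how a reader could walk that layout back; this reader is an illustration only, not an API that exists in tantivy:

```rust
use std::collections::HashMap;
use std::convert::TryInto;
use std::path::PathBuf;

// Layout assumed from `serialize_bundle` above:
// [file bytes ...][JSON index: path -> (start, stop)][index offset as u64 LE]
fn read_file_from_bundle(bundle: &[u8], path: &PathBuf) -> Option<Vec<u8>> {
    if bundle.len() < 8 {
        return None;
    }
    let (body, footer) = bundle.split_at(bundle.len() - 8);
    let index_offset = u64::from_le_bytes(footer.try_into().ok()?) as usize;
    let index_json = body.get(index_offset..)?;
    let index: HashMap<PathBuf, (u64, u64)> = serde_json::from_slice(index_json).ok()?;
    let &(start, stop) = index.get(path)?;
    body.get(start as usize..stop as usize).map(|bytes| bytes.to_vec())
}
```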
impl fmt::Debug for RAMDirectory { impl fmt::Debug for RAMDirectory {
@@ -162,18 +144,13 @@ impl RAMDirectory {
pub fn total_mem_usage(&self) -> usize { pub fn total_mem_usage(&self) -> usize {
self.fs.read().unwrap().total_mem_usage() self.fs.read().unwrap().total_mem_usage()
} }
/// Serialize the RAMDirectory into a bundle.
///
/// This method will fail, write nothing, and return an error if a
/// clone of this directory exists.
pub fn serialize_bundle(self, wrt: &mut WritePtr) -> io::Result<()> {
let inner_directory_rlock = self.fs.read().unwrap();
inner_directory_rlock.serialize_bundle(wrt)
}
} }
impl Directory for RAMDirectory { impl Directory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
fail_point!("RAMDirectory::delete", |_| { fail_point!("RAMDirectory::delete", |_| {
use crate::directory::error::IOError; use crate::directory::error::IOError;
@@ -183,6 +160,10 @@ impl Directory for RAMDirectory {
self.fs.write().unwrap().delete(path) self.fs.write().unwrap().delete(path)
} }
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
let mut fs = self.fs.write().unwrap(); let mut fs = self.fs.write().unwrap();
let path_buf = PathBuf::from(path); let path_buf = PathBuf::from(path);
@@ -196,6 +177,10 @@ impl Directory for RAMDirectory {
} }
} }
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new( fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
@@ -206,11 +191,11 @@ impl Directory for RAMDirectory {
// Reserve the path to prevent calls to .write() from succeeding. // Reserve the path to prevent calls to .write() from succeeding.
self.fs.write().unwrap().write(path_buf.clone(), &[]); self.fs.write().unwrap().write(path_buf.clone(), &[]);
let mut vec_writer = VecWriter::new(path_buf, self.clone()); let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
vec_writer.write_all(data)?; vec_writer.write_all(data)?;
vec_writer.flush()?; vec_writer.flush()?;
if path == Path::new(&*META_FILEPATH) { if path == Path::new(&*META_FILEPATH) {
let _ = self.fs.write().unwrap().watch_router.broadcast(); self.fs.write().unwrap().watch_router.broadcast();
} }
Ok(()) Ok(())
} }
@@ -219,17 +204,3 @@ impl Directory for RAMDirectory {
Ok(self.fs.write().unwrap().watch(watch_callback)) Ok(self.fs.write().unwrap().watch(watch_callback))
} }
} }
impl ReadOnlyDirectory for RAMDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
self.fs.read().unwrap().open_read(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
Ok(self.open_read(path)?.as_slice().to_owned())
}
}

View File

@@ -70,12 +70,6 @@ impl ReadOnlySource {
(left, right) (left, right)
} }
/// Splits into 2 `ReadOnlySource`, at the offset `end - right_len`.
pub fn split_from_end(self, right_len: usize) -> (ReadOnlySource, ReadOnlySource) {
let left_len = self.len() - right_len;
self.split(left_len)
}
/// Creates a ReadOnlySource that is just a /// Creates a ReadOnlySource that is just a
/// view over a slice of the data. /// view over a slice of the data.
/// ///

View File

@@ -1,117 +1,25 @@
use super::*; use super::*;
use futures::channel::oneshot;
use futures::executor::block_on;
use std::io::Write; use std::io::Write;
use std::mem; use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::sync::atomic::Ordering;
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time;
use std::time::Duration; use std::time::Duration;
#[cfg(feature = "mmap")] #[test]
mod mmap_directory_tests { fn test_ram_directory() {
use crate::directory::MmapDirectory; let mut ram_directory = RAMDirectory::create();
test_directory(&mut ram_directory);
type DirectoryImpl = MmapDirectory;
fn make_directory() -> DirectoryImpl {
MmapDirectory::create_from_tempdir().unwrap()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
} }
mod ram_directory_tests { #[test]
use crate::directory::RAMDirectory; #[cfg(feature = "mmap")]
fn test_mmap_directory() {
type DirectoryImpl = RAMDirectory; let mut mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
test_directory(&mut mmap_directory);
fn make_directory() -> DirectoryImpl {
RAMDirectory::default()
}
#[test]
fn test_simple() {
let mut directory = make_directory();
super::test_simple(&mut directory);
}
#[test]
fn test_write_create_the_file() {
let mut directory = make_directory();
super::test_write_create_the_file(&mut directory);
}
#[test]
fn test_rewrite_forbidden() {
let mut directory = make_directory();
super::test_rewrite_forbidden(&mut directory);
}
#[test]
fn test_directory_delete() {
let mut directory = make_directory();
super::test_directory_delete(&mut directory);
}
#[test]
fn test_lock_non_blocking() {
let mut directory = make_directory();
super::test_lock_non_blocking(&mut directory);
}
#[test]
fn test_lock_blocking() {
let mut directory = make_directory();
super::test_lock_blocking(&mut directory);
}
#[test]
fn test_watch() {
let mut directory = make_directory();
super::test_watch(&mut directory);
}
} }
#[test] #[test]
@@ -191,39 +99,48 @@ fn test_directory_delete(directory: &mut dyn Directory) {
assert!(directory.delete(&test_path).is_err()); assert!(directory.delete(&test_path).is_err());
} }
fn test_directory(directory: &mut dyn Directory) {
test_simple(directory);
test_rewrite_forbidden(directory);
test_write_create_the_file(directory);
test_directory_delete(directory);
test_lock_non_blocking(directory);
test_lock_blocking(directory);
test_watch(directory);
}
fn test_watch(directory: &mut dyn Directory) { fn test_watch(directory: &mut dyn Directory) {
let num_progress: Arc<AtomicUsize> = Default::default();
let counter: Arc<AtomicUsize> = Default::default(); let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone(); let counter_clone = counter.clone();
let (sender, receiver) = crossbeam::channel::unbounded();
let watch_callback = Box::new(move || { let watch_callback = Box::new(move || {
counter_clone.fetch_add(1, SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
// This callback is used to synchronize watching in our unit test. assert!(directory
// We bind it to a variable because the callback is removed when that .atomic_write(Path::new("meta.json"), b"random_test_data")
// handle is dropped. .is_ok());
let watch_handle = directory.watch(watch_callback).unwrap(); thread::sleep(Duration::new(0, 10_000));
let _progress_listener = directory assert_eq!(0, counter.load(Ordering::SeqCst));
.watch(Box::new(move || {
let val = num_progress.fetch_add(1, SeqCst);
let _ = sender.send(val);
}))
.unwrap();
let watch_handle = directory.watch(watch_callback).unwrap();
for i in 0..10 { for i in 0..10 {
assert_eq!(i, counter.load(SeqCst)); assert_eq!(i, counter.load(Ordering::SeqCst));
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2") .atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok()); .is_ok());
assert_eq!(receiver.recv_timeout(Duration::from_millis(500)), Ok(i)); for _ in 0..1_000 {
assert_eq!(i + 1, counter.load(SeqCst)); if counter.load(Ordering::SeqCst) > i {
break;
}
thread::sleep(Duration::from_millis(10));
}
assert_eq!(i + 1, counter.load(Ordering::SeqCst));
} }
mem::drop(watch_handle); mem::drop(watch_handle);
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data") .atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok()); .is_ok());
assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok()); thread::sleep(Duration::from_millis(200));
assert_eq!(10, counter.load(SeqCst)); assert_eq!(10, counter.load(Ordering::SeqCst));
} }
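The rewritten test replaces `thread::sleep` polling with a channel: a second callback sends on the channel and the test blocks on `recv_timeout`, so it synchronizes with the asynchronous broadcast instead of guessing a delay. The pattern in isolation, using the same crossbeam channel as the test above (names are hypothetical):

```rust
use std::time::Duration;

// A callback sends on the channel when it fires; the test then waits for that
// signal instead of sleeping for an arbitrary amount of time.
fn wait_for_one_callback() {
    let (sender, receiver) = crossbeam::channel::unbounded();
    let callback = Box::new(move || {
        let _ = sender.send(());
    });
    // In the real test the callback is registered via `directory.watch(...)`
    // and fired by a write to meta.json; here it is invoked directly.
    callback();
    assert!(receiver.recv_timeout(Duration::from_millis(500)).is_ok());
}
```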
fn test_lock_non_blocking(directory: &mut dyn Directory) { fn test_lock_non_blocking(directory: &mut dyn Directory) {
@@ -257,13 +174,9 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
is_blocking: true, is_blocking: true,
}); });
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
let in_thread = Arc::new(AtomicBool::default());
let in_thread_clone = in_thread.clone();
let (sender, receiver) = oneshot::channel();
std::thread::spawn(move || { std::thread::spawn(move || {
//< lock_a_res is sent to the thread. //< lock_a_res is sent to the thread.
in_thread_clone.store(true, SeqCst); std::thread::sleep(time::Duration::from_millis(10));
let _just_sync = block_on(receiver);
// explicitly dropping lock_a_res. It would have been sufficient to just force it // explicitly dropping lock_a_res. It would have been sufficient to just force it
// to be part of the move, but the intent seems clearer that way. // to be part of the move, but the intent seems clearer that way.
drop(lock_a_res); drop(lock_a_res);
@@ -276,18 +189,14 @@ fn test_lock_blocking(directory: &mut dyn Directory) {
}); });
assert!(lock_a_res.is_err()); assert!(lock_a_res.is_err());
} }
let directory_clone = directory.box_clone(); {
let (sender2, receiver2) = oneshot::channel(); // the blocking call should wait for at least 10ms.
let join_handle = std::thread::spawn(move || { let start = time::Instant::now();
assert!(sender2.send(()).is_ok()); let lock_a_res = directory.acquire_lock(&Lock {
let lock_a_res = directory_clone.acquire_lock(&Lock {
filepath: PathBuf::from("a.lock"), filepath: PathBuf::from("a.lock"),
is_blocking: true, is_blocking: true,
}); });
assert!(in_thread.load(SeqCst));
assert!(lock_a_res.is_ok()); assert!(lock_a_res.is_ok());
}); assert!(start.elapsed().subsec_millis() >= 10);
assert!(block_on(receiver2).is_ok()); }
assert!(sender.send(()).is_ok());
assert!(join_handle.join().is_ok());
} }

View File

@@ -1,5 +1,3 @@
use futures::channel::oneshot;
use futures::{Future, TryFutureExt};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::Weak; use std::sync::Weak;
@@ -24,20 +22,13 @@ pub struct WatchCallbackList {
#[derive(Clone)] #[derive(Clone)]
pub struct WatchHandle(Arc<WatchCallback>); pub struct WatchHandle(Arc<WatchCallback>);
impl WatchHandle {
/// Create a WatchHandle handle.
pub fn new(watch_callback: Arc<WatchCallback>) -> WatchHandle {
WatchHandle(watch_callback)
}
}
impl WatchCallbackList { impl WatchCallbackList {
/// Subscribes a new callback and returns a handle that controls the lifetime of the callback. /// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle { pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback); let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc); let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
self.router.write().unwrap().push(watch_callback_weak); self.router.write().unwrap().push(watch_callback_weak);
WatchHandle::new(watch_callback_arc) WatchHandle(watch_callback_arc)
} }
fn list_callback(&self) -> Vec<Arc<WatchCallback>> { fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
@@ -56,21 +47,14 @@ impl WatchCallbackList {
} }
/// Triggers all callbacks /// Triggers all callbacks
pub fn broadcast(&self) -> impl Future<Output = ()> { pub fn broadcast(&self) {
let callbacks = self.list_callback(); let callbacks = self.list_callback();
let (sender, receiver) = oneshot::channel();
let result = receiver.unwrap_or_else(|_| ());
if callbacks.is_empty() {
let _ = sender.send(());
return result;
}
let spawn_res = std::thread::Builder::new() let spawn_res = std::thread::Builder::new()
.name("watch-callbacks".to_string()) .name("watch-callbacks".to_string())
.spawn(move || { .spawn(move || {
for callback in callbacks { for callback in callbacks {
callback(); callback();
} }
let _ = sender.send(());
}); });
if let Err(err) = spawn_res { if let Err(err) = spawn_res {
error!( error!(
@@ -78,17 +62,19 @@ impl WatchCallbackList {
err err
); );
} }
result
} }
} }
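With the new signature, `broadcast` still runs the callbacks on a dedicated thread, but it hands back a future that completes once every callback has been called. A small usage sketch, based on the types shown in this diff; `block_on` comes from the `futures` executor already used in the tests below:

```rust
use futures::executor::block_on;

// Wait until every registered callback has run.
fn notify_and_wait(router: &WatchCallbackList) {
    block_on(router.broadcast());
}

// Fire-and-forget: the callbacks still run on the "watch-callbacks" thread;
// the returned future is simply dropped (as in `let _ = ...broadcast();`).
fn notify(router: &WatchCallbackList) {
    let _ = router.broadcast();
}
```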
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use futures::executor::block_on;
use std::mem; use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::thread;
use std::time::Duration;
const WAIT_TIME: u64 = 20;
#[test] #[test]
fn test_watch_event_router_simple() { fn test_watch_event_router_simple() {
@@ -98,22 +84,22 @@ mod tests {
let inc_callback = Box::new(move || { let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst); counter_clone.fetch_add(1, Ordering::SeqCst);
}); });
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(1, counter.load(Ordering::SeqCst)); assert_eq!(1, counter.load(Ordering::SeqCst));
block_on(async { watch_event_router.broadcast();
( watch_event_router.broadcast();
watch_event_router.broadcast().await, watch_event_router.broadcast();
watch_event_router.broadcast().await, thread::sleep(Duration::from_millis(WAIT_TIME));
watch_event_router.broadcast().await,
)
});
assert_eq!(4, counter.load(Ordering::SeqCst)); assert_eq!(4, counter.load(Ordering::SeqCst));
mem::drop(handle_a); mem::drop(handle_a);
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst)); assert_eq!(4, counter.load(Ordering::SeqCst));
} }
@@ -129,20 +115,20 @@ mod tests {
}; };
let handle_a = watch_event_router.subscribe(inc_callback(1)); let handle_a = watch_event_router.subscribe(inc_callback(1));
let handle_a2 = watch_event_router.subscribe(inc_callback(10)); let handle_a2 = watch_event_router.subscribe(inc_callback(10));
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async { watch_event_router.broadcast();
futures::join!( watch_event_router.broadcast();
watch_event_router.broadcast(), thread::sleep(Duration::from_millis(WAIT_TIME));
watch_event_router.broadcast()
)
});
assert_eq!(22, counter.load(Ordering::SeqCst)); assert_eq!(22, counter.load(Ordering::SeqCst));
mem::drop(handle_a); mem::drop(handle_a);
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst)); assert_eq!(32, counter.load(Ordering::SeqCst));
mem::drop(handle_a2); mem::drop(handle_a2);
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
block_on(watch_event_router.broadcast()); watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst)); assert_eq!(32, counter.load(Ordering::SeqCst));
} }
@@ -156,15 +142,14 @@ mod tests {
}); });
let handle_a = watch_event_router.subscribe(inc_callback); let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst)); assert_eq!(0, counter.load(Ordering::SeqCst));
block_on(async { watch_event_router.broadcast();
let future1 = watch_event_router.broadcast(); watch_event_router.broadcast();
let future2 = watch_event_router.broadcast(); thread::sleep(Duration::from_millis(WAIT_TIME));
futures::join!(future1, future2)
});
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
thread::sleep(Duration::from_millis(WAIT_TIME));
mem::drop(handle_a); mem::drop(handle_a);
let _ = watch_event_router.broadcast(); watch_event_router.broadcast();
block_on(watch_event_router.broadcast()); thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
} }
} }

View File

@@ -2,8 +2,8 @@
use std::io; use std::io;
use crate::directory::error::LockError;
use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError}; use crate::directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
use crate::directory::error::{Incompatibility, LockError};
use crate::fastfield::FastFieldNotAvailableError; use crate::fastfield::FastFieldNotAvailableError;
use crate::query; use crate::query;
use crate::schema; use crate::schema;
@@ -25,10 +25,10 @@ impl DataCorruption {
} }
} }
pub fn comment_only<TS: ToString>(comment: TS) -> DataCorruption { pub fn comment_only(comment: String) -> DataCorruption {
DataCorruption { DataCorruption {
filepath: None, filepath: None,
comment: comment.to_string(), comment,
} }
} }
} }
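Making `comment_only` generic over `ToString` lets call sites drop the explicit `String` conversion. A hedged before/after sketch (the error message is made up):

```rust
fn data_corruption_example() -> DataCorruption {
    // New signature: any `ToString` works, so a &str literal is enough.
    DataCorruption::comment_only("unexpected end of file while reading the postings")
    // Old signature required an owned String:
    // DataCorruption::comment_only("unexpected end of file while reading the postings".to_string())
}
```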
@@ -80,9 +80,6 @@ pub enum TantivyError {
/// System error. (e.g.: We failed spawning a new thread) /// System error. (e.g.: We failed spawning a new thread)
#[fail(display = "System error.'{}'", _0)] #[fail(display = "System error.'{}'", _0)]
SystemError(String), SystemError(String),
/// Index incompatible with current version of tantivy
#[fail(display = "{:?}", _0)]
IncompatibleIndex(Incompatibility),
} }
impl From<DataCorruption> for TantivyError { impl From<DataCorruption> for TantivyError {
@@ -132,9 +129,6 @@ impl From<OpenReadError> for TantivyError {
match error { match error {
OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath), OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
OpenReadError::IOError(io_error) => TantivyError::IOError(io_error), OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
OpenReadError::IncompatibleIndex(incompatibility) => {
TantivyError::IncompatibleIndex(incompatibility)
}
} }
} }
} }
@@ -176,9 +170,3 @@ impl From<serde_json::Error> for TantivyError {
TantivyError::IOError(io_err.into()) TantivyError::IOError(io_err.into())
} }
} }
impl From<rayon::ThreadPoolBuildError> for TantivyError {
fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
TantivyError::SystemError(error.to_string())
}
}

View File

@@ -1,19 +1,17 @@
use crate::common::{BitSet, HasLen}; use crate::common::HasLen;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::space_usage::ByteCount; use crate::space_usage::ByteCount;
use crate::DocId; use crate::DocId;
use bit_set::BitSet;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
/// Write a delete `BitSet` /// Write a delete `BitSet`
/// ///
/// where `delete_bitset` is the set of deleted `DocId`. /// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset( pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
delete_bitset: &BitSet, let max_doc = delete_bitset.capacity();
max_doc: u32,
writer: &mut WritePtr,
) -> io::Result<()> {
let mut byte = 0u8; let mut byte = 0u8;
let mut shift = 0u8; let mut shift = 0u8;
for doc in 0..max_doc { for doc in 0..max_doc {
@@ -31,7 +29,7 @@ pub fn write_delete_bitset(
if max_doc % 8 > 0 { if max_doc % 8 > 0 {
writer.write_all(&[byte])?; writer.write_all(&[byte])?;
} }
Ok(()) writer.flush()
} }
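The writer packs one bit per document, eight documents per byte, flushing a byte each time eight documents have been examined, plus a final partial byte. Assuming the least-significant-bit-first packing implied by the `byte`/`shift` loop above, a reader can recover the deletion flag as sketched below (standalone illustration, not tantivy's `DeleteBitSet` code):

```rust
// Document `doc` lives in byte `doc / 8`, bit `doc % 8` (LSB first -- an
// assumption based on the byte/shift loop above).
fn is_deleted(bitset_bytes: &[u8], doc: u32) -> bool {
    let byte = bitset_bytes[(doc / 8) as usize];
    (byte & (1u8 << (doc % 8))) != 0
}

// With docs 1 and 9 deleted out of max_doc = 10 (as in the test below), the
// serialized bitset would be the two bytes [0b0000_0010, 0b0000_0010].
```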
/// Set of deleted `DocId`s. /// Set of deleted `DocId`s.
@@ -85,40 +83,43 @@ impl HasLen for DeleteBitSet {
mod tests { mod tests {
use super::*; use super::*;
use crate::directory::*; use crate::directory::*;
use bit_set::BitSet;
use std::path::PathBuf; use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) { fn test_delete_bitset_helper(bitset: &BitSet) {
let test_path = PathBuf::from("test"); let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
{ {
let mut writer = directory.open_write(&*test_path).unwrap(); let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap(); write_delete_bitset(bitset, &mut writer).unwrap();
writer.terminate().unwrap();
} }
let source = directory.open_read(&test_path).unwrap(); {
let delete_bitset = DeleteBitSet::open(source); let source = directory.open_read(&test_path).unwrap();
for doc in 0..max_doc { let delete_bitset = DeleteBitSet::open(source);
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId)); let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
} }
assert_eq!(delete_bitset.len(), bitset.len());
} }
#[test] #[test]
fn test_delete_bitset() { fn test_delete_bitset() {
{ {
let mut bitset = BitSet::with_max_value(10); let mut bitset = BitSet::with_capacity(10);
bitset.insert(1); bitset.insert(1);
bitset.insert(9); bitset.insert(9);
test_delete_bitset_helper(&bitset, 10); test_delete_bitset_helper(&bitset);
} }
{ {
let mut bitset = BitSet::with_max_value(8); let mut bitset = BitSet::with_capacity(8);
bitset.insert(1); bitset.insert(1);
bitset.insert(2); bitset.insert(2);
bitset.insert(3); bitset.insert(3);
bitset.insert(5); bitset.insert(5);
bitset.insert(7); bitset.insert(7);
test_delete_bitset_helper(&bitset, 8); test_delete_bitset_helper(&bitset);
} }
} }
} }

View File

@@ -33,7 +33,6 @@ pub use self::reader::FastFieldReader;
pub use self::readers::FastFieldReaders; pub use self::readers::FastFieldReaders;
pub use self::serializer::FastFieldSerializer; pub use self::serializer::FastFieldSerializer;
pub use self::writer::{FastFieldsWriter, IntFastFieldWriter}; pub use self::writer::{FastFieldsWriter, IntFastFieldWriter};
use crate::chrono::{NaiveDateTime, Utc};
use crate::common; use crate::common;
use crate::schema::Cardinality; use crate::schema::Cardinality;
use crate::schema::FieldType; use crate::schema::FieldType;
@@ -50,7 +49,7 @@ mod serializer;
mod writer; mod writer;
/// Trait for types that are allowed for fast fields: (u64, i64 and f64). /// Trait for types that are allowed for fast fields: (u64, i64 and f64).
pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd { pub trait FastValue: Default + Clone + Copy + Send + Sync + PartialOrd {
/// Converts a value from u64 /// Converts a value from u64
/// ///
/// Internally all fast field values are encoded as u64. /// Internally all fast field values are encoded as u64.
@@ -70,12 +69,6 @@ pub trait FastValue: Clone + Copy + Send + Sync + PartialOrd {
/// Cast value to `u64`. /// Cast value to `u64`.
/// The value is just reinterpreted in memory. /// The value is just reinterpreted in memory.
fn as_u64(&self) -> u64; fn as_u64(&self) -> u64;
/// Build a default value. This default value is never used, so the value does not
/// really matter.
fn make_zero() -> Self {
Self::from_u64(0i64.to_u64())
}
} }
impl FastValue for u64 { impl FastValue for u64 {
@@ -142,34 +135,11 @@ impl FastValue for f64 {
} }
} }
impl FastValue for crate::DateTime {
fn from_u64(timestamp_u64: u64) -> Self {
let timestamp_i64 = i64::from_u64(timestamp_u64);
crate::DateTime::from_utc(NaiveDateTime::from_timestamp(timestamp_i64, 0), Utc)
}
fn to_u64(&self) -> u64 {
self.timestamp().to_u64()
}
fn fast_field_cardinality(field_type: &FieldType) -> Option<Cardinality> {
match *field_type {
FieldType::Date(ref integer_options) => integer_options.get_fastfield_cardinality(),
_ => None,
}
}
fn as_u64(&self) -> u64 {
self.timestamp().as_u64()
}
}
fn value_to_u64(value: &Value) -> u64 { fn value_to_u64(value: &Value) -> u64 {
match *value { match *value {
Value::U64(ref val) => *val, Value::U64(ref val) => *val,
Value::I64(ref val) => common::i64_to_u64(*val), Value::I64(ref val) => common::i64_to_u64(*val),
Value::F64(ref val) => common::f64_to_u64(*val), Value::F64(ref val) => common::f64_to_u64(*val),
Value::Date(ref datetime) => common::i64_to_u64(datetime.timestamp()),
_ => panic!("Expected a u64/i64/f64 field, got {:?} ", value), _ => panic!("Expected a u64/i64/f64 field, got {:?} ", value),
} }
} }
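Both the `DateTime` impl above and `value_to_u64` go through the same order-preserving i64-to-u64 mapping (`common::i64_to_u64`). A common way to implement such a mapping is to flip the sign bit; the sketch below shows that idea and is not necessarily tantivy's exact code:

```rust
// Order-preserving mapping between i64 and u64: flipping the sign bit sends
// i64::MIN to 0 and i64::MAX to u64::MAX, so comparisons are preserved.
fn i64_to_u64_sketch(val: i64) -> u64 {
    (val as u64) ^ (1u64 << 63)
}

fn u64_to_i64_sketch(val: u64) -> i64 {
    (val ^ (1u64 << 63)) as i64
}

#[test]
fn order_preserving_mapping() {
    assert!(i64_to_u64_sketch(-1) < i64_to_u64_sketch(0));
    assert!(i64_to_u64_sketch(0) < i64_to_u64_sketch(1));
    assert_eq!(u64_to_i64_sketch(i64_to_u64_sketch(-42)), -42);
}
```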
@@ -179,14 +149,12 @@ mod tests {
use super::*; use super::*;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::merge_policy::NoMergePolicy; use crate::schema::Document;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;
use crate::schema::{Document, IntOptions};
use crate::{Index, SegmentId, SegmentReader};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -210,12 +178,6 @@ mod tests {
assert_eq!(test_fastfield.get(2), 300); assert_eq!(test_fastfield.get(2), 300);
} }
#[test]
pub fn test_fastfield_i64_u64() {
let datetime = crate::DateTime::from_utc(NaiveDateTime::from_timestamp(0i64, 0), Utc);
assert_eq!(i64::from_u64(datetime.to_u64()), 0i64);
}
#[test] #[test]
fn test_intfastfield_small() { fn test_intfastfield_small() {
let path = Path::new("test"); let path = Path::new("test");
@@ -467,93 +429,6 @@ mod tests {
} }
} }
} }
#[test]
fn test_merge_missing_date_fast_field() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", FAST);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(date_field =>crate::chrono::prelude::Utc::now()));
index_writer.commit().unwrap();
index_writer.add_document(doc!());
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let segment_ids: Vec<SegmentId> = reader
.searcher()
.segment_readers()
.iter()
.map(SegmentReader::segment_id)
.collect();
assert_eq!(segment_ids.len(), 2);
let merge_future = index_writer.merge(&segment_ids[..]);
let merge_res = futures::executor::block_on(merge_future);
assert!(merge_res.is_ok());
assert!(reader.reload().is_ok());
assert_eq!(reader.searcher().segment_readers().len(), 1);
}
#[test]
fn test_default_datetime() {
assert_eq!(crate::DateTime::make_zero().timestamp(), 0i64);
}
#[test]
fn test_datefastfield() {
use crate::fastfield::FastValue;
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", FAST);
let multi_date_field = schema_builder.add_date_field(
"multi_date",
IntOptions::default().set_fast(Cardinality::MultiValues),
);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(1i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(2i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(3i64.to_u64())
));
index_writer.add_document(doc!(
date_field => crate::DateTime::from_u64(4i64.to_u64())
));
index_writer.add_document(doc!(
multi_date_field => crate::DateTime::from_u64(5i64.to_u64()),
multi_date_field => crate::DateTime::from_u64(6i64.to_u64())
));
index_writer.commit().unwrap();
let reader = index.reader().unwrap();
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0);
let fast_fields = segment_reader.fast_fields();
let date_fast_field = fast_fields.date(date_field).unwrap();
let dates_fast_field = fast_fields.dates(multi_date_field).unwrap();
let mut dates = vec![];
{
assert_eq!(date_fast_field.get(0u32).timestamp(), 1i64);
dates_fast_field.get_vals(0u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].timestamp(), 2i64);
assert_eq!(dates[1].timestamp(), 3i64);
}
{
assert_eq!(date_fast_field.get(1u32).timestamp(), 4i64);
dates_fast_field.get_vals(1u32, &mut dates);
assert!(dates.is_empty());
}
{
assert_eq!(date_fast_field.get(2u32).timestamp(), 0i64);
dates_fast_field.get_vals(2u32, &mut dates);
assert_eq!(dates.len(), 2);
assert_eq!(dates[0].timestamp(), 5i64);
assert_eq!(dates[1].timestamp(), 6i64);
}
}
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]

View File

@@ -45,7 +45,7 @@ impl<Item: FastValue> MultiValueIntFastFieldReader<Item> {
pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) { pub fn get_vals(&self, doc: DocId, vals: &mut Vec<Item>) {
let (start, stop) = self.range(doc); let (start, stop) = self.range(doc);
let len = (stop - start) as usize; let len = (stop - start) as usize;
vals.resize(len, Item::make_zero()); vals.resize(len, Item::default());
self.vals_reader.get_range_u64(start, &mut vals[..]); self.vals_reader.get_range_u64(start, &mut vals[..]);
} }
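A hypothetical helper showing the call pattern of `get_vals`: the caller owns one reusable buffer, which is resized and overwritten for every document.

```rust
// Sum all values of a multi-valued u64 fast field across the first `num_docs`
// documents, reusing a single buffer (hypothetical helper, not a tantivy API).
fn sum_multi_values(reader: &MultiValueIntFastFieldReader<u64>, num_docs: u32) -> u64 {
    let mut vals: Vec<u64> = Vec::new();
    let mut total = 0u64;
    for doc in 0..num_docs {
        reader.get_vals(doc, &mut vals);
        total += vals.iter().sum::<u64>();
    }
    total
}
```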

View File

@@ -4,7 +4,7 @@ use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeFile; use crate::common::CompositeFile;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;

View File

@@ -15,11 +15,9 @@ pub struct FastFieldReaders {
fast_field_i64: HashMap<Field, FastFieldReader<i64>>, fast_field_i64: HashMap<Field, FastFieldReader<i64>>,
fast_field_u64: HashMap<Field, FastFieldReader<u64>>, fast_field_u64: HashMap<Field, FastFieldReader<u64>>,
fast_field_f64: HashMap<Field, FastFieldReader<f64>>, fast_field_f64: HashMap<Field, FastFieldReader<f64>>,
fast_field_date: HashMap<Field, FastFieldReader<crate::DateTime>>,
fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>, fast_field_i64s: HashMap<Field, MultiValueIntFastFieldReader<i64>>,
fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>, fast_field_u64s: HashMap<Field, MultiValueIntFastFieldReader<u64>>,
fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>, fast_field_f64s: HashMap<Field, MultiValueIntFastFieldReader<f64>>,
fast_field_dates: HashMap<Field, MultiValueIntFastFieldReader<crate::DateTime>>,
fast_bytes: HashMap<Field, BytesFastFieldReader>, fast_bytes: HashMap<Field, BytesFastFieldReader>,
fast_fields_composite: CompositeFile, fast_fields_composite: CompositeFile,
} }
@@ -28,7 +26,6 @@ enum FastType {
I64, I64,
U64, U64,
F64, F64,
Date,
} }
fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> { fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality)> {
@@ -42,9 +39,6 @@ fn type_and_cardinality(field_type: &FieldType) -> Option<(FastType, Cardinality
FieldType::F64(options) => options FieldType::F64(options) => options
.get_fastfield_cardinality() .get_fastfield_cardinality()
.map(|cardinality| (FastType::F64, cardinality)), .map(|cardinality| (FastType::F64, cardinality)),
FieldType::Date(options) => options
.get_fastfield_cardinality()
.map(|cardinality| (FastType::Date, cardinality)),
FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)), FieldType::HierarchicalFacet => Some((FastType::U64, Cardinality::MultiValues)),
_ => None, _ => None,
} }
@@ -59,15 +53,14 @@ impl FastFieldReaders {
fast_field_i64: Default::default(), fast_field_i64: Default::default(),
fast_field_u64: Default::default(), fast_field_u64: Default::default(),
fast_field_f64: Default::default(), fast_field_f64: Default::default(),
fast_field_date: Default::default(),
fast_field_i64s: Default::default(), fast_field_i64s: Default::default(),
fast_field_u64s: Default::default(), fast_field_u64s: Default::default(),
fast_field_f64s: Default::default(), fast_field_f64s: Default::default(),
fast_field_dates: Default::default(),
fast_bytes: Default::default(), fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(), fast_fields_composite: fast_fields_composite.clone(),
}; };
for (field, field_entry) in schema.fields() { for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
if field_type == &FieldType::Bytes { if field_type == &FieldType::Bytes {
let idx_reader = fast_fields_composite let idx_reader = fast_fields_composite
@@ -103,12 +96,6 @@ impl FastFieldReaders {
FastFieldReader::open(fast_field_data.clone()), FastFieldReader::open(fast_field_data.clone()),
); );
} }
FastType::Date => {
fast_field_readers.fast_field_date.insert(
field,
FastFieldReader::open(fast_field_data.clone()),
);
}
} }
} else { } else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry))); return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -144,14 +131,6 @@ impl FastFieldReaders {
.fast_field_f64s .fast_field_f64s
.insert(field, multivalued_int_fast_field); .insert(field, multivalued_int_fast_field);
} }
FastType::Date => {
let vals_reader = FastFieldReader::open(fast_field_data);
let multivalued_int_fast_field =
MultiValueIntFastFieldReader::open(idx_reader, vals_reader);
fast_field_readers
.fast_field_dates
.insert(field, multivalued_int_fast_field);
}
} }
} else { } else {
return Err(From::from(FastFieldNotAvailableError::new(field_entry))); return Err(From::from(FastFieldNotAvailableError::new(field_entry)));
@@ -178,6 +157,8 @@ impl FastFieldReaders {
/// If the field is an i64 fast field, return the associated u64 reader. Values are /// If the field is an i64 fast field, return the associated u64 reader. Values are
/// mapped from i64 to u64 using the (unique) order-preserving monotonic mapping. /// mapped from i64 to u64 using the (unique) order-preserving monotonic mapping.
/// ///
///TODO should it also be lenient with f64?
///
/// This method is useful when merging segment readers. /// This method is useful when merging segment readers.
pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> { pub(crate) fn u64_lenient(&self, field: Field) -> Option<FastFieldReader<u64>> {
if let Some(u64_ff_reader) = self.u64(field) { if let Some(u64_ff_reader) = self.u64(field) {
@@ -186,12 +167,6 @@ impl FastFieldReaders {
if let Some(i64_ff_reader) = self.i64(field) { if let Some(i64_ff_reader) = self.i64(field) {
return Some(i64_ff_reader.into_u64_reader()); return Some(i64_ff_reader.into_u64_reader());
} }
if let Some(f64_ff_reader) = self.f64(field) {
return Some(f64_ff_reader.into_u64_reader());
}
if let Some(date_ff_reader) = self.date(field) {
return Some(date_ff_reader.into_u64_reader());
}
None None
} }
@@ -202,13 +177,6 @@ impl FastFieldReaders {
self.fast_field_i64.get(&field).cloned() self.fast_field_i64.get(&field).cloned()
} }
/// Returns the date (`crate::DateTime`) fast field reader associated to `field`.
///
/// If `field` is not a date fast field, this method returns `None`.
pub fn date(&self, field: Field) -> Option<FastFieldReader<crate::DateTime>> {
self.fast_field_date.get(&field).cloned()
}
/// Returns the `f64` fast field reader associated to `field`. /// Returns the `f64` fast field reader associated to `field`.
/// ///
/// If `field` is not a f64 fast field, this method returns `None`. /// If `field` is not a f64 fast field, this method returns `None`.
@@ -235,9 +203,6 @@ impl FastFieldReaders {
if let Some(i64s_ff_reader) = self.i64s(field) { if let Some(i64s_ff_reader) = self.i64s(field) {
return Some(i64s_ff_reader.into_u64s_reader()); return Some(i64s_ff_reader.into_u64s_reader());
} }
if let Some(f64s_ff_reader) = self.f64s(field) {
return Some(f64s_ff_reader.into_u64s_reader());
}
None None
} }
@@ -255,13 +220,6 @@ impl FastFieldReaders {
self.fast_field_f64s.get(&field).cloned() self.fast_field_f64s.get(&field).cloned()
} }
/// Returns a `crate::DateTime` multi-valued fast field reader associated to `field`.
///
/// If `field` is not a `crate::DateTime` multi-valued fast field, this method returns `None`.
pub fn dates(&self, field: Field) -> Option<MultiValueIntFastFieldReader<crate::DateTime>> {
self.fast_field_dates.get(&field).cloned()
}
/// Returns the `bytes` fast field reader associated to `field`. /// Returns the `bytes` fast field reader associated to `field`.
/// ///
/// If `field` is not a bytes fast field, returns `None`. /// If `field` is not a bytes fast field, returns `None`.

View File

@@ -4,7 +4,7 @@ use crate::common::BinarySerializable;
use crate::common::VInt; use crate::common::VInt;
use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer}; use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema}; use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
@@ -17,14 +17,6 @@ pub struct FastFieldsWriter {
bytes_value_writers: Vec<BytesFastFieldWriter>, bytes_value_writers: Vec<BytesFastFieldWriter>,
} }
fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
match *field_entry.field_type() {
FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
}
}
impl FastFieldsWriter { impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema. /// Create all `FastFieldWriter` required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter { pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
@@ -32,16 +24,20 @@ impl FastFieldsWriter {
let mut multi_values_writers = Vec::new(); let mut multi_values_writers = Vec::new();
let mut bytes_value_writers = Vec::new(); let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() { for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let default_value = match *field_entry.field_type() {
FieldType::I64(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
};
match *field_entry.field_type() { match *field_entry.field_type() {
FieldType::I64(ref int_options) FieldType::I64(ref int_options)
| FieldType::U64(ref int_options) | FieldType::U64(ref int_options)
| FieldType::F64(ref int_options) | FieldType::F64(ref int_options) => {
| FieldType::Date(ref int_options) => {
match int_options.get_fastfield_cardinality() { match int_options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => { Some(Cardinality::SingleValue) => {
let mut fast_field_writer = IntFastFieldWriter::new(field); let mut fast_field_writer = IntFastFieldWriter::new(field);
let default_value = fast_field_default_value(field_entry);
fast_field_writer.set_val_if_missing(default_value); fast_field_writer.set_val_if_missing(default_value);
single_value_writers.push(fast_field_writer); single_value_writers.push(fast_field_writer);
} }

View File

@@ -22,14 +22,11 @@ impl FieldNormsWriter {
pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> { pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> {
schema schema
.fields() .fields()
.filter_map(|(field, field_entry)| { .iter()
if field_entry.is_indexed() { .enumerate()
Some(field) .filter(|&(_, field_entry)| field_entry.is_indexed())
} else { .map(|(field, _)| Field(field as u32))
None .collect::<Vec<Field>>()
}
})
.collect::<Vec<_>>()
} }
/// Initialize with state for tracking the field norm fields /// Initialize with state for tracking the field norm fields
@@ -38,7 +35,7 @@ impl FieldNormsWriter {
let fields = FieldNormsWriter::fields_with_fieldnorm(schema); let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
let max_field = fields let max_field = fields
.iter() .iter()
.map(Field::field_id) .map(|field| field.0)
.max() .max()
.map(|max_field_id| max_field_id as usize + 1) .map(|max_field_id| max_field_id as usize + 1)
.unwrap_or(0); .unwrap_or(0);
@@ -53,8 +50,8 @@ impl FieldNormsWriter {
/// ///
/// Will extend with 0-bytes for documents that have not been seen. /// Will extend with 0-bytes for documents that have not been seen.
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) { pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
for field in self.fields.iter() { for &field in self.fields.iter() {
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8); self.fieldnorms_buffer[field.0 as usize].resize(max_doc as usize, 0u8);
} }
} }
@@ -67,7 +64,7 @@ impl FieldNormsWriter {
/// * field - the field being set /// * field - the field being set
/// * fieldnorm - the number of terms present in document `doc` in field `field` /// * fieldnorm - the number of terms present in document `doc` in field `field`
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) { pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize]; let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.0 as usize];
assert!( assert!(
fieldnorm_buffer.len() <= doc as usize, fieldnorm_buffer.len() <= doc as usize,
"Cannot register a given fieldnorm twice" "Cannot register a given fieldnorm twice"
@@ -80,7 +77,7 @@ impl FieldNormsWriter {
/// Serialize the seen fieldnorm values to the serializer for all fields. /// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> { pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() { for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..]; let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.0 as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?; fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
} }
Ok(()) Ok(())

View File

@@ -2,7 +2,7 @@ use super::operation::DeleteOperation;
use crate::Opstamp; use crate::Opstamp;
use std::mem; use std::mem;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::sync::{Arc, RwLock, Weak}; use std::sync::{Arc, RwLock};
// The DeleteQueue is conceptually similar to a multiple // The DeleteQueue is conceptually similar to a multiple
// consumer single producer broadcast channel. // consumer single producer broadcast channel.
@@ -14,15 +14,14 @@ use std::sync::{Arc, RwLock, Weak};
// //
// New consumers can be created in two ways // New consumers can be created in two ways
// - calling `delete_queue.cursor()` returns a cursor that // - calling `delete_queue.cursor()` returns a cursor that
// will include all future delete operations (and some or none // will include all future delete operations (and no past operations).
// of the past operations... The client is in charge of checking the opstamps.).
// - cloning an existing cursor returns a new cursor that // - cloning an existing cursor returns a new cursor that
// is at the exact same position, and can now advance independently // is at the exact same position, and can now advance independently
// from the original cursor. // from the original cursor.
#[derive(Default)] #[derive(Default)]
struct InnerDeleteQueue { struct InnerDeleteQueue {
writer: Vec<DeleteOperation>, writer: Vec<DeleteOperation>,
last_block: Weak<Block>, last_block: Option<Arc<Block>>,
} }
#[derive(Clone)] #[derive(Clone)]
@@ -33,31 +32,21 @@ pub struct DeleteQueue {
impl DeleteQueue { impl DeleteQueue {
// Creates a new delete queue. // Creates a new delete queue.
pub fn new() -> DeleteQueue { pub fn new() -> DeleteQueue {
DeleteQueue { let delete_queue = DeleteQueue {
inner: Arc::default(), inner: Arc::default(),
} };
}
let next_block = NextBlock::from(delete_queue.clone());
fn get_last_block(&self) -> Arc<Block> {
{ {
// try get the last block with simply acquiring the read lock. let mut delete_queue_wlock = delete_queue.inner.write().unwrap();
let rlock = self.inner.read().unwrap(); delete_queue_wlock.last_block = Some(Arc::new(Block {
if let Some(block) = rlock.last_block.upgrade() { operations: Arc::default(),
return block; next: next_block,
} }));
} }
// It failed. Let's double check after acquiring the write, as someone could have called
// `get_last_block` right after we released the rlock. delete_queue
let mut wlock = self.inner.write().unwrap();
if let Some(block) = wlock.last_block.upgrade() {
return block;
}
let block = Arc::new(Block {
operations: Arc::default(),
next: NextBlock::from(self.clone()),
});
wlock.last_block = Arc::downgrade(&block);
block
} }
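The new `get_last_block` keeps only a `Weak` to the last block and uses a classic double-checked pattern: try to upgrade under the read lock, and only on a miss take the write lock, re-check (another thread may have won the race), then create and store a fresh block. The pattern in isolation, with std types only (illustration, not tantivy code):

```rust
use std::sync::{Arc, RwLock, Weak};

// Double-checked lazy initialization over RwLock<Weak<T>>, mirroring the
// structure of `get_last_block` above.
struct LazyWeak<T> {
    slot: RwLock<Weak<T>>,
}

impl<T> LazyWeak<T> {
    fn get_or_create(&self, create: impl FnOnce() -> T) -> Arc<T> {
        // Fast path: the value is still alive somewhere; upgrade under the read lock.
        if let Some(value) = self.slot.read().unwrap().upgrade() {
            return value;
        }
        // Slow path: take the write lock and re-check before creating.
        let mut slot = self.slot.write().unwrap();
        if let Some(value) = slot.upgrade() {
            return value;
        }
        let value = Arc::new(create());
        *slot = Arc::downgrade(&value);
        value
    }
}
```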
// Creates a new cursor that makes it possible to // Creates a new cursor that makes it possible to
@@ -65,7 +54,17 @@ impl DeleteQueue {
// //
// Past delete operations are not accessible. // Past delete operations are not accessible.
pub fn cursor(&self) -> DeleteCursor { pub fn cursor(&self) -> DeleteCursor {
let last_block = self.get_last_block(); let last_block = self
.inner
.read()
.expect("Read lock poisoned when opening delete queue cursor")
.last_block
.clone()
.expect(
"Failed to unwrap last_block. This should never happen
as the Option<> is only here to make
initialization possible",
);
let operations_len = last_block.operations.len(); let operations_len = last_block.operations.len();
DeleteCursor { DeleteCursor {
block: last_block, block: last_block,
@@ -101,19 +100,23 @@ impl DeleteQueue {
.write() .write()
.expect("Failed to acquire write lock on delete queue writer"); .expect("Failed to acquire write lock on delete queue writer");
if self_wlock.writer.is_empty() { let delete_operations;
return None; {
let writer: &mut Vec<DeleteOperation> = &mut self_wlock.writer;
if writer.is_empty() {
return None;
}
delete_operations = mem::replace(writer, vec![]);
} }
let delete_operations = mem::replace(&mut self_wlock.writer, vec![]); let next_block = NextBlock::from(self.clone());
{
let new_block = Arc::new(Block { self_wlock.last_block = Some(Arc::new(Block {
operations: Arc::new(delete_operations.into_boxed_slice()), operations: Arc::new(delete_operations),
next: NextBlock::from(self.clone()), next: next_block,
}); }));
}
self_wlock.last_block = Arc::downgrade(&new_block); self_wlock.last_block.clone()
Some(new_block)
} }
} }
@@ -167,7 +170,7 @@ impl NextBlock {
} }
struct Block { struct Block {
operations: Arc<Box<[DeleteOperation]>>, operations: Arc<Vec<DeleteOperation>>,
next: NextBlock, next: NextBlock,
} }
@@ -255,7 +258,7 @@ mod tests {
let delete_queue = DeleteQueue::new(); let delete_queue = DeleteQueue::new();
let make_op = |i: usize| { let make_op = |i: usize| {
let field = Field::from_field_id(1u32); let field = Field(1u32);
DeleteOperation { DeleteOperation {
opstamp: i as u64, opstamp: i as u64,
term: Term::from_field_u64(field, i as u64), term: Term::from_field_u64(field, i as u64),


@@ -1,15 +1,14 @@
use super::operation::{AddOperation, UserOperation}; use super::operation::{AddOperation, UserOperation};
use super::segment_updater::SegmentUpdater; use super::segment_updater::SegmentUpdater;
use super::PreparedCommit; use super::PreparedCommit;
use crate::common::BitSet;
use crate::core::Index; use crate::core::Index;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite; use crate::directory::TerminatingWrite;
use crate::directory::{DirectoryLock, GarbageCollectionResult};
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset; use crate::fastfield::write_delete_bitset;
@@ -24,9 +23,10 @@ use crate::schema::Document;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::Term; use crate::schema::Term;
use crate::Opstamp; use crate::Opstamp;
use crate::Result;
use bit_set::BitSet;
use crossbeam::channel; use crossbeam::channel;
use futures::executor::block_on; use futures::{Canceled, Future};
use futures::future::Future;
use smallvec::smallvec; use smallvec::smallvec;
use smallvec::SmallVec; use smallvec::SmallVec;
use std::mem; use std::mem;
@@ -72,7 +72,7 @@ pub struct IndexWriter {
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
workers_join_handle: Vec<JoinHandle<crate::Result<()>>>, workers_join_handle: Vec<JoinHandle<Result<()>>>,
operation_receiver: OperationReceiver, operation_receiver: OperationReceiver,
operation_sender: OperationSender, operation_sender: OperationSender,
@@ -95,7 +95,7 @@ fn compute_deleted_bitset(
delete_cursor: &mut DeleteCursor, delete_cursor: &mut DeleteCursor,
doc_opstamps: &DocToOpstampMapping, doc_opstamps: &DocToOpstampMapping,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> crate::Result<bool> { ) -> Result<bool> {
let mut might_have_changed = false; let mut might_have_changed = false;
while let Some(delete_op) = delete_cursor.get() { while let Some(delete_op) = delete_cursor.get() {
if delete_op.opstamp > target_opstamp { if delete_op.opstamp > target_opstamp {
@@ -115,7 +115,7 @@ fn compute_deleted_bitset(
while docset.advance() { while docset.advance() {
let deleted_doc = docset.doc(); let deleted_doc = docset.doc();
if deleted_doc < limit_doc { if deleted_doc < limit_doc {
delete_bitset.insert(deleted_doc); delete_bitset.insert(deleted_doc as usize);
might_have_changed = true; might_have_changed = true;
} }
} }
@@ -126,73 +126,65 @@ fn compute_deleted_bitset(
Ok(might_have_changed) Ok(might_have_changed)
} }
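Note: `compute_deleted_bitset` above just flips one bit per deleted `DocId`. One variant is built on the external `bit_set` crate (`with_capacity` / `insert(doc as usize)`), the other on an internal `BitSet::with_max_value`. A small illustrative sketch against the external crate (the crate version is an assumption, not taken from this diff):

    // Cargo.toml (assumed): bit-set = "0.5"
    use bit_set::BitSet;

    fn main() {
        let max_doc = 8usize;
        let mut delete_bitset = BitSet::with_capacity(max_doc);
        // Mark DocIds 2 and 5 as deleted, as compute_deleted_bitset does for
        // documents matched by a delete term whose opstamp precedes the target.
        delete_bitset.insert(2);
        delete_bitset.insert(5);
        assert_eq!(delete_bitset.len(), 2); // number of deleted documents
        assert!(delete_bitset.contains(5));
        assert!(!delete_bitset.contains(3));
    }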
/// Advance deletes for the given segment up to the target opstamp. /// Advance deletes for the given segment up
/// /// to the target opstamp.
/// Note that there is no guarantee that the resulting `segment_entry` delete_opstamp
/// is `==` target_opstamp.
/// For instance, if there was no delete operation between the state of the `segment_entry` and
/// the `target_opstamp`, the `segment_entry` is not updated.
pub(crate) fn advance_deletes( pub(crate) fn advance_deletes(
mut segment: Segment, mut segment: Segment,
segment_entry: &mut SegmentEntry, segment_entry: &mut SegmentEntry,
target_opstamp: Opstamp, target_opstamp: Opstamp,
) -> crate::Result<()> { ) -> Result<()> {
if segment_entry.meta().delete_opstamp() == Some(target_opstamp) { {
// We are already up-to-date here. if segment_entry.meta().delete_opstamp() == Some(target_opstamp) {
return Ok(()); // We are already up-to-date here.
} return Ok(());
}
if segment_entry.delete_bitset().is_none() && segment_entry.delete_cursor().get().is_none() { let segment_reader = SegmentReader::open(&segment)?;
// There has been no `DeleteOperation` between the segment status and `target_opstamp`.
return Ok(());
}
let segment_reader = SegmentReader::open(&segment)?; let max_doc = segment_reader.max_doc();
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_capacity(max_doc as usize),
};
let max_doc = segment_reader.max_doc(); let delete_cursor = segment_entry.delete_cursor();
let mut delete_bitset: BitSet = match segment_entry.delete_bitset() {
Some(previous_delete_bitset) => (*previous_delete_bitset).clone(),
None => BitSet::with_max_value(max_doc),
};
compute_deleted_bitset( compute_deleted_bitset(
&mut delete_bitset, &mut delete_bitset,
&segment_reader, &segment_reader,
segment_entry.delete_cursor(), delete_cursor,
&DocToOpstampMapping::None, &DocToOpstampMapping::None,
target_opstamp, target_opstamp,
)?; )?;
// TODO optimize // TODO optimize
if let Some(seg_delete_bitset) = segment_reader.delete_bitset() {
for doc in 0u32..max_doc { for doc in 0u32..max_doc {
if seg_delete_bitset.is_deleted(doc) { if segment_reader.is_deleted(doc) {
delete_bitset.insert(doc); delete_bitset.insert(doc as usize);
} }
} }
}
let num_deleted_docs = delete_bitset.len(); let num_deleted_docs = delete_bitset.len();
if num_deleted_docs > 0 { if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp); segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?; let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?; write_delete_bitset(&delete_bitset, &mut delete_file)?;
delete_file.terminate()?; delete_file.terminate()?;
}
} }
segment_entry.set_meta(segment.meta().clone()); segment_entry.set_meta(segment.meta().clone());
Ok(()) Ok(())
} }
fn index_documents( fn index_documents(
memory_budget: usize, memory_budget: usize,
segment: Segment, segment: &Segment,
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>, grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater, segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor, mut delete_cursor: DeleteCursor,
) -> crate::Result<bool> { ) -> Result<bool> {
let schema = segment.schema(); let schema = segment.schema();
let segment_id = segment.id();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?; let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
for document_group in grouped_document_iterator { for document_group in grouped_document_iterator {
for doc in document_group { for doc in document_group {
@@ -212,32 +204,22 @@ fn index_documents(
return Ok(false); return Ok(false);
} }
let max_doc = segment_writer.max_doc(); let num_docs = segment_writer.max_doc();
// this is ensured by the call to peek before starting // this is ensured by the call to peek before starting
// the worker thread. // the worker thread.
assert!(max_doc > 0); assert!(num_docs > 0);
let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?; let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);
let segment_with_max_doc = segment.with_max_doc(max_doc);
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap()); let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
let delete_bitset_opt = apply_deletes( let delete_bitset_opt =
&segment_with_max_doc, apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
&mut delete_cursor,
&doc_opstamps,
last_docstamp,
)?;
let segment_entry = SegmentEntry::new( let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
segment_with_max_doc.meta().clone(), Ok(segment_updater.add_segment(segment_entry))
delete_cursor,
delete_bitset_opt,
);
block_on(segment_updater.schedule_add_segment(segment_entry))?;
Ok(true)
} }
fn apply_deletes( fn apply_deletes(
@@ -245,7 +227,7 @@ fn apply_deletes(
mut delete_cursor: &mut DeleteCursor, mut delete_cursor: &mut DeleteCursor,
doc_opstamps: &[Opstamp], doc_opstamps: &[Opstamp],
last_docstamp: Opstamp, last_docstamp: Opstamp,
) -> crate::Result<Option<BitSet>> { ) -> Result<Option<BitSet<u32>>> {
if delete_cursor.get().is_none() { if delete_cursor.get().is_none() {
// if there are no delete operation in the queue, no need // if there are no delete operation in the queue, no need
// to even open the segment. // to even open the segment.
@@ -253,9 +235,7 @@ fn apply_deletes(
} }
let segment_reader = SegmentReader::open(segment)?; let segment_reader = SegmentReader::open(segment)?;
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps); let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let mut deleted_bitset = BitSet::with_capacity(segment_reader.max_doc() as usize);
let max_doc = segment.meta().max_doc();
let mut deleted_bitset = BitSet::with_max_value(max_doc);
let may_have_deletes = compute_deleted_bitset( let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset, &mut deleted_bitset,
&segment_reader, &segment_reader,
@@ -290,7 +270,7 @@ impl IndexWriter {
num_threads: usize, num_threads: usize,
heap_size_in_bytes_per_thread: usize, heap_size_in_bytes_per_thread: usize,
directory_lock: DirectoryLock, directory_lock: DirectoryLock,
) -> crate::Result<IndexWriter> { ) -> Result<IndexWriter> {
if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN { if heap_size_in_bytes_per_thread < HEAP_SIZE_MIN {
let err_msg = format!( let err_msg = format!(
"The heap size per thread needs to be at least {}.", "The heap size per thread needs to be at least {}.",
@@ -339,17 +319,12 @@ impl IndexWriter {
Ok(index_writer) Ok(index_writer)
} }
fn drop_sender(&mut self) {
let (sender, _receiver) = channel::bounded(1);
mem::replace(&mut self.operation_sender, sender);
}
/// If there are some merging threads, blocks until they all finish their work and /// If there are some merging threads, blocks until they all finish their work and
/// then drops the `IndexWriter`. /// then drops the `IndexWriter`.
pub fn wait_merging_threads(mut self) -> crate::Result<()> { pub fn wait_merging_threads(mut self) -> Result<()> {
// this will stop the indexing thread, // this will stop the indexing thread,
// dropping the last reference to the segment_updater. // dropping the last reference to the segment_updater.
self.drop_sender(); drop(self.operation_sender);
let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]); let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
for join_handle in former_workers_handles { for join_handle in former_workers_handles {
@@ -360,6 +335,7 @@ impl IndexWriter {
TantivyError::ErrorInThread("Error in indexing worker thread.".into()) TantivyError::ErrorInThread("Error in indexing worker thread.".into())
})?; })?;
} }
drop(self.workers_join_handle);
let result = self let result = self
.segment_updater .segment_updater
@@ -374,10 +350,10 @@ impl IndexWriter {
} }
#[doc(hidden)] #[doc(hidden)]
pub fn add_segment(&self, segment_meta: SegmentMeta) -> crate::Result<()> { pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
let delete_cursor = self.delete_queue.cursor(); let delete_cursor = self.delete_queue.cursor();
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None); let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
block_on(self.segment_updater.schedule_add_segment(segment_entry)) self.segment_updater.add_segment(segment_entry);
} }
/// Creates a new segment. /// Creates a new segment.
@@ -394,7 +370,7 @@ impl IndexWriter {
/// Spawns a new worker thread for indexing. /// Spawns a new worker thread for indexing.
/// The thread consumes documents from the pipeline. /// The thread consumes documents from the pipeline.
fn add_indexing_worker(&mut self) -> crate::Result<()> { fn add_indexing_worker(&mut self) -> Result<()> {
let document_receiver_clone = self.operation_receiver.clone(); let document_receiver_clone = self.operation_receiver.clone();
let mut segment_updater = self.segment_updater.clone(); let mut segment_updater = self.segment_updater.clone();
@@ -402,7 +378,7 @@ impl IndexWriter {
let mem_budget = self.heap_size_in_bytes_per_thread; let mem_budget = self.heap_size_in_bytes_per_thread;
let index = self.index.clone(); let index = self.index.clone();
let join_handle: JoinHandle<crate::Result<()>> = thread::Builder::new() let join_handle: JoinHandle<Result<()>> = thread::Builder::new()
.name(format!("thrd-tantivy-index{}", self.worker_id)) .name(format!("thrd-tantivy-index{}", self.worker_id))
.spawn(move || { .spawn(move || {
loop { loop {
@@ -431,7 +407,7 @@ impl IndexWriter {
let segment = index.new_segment(); let segment = index.new_segment();
index_documents( index_documents(
mem_budget, mem_budget,
segment, &segment,
&mut document_iterator, &mut document_iterator,
&mut segment_updater, &mut segment_updater,
delete_cursor.clone(), delete_cursor.clone(),
@@ -448,23 +424,22 @@ impl IndexWriter {
self.segment_updater.get_merge_policy() self.segment_updater.get_merge_policy()
} }
/// Setter for the merge policy. /// Set the merge policy.
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
self.segment_updater.set_merge_policy(merge_policy); self.segment_updater.set_merge_policy(merge_policy);
} }
fn start_workers(&mut self) -> crate::Result<()> { fn start_workers(&mut self) -> Result<()> {
for _ in 0..self.num_threads { for _ in 0..self.num_threads {
self.add_indexing_worker()?; self.add_indexing_worker()?;
} }
Ok(()) Ok(())
} }
/// Detects and removes the files that are not used by the index anymore. /// Detects and removes the files that
pub fn garbage_collect_files( /// are not used by the index anymore.
&self, pub fn garbage_collect_files(&mut self) -> Result<()> {
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> { self.segment_updater.garbage_collect_files().wait()
self.segment_updater.schedule_garbage_collect()
} }
/// Deletes all documents from the index /// Deletes all documents from the index
@@ -503,7 +478,7 @@ impl IndexWriter {
/// Ok(()) /// Ok(())
/// } /// }
/// ``` /// ```
pub fn delete_all_documents(&self) -> crate::Result<Opstamp> { pub fn delete_all_documents(&mut self) -> Result<Opstamp> {
// Delete segments // Delete segments
self.segment_updater.remove_all_segments(); self.segment_updater.remove_all_segments();
// Return new stamp - reverted stamp // Return new stamp - reverted stamp
@@ -517,10 +492,8 @@ impl IndexWriter {
pub fn merge( pub fn merge(
&mut self, &mut self,
segment_ids: &[SegmentId], segment_ids: &[SegmentId],
) -> impl Future<Output = crate::Result<SegmentMeta>> { ) -> Result<impl Future<Item = SegmentMeta, Error = Canceled>> {
let merge_operation = self.segment_updater.make_merge_operation(segment_ids); self.segment_updater.start_merge(segment_ids)
let segment_updater = self.segment_updater.clone();
async move { segment_updater.start_merge(merge_operation)?.await }
} }
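Note: in the variant where `merge` returns `impl Future<Output = crate::Result<SegmentMeta>>` (and `garbage_collect_files` likewise returns a future), the caller drives the future, typically with `futures::executor::block_on`; the other variant returns a futures-0.1 style handle driven with `.wait()`. A hedged end-to-end sketch of the `block_on` style, mirroring the test code further down (the schema, document contents, and the crate-root `tantivy::Result` alias are assumptions of this sketch):

    use futures::executor::block_on;
    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

        // Two commits, so there are at least two segments to merge.
        writer.add_document(doc!(text => "hello"));
        writer.commit()?;
        writer.add_document(doc!(text => "world"));
        writer.commit()?;

        let segment_ids = index.searchable_segment_ids()?;
        // merge() only schedules the work; blocking on the future waits for the merged SegmentMeta.
        block_on(writer.merge(&segment_ids))?;
        // Same pattern for garbage collection of now-unused segment files.
        block_on(writer.garbage_collect_files())?;
        writer.wait_merging_threads()?;
        Ok(())
    }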
/// Closes the current document channel send. /// Closes the current document channel send.
@@ -546,8 +519,13 @@ impl IndexWriter {
/// state as it was after the last commit. /// state as it was after the last commit.
/// ///
/// The opstamp at the last commit is returned. /// The opstamp at the last commit is returned.
pub fn rollback(&mut self) -> crate::Result<Opstamp> { pub fn rollback(&mut self) -> Result<Opstamp> {
info!("Rolling back to opstamp {}", self.committed_opstamp); info!("Rolling back to opstamp {}", self.committed_opstamp);
self.rollback_impl()
}
/// Private, implementation of rollback
fn rollback_impl(&mut self) -> Result<Opstamp> {
// marks the segment updater as killed. From now on, all // marks the segment updater as killed. From now on, all
// segment updates will be ignored. // segment updates will be ignored.
self.segment_updater.kill(); self.segment_updater.kill();
@@ -603,7 +581,7 @@ impl IndexWriter {
/// It is also possible to add a payload to the `commit` /// It is also possible to add a payload to the `commit`
/// using this API. /// using this API.
/// See [`PreparedCommit::set_payload()`](PreparedCommit.html) /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
pub fn prepare_commit(&mut self) -> crate::Result<PreparedCommit> { pub fn prepare_commit(&mut self) -> Result<PreparedCommit<'_>> {
// Here, because we join all of the worker threads, // Here, because we join all of the worker threads,
// all of the segment update for this commit have been // all of the segment update for this commit have been
// sent. // sent.
@@ -650,7 +628,7 @@ impl IndexWriter {
/// Commit returns the `opstamp` of the last document /// Commit returns the `opstamp` of the last document
/// that made it in the commit. /// that made it in the commit.
/// ///
pub fn commit(&mut self) -> crate::Result<Opstamp> { pub fn commit(&mut self) -> Result<Opstamp> {
self.prepare_commit()?.commit() self.prepare_commit()?.commit()
} }
@@ -691,6 +669,9 @@ impl IndexWriter {
/// The opstamp is an increasing `u64` that can /// The opstamp is an increasing `u64` that can
/// be used by the client to align commits with its own /// be used by the client to align commits with its own
/// document queue. /// document queue.
///
/// Currently it represents the number of documents that
/// have been added since the creation of the index.
pub fn add_document(&self, document: Document) -> Opstamp { pub fn add_document(&self, document: Document) -> Opstamp {
let opstamp = self.stamper.stamp(); let opstamp = self.stamper.stamp();
let add_operation = AddOperation { opstamp, document }; let add_operation = AddOperation { opstamp, document };
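Note: the opstamp returned by `add_document` and `commit` is what lets a client align commits with its own upstream queue, and `prepare_commit` allows attaching a payload before committing. A short sketch of that flow (the field name, payload string, and the `&str` argument of `set_payload` are assumptions here):

    use tantivy::schema::{Schema, TEXT};
    use tantivy::{doc, Index};

    fn main() -> tantivy::Result<()> {
        let mut schema_builder = Schema::builder();
        let text = schema_builder.add_text_field("text", TEXT);
        let index = Index::create_in_ram(schema_builder.build());
        let mut writer = index.writer_with_num_threads(1, 3_000_000)?;

        // Opstamps are increasing across operations on one IndexWriter.
        let first = writer.add_document(doc!(text => "a"));
        let second = writer.add_document(doc!(text => "b"));
        assert!(second > first);

        // prepare_commit() lets the caller attach a payload before committing;
        // commit() returns the opstamp covered by this commit.
        let mut prepared = writer.prepare_commit()?;
        prepared.set_payload("upstream-offset-42");
        let committed_at = prepared.commit()?;
        println!("committed at opstamp {}", committed_at);
        Ok(())
    }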
@@ -764,16 +745,6 @@ impl IndexWriter {
} }
} }
impl Drop for IndexWriter {
fn drop(&mut self) {
self.segment_updater.kill();
self.drop_sender();
for work in self.workers_join_handle.drain(..) {
let _ = work.join();
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -783,7 +754,7 @@ mod tests {
use crate::error::*; use crate::error::*;
use crate::indexer::NoMergePolicy; use crate::indexer::NoMergePolicy;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{self, IndexRecordOption, STRING}; use crate::schema::{self, IndexRecordOption};
use crate::Index; use crate::Index;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use crate::Term; use crate::Term;
@@ -1208,16 +1179,4 @@ mod tests {
assert!(clear_again.is_ok()); assert!(clear_again.is_ok());
assert!(commit_again.is_ok()); assert!(commit_again.is_ok());
} }
#[test]
fn test_index_doc_missing_field() {
let mut schema_builder = schema::Schema::builder();
let idfield = schema_builder.add_text_field("id", STRING);
schema_builder.add_text_field("optfield", STRING);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(idfield=>"myid"));
let commit = index_writer.commit();
assert!(commit.is_ok());
}
} }


@@ -2,23 +2,14 @@ use crate::Opstamp;
use crate::SegmentId; use crate::SegmentId;
use census::{Inventory, TrackedObject}; use census::{Inventory, TrackedObject};
use std::collections::HashSet; use std::collections::HashSet;
use std::ops::Deref;
#[derive(Default)] #[derive(Default)]
pub(crate) struct MergeOperationInventory(Inventory<InnerMergeOperation>); pub struct MergeOperationInventory(Inventory<InnerMergeOperation>);
impl Deref for MergeOperationInventory {
type Target = Inventory<InnerMergeOperation>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl MergeOperationInventory { impl MergeOperationInventory {
pub fn segment_in_merge(&self) -> HashSet<SegmentId> { pub fn segment_in_merge(&self) -> HashSet<SegmentId> {
let mut segment_in_merge = HashSet::default(); let mut segment_in_merge = HashSet::default();
for merge_op in self.list() { for merge_op in self.0.list() {
for &segment_id in &merge_op.segment_ids { for &segment_id in &merge_op.segment_ids {
segment_in_merge.insert(segment_id); segment_in_merge.insert(segment_id);
} }
@@ -44,13 +35,13 @@ pub struct MergeOperation {
inner: TrackedObject<InnerMergeOperation>, inner: TrackedObject<InnerMergeOperation>,
} }
pub(crate) struct InnerMergeOperation { struct InnerMergeOperation {
target_opstamp: Opstamp, target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
} }
impl MergeOperation { impl MergeOperation {
pub(crate) fn new( pub fn new(
inventory: &MergeOperationInventory, inventory: &MergeOperationInventory,
target_opstamp: Opstamp, target_opstamp: Opstamp,
segment_ids: Vec<SegmentId>, segment_ids: Vec<SegmentId>,
@@ -60,7 +51,7 @@ impl MergeOperation {
segment_ids, segment_ids,
}; };
MergeOperation { MergeOperation {
inner: inventory.track(inner_merge_operation), inner: inventory.0.track(inner_merge_operation),
} }
} }


@@ -190,7 +190,8 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>, mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
) -> Result<()> { ) -> Result<()> {
for (field, field_entry) in self.schema.fields() { for (field_id, field_entry) in self.schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
match *field_type { match *field_type {
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
@@ -648,12 +649,15 @@ impl IndexMerger {
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, TermOrdinalMapping>> { ) -> Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field_ord, field_entry) in self.schema.fields().iter().enumerate() {
if field_entry.is_indexed() { if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = let indexed_field = Field(field_ord as u32);
self.write_postings_for_field(field, field_entry.field_type(), serializer)? if let Some(term_ordinal_mapping) = self.write_postings_for_field(
{ indexed_field,
term_ordinal_mappings.insert(field, term_ordinal_mapping); field_entry.field_type(),
serializer,
)? {
term_ordinal_mappings.insert(indexed_field, term_ordinal_mapping);
} }
} }
} }
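Note: one side of this diff iterates `self.schema.fields()` directly as `(Field, &FieldEntry)` pairs and reads the numeric id via `Field::field_id()`, while the other enumerates `fields().iter()` and builds `Field(field_id as u32)` by hand. A small sketch of the pair-yielding variant (field names are illustrative only):

    use tantivy::schema::{Schema, STRING, TEXT};

    fn main() {
        let mut schema_builder = Schema::builder();
        schema_builder.add_text_field("title", TEXT);
        schema_builder.add_text_field("id", STRING);
        let schema = schema_builder.build();

        // fields() yields (Field, &FieldEntry); the numeric id comes from field_id().
        for (field, field_entry) in schema.fields() {
            println!(
                "field {} -> id {}, indexed: {}",
                field_entry.name(),
                field.field_id(),
                field_entry.is_indexed()
            );
        }
    }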
@@ -709,7 +713,7 @@ mod tests {
use crate::IndexWriter; use crate::IndexWriter;
use crate::Searcher; use crate::Searcher;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use futures::executor::block_on; use futures::Future;
use std::io::Cursor; use std::io::Cursor;
#[test] #[test]
@@ -792,7 +796,11 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
} }
{ {
@@ -1036,7 +1044,11 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
@@ -1131,7 +1143,11 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -1265,7 +1281,11 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap(); reader.reload().unwrap();
test_searcher( test_searcher(
@@ -1320,7 +1340,11 @@ mod tests {
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids)).expect("Merging failed"); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
reader.reload().unwrap(); reader.reload().unwrap();
// commit has not been called yet. The document should still be // commit has not been called yet. The document should still be
// there. // there.
@@ -1341,18 +1365,22 @@ mod tests {
let mut doc = Document::default(); let mut doc = Document::default();
doc.add_u64(int_field, 1); doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone()); index_writer.add_document(doc.clone());
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("commit failed");
index_writer.add_document(doc); index_writer.add_document(doc);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("commit failed");
index_writer.delete_term(Term::from_field_u64(int_field, 1)); index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index let segment_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
assert!(block_on(index_writer.merge(&segment_ids)).is_ok()); index_writer
.merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
// assert delete has not been committed // assert delete has not been committed
assert!(reader.reload().is_ok()); reader.reload().expect("failed to load searcher 1");
let searcher = reader.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
@@ -1391,12 +1419,12 @@ mod tests {
index_doc(&mut index_writer, &[1, 5]); index_doc(&mut index_writer, &[1, 5]);
index_doc(&mut index_writer, &[3]); index_doc(&mut index_writer, &[3]);
index_doc(&mut index_writer, &[17]); index_doc(&mut index_writer, &[17]);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("committed");
index_doc(&mut index_writer, &[20]); index_doc(&mut index_writer, &[20]);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("committed");
index_doc(&mut index_writer, &[28, 27]); index_doc(&mut index_writer, &[28, 27]);
index_doc(&mut index_writer, &[1_000]); index_doc(&mut index_writer, &[1_000]);
assert!(index_writer.commit().is_ok()); index_writer.commit().expect("committed");
} }
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
@@ -1428,6 +1456,15 @@ mod tests {
assert_eq!(&vals, &[17]); assert_eq!(&vals, &[17]);
} }
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
{ {
let segment = searcher.segment_reader(1u32); let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1451,13 +1488,27 @@ mod tests {
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
assert!(block_on(index_writer.merge(&segment_ids)).is_ok()); index_writer
assert!(index_writer.wait_merging_threads().is_ok()); .merge(&segment_ids)
.expect("Failed to initiate merge")
.wait()
.expect("Merging failed");
index_writer
.wait_merging_threads()
.expect("Wait for merging threads");
} }
assert!(reader.reload().is_ok()); reader.reload().expect("Load searcher");
{ {
let searcher = reader.searcher(); let searcher = reader.searcher();
println!(
"{:?}",
searcher
.segment_readers()
.iter()
.map(|reader| reader.max_doc())
.collect::<Vec<_>>()
);
let segment = searcher.segment_reader(0u32); let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap(); let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
@@ -1492,46 +1543,4 @@ mod tests {
assert_eq!(&vals, &[20]); assert_eq!(&vals, &[20]);
} }
} }
#[test]
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let fast_multi = IntOptions::default().set_fast(Cardinality::MultiValues);
let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
policy.set_min_merge_size(2);
writer.set_merge_policy(Box::new(policy));
for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);
writer.add_document(doc);
if i % 5 == 0 {
writer.commit()?;
}
}
writer.commit()?;
writer.wait_merging_threads()?;
// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
} }

View File

@@ -18,7 +18,7 @@ mod stamper;
pub use self::index_writer::IndexWriter; pub use self::index_writer::IndexWriter;
pub use self::log_merge_policy::LogMergePolicy; pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_operation::MergeOperation; pub use self::merge_operation::{MergeOperation, MergeOperationInventory};
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy}; pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};
pub use self::prepared_commit::PreparedCommit; pub use self::prepared_commit::PreparedCommit;
pub use self::segment_entry::SegmentEntry; pub use self::segment_entry::SegmentEntry;
@@ -28,26 +28,3 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`. /// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy; pub type DefaultMergePolicy = LogMergePolicy;
#[cfg(test)]
mod tests {
use crate::schema::{self, Schema};
use crate::{Index, Term};
#[test]
fn test_advance_delete_bug() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// there must be one deleted document in the segment
index_writer.add_document(doc!(text_field=>"b"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));
// we need enough data to trigger the bug (at least 32 documents)
for _ in 0..32 {
index_writer.add_document(doc!(text_field=>"c"));
}
index_writer.commit().unwrap();
index_writer.commit().unwrap();
}
}


@@ -1,7 +1,6 @@
use super::IndexWriter; use super::IndexWriter;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
use futures::executor::block_on;
/// A prepared commit /// A prepared commit
pub struct PreparedCommit<'a> { pub struct PreparedCommit<'a> {
@@ -33,11 +32,9 @@ impl<'a> PreparedCommit<'a> {
pub fn commit(self) -> Result<Opstamp> { pub fn commit(self) -> Result<Opstamp> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
let _ = block_on( self.index_writer
self.index_writer .segment_updater()
.segment_updater() .commit(self.opstamp, self.payload)?;
.schedule_commit(self.opstamp, self.payload),
);
Ok(self.opstamp) Ok(self.opstamp)
} }
} }


@@ -1,7 +1,7 @@
use crate::common::BitSet;
use crate::core::SegmentId; use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use bit_set::BitSet;
use std::fmt; use std::fmt;
/// A segment entry describes the state of /// A segment entry describes the state of


@@ -16,28 +16,6 @@ struct SegmentRegisters {
committed: SegmentRegister, committed: SegmentRegister,
} }
#[derive(PartialEq, Eq)]
pub(crate) enum SegmentsStatus {
Committed,
Uncommitted,
}
impl SegmentRegisters {
/// Check if all the segments are committed or uncommitted.
///
/// If some segment is missing, or the segments are in different states (this should not happen
/// if tantivy is used correctly), returns `None`.
fn segments_status(&self, segment_ids: &[SegmentId]) -> Option<SegmentsStatus> {
if self.uncommitted.contains_all(segment_ids) {
Some(SegmentsStatus::Uncommitted)
} else if self.committed.contains_all(segment_ids) {
Some(SegmentsStatus::Committed)
} else {
None
}
}
}
/// The segment manager stores the list of segments /// The segment manager stores the list of segments
/// as well as their state. /// as well as their state.
/// ///
@@ -175,35 +153,33 @@ impl SegmentManager {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.uncommitted.add_segment_entry(segment_entry); registers_lock.uncommitted.add_segment_entry(segment_entry);
} }
// Replaces a list of segments with their equivalent merged segment.
// pub fn end_merge(
// Returns whether the merged segments were committed or uncommitted.
pub(crate) fn end_merge(
&self, &self,
before_merge_segment_ids: &[SegmentId], before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry, after_merge_segment_entry: SegmentEntry,
) -> crate::Result<SegmentsStatus> { ) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
let segments_status = registers_lock let target_register: &mut SegmentRegister = {
.segments_status(before_merge_segment_ids) if registers_lock
.ok_or_else(|| { .uncommitted
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.uncommitted
} else if registers_lock
.committed
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager"); warn!("couldn't find segment in SegmentManager");
crate::Error::InvalidArgument( return;
"The segments that were merged could not be found in the SegmentManager. \ }
This is not necessarily a bug, and can happen after a rollback for instance."
.to_string(),
)
})?;
let target_register: &mut SegmentRegister = match segments_status {
SegmentsStatus::Uncommitted => &mut registers_lock.uncommitted,
SegmentsStatus::Committed => &mut registers_lock.committed,
}; };
for segment_id in before_merge_segment_ids { for segment_id in before_merge_segment_ids {
target_register.remove_segment(segment_id); target_register.remove_segment(segment_id);
} }
target_register.add_segment_entry(after_merge_segment_entry); target_register.add_segment_entry(after_merge_segment_entry);
Ok(segments_status)
} }
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> { pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {


@@ -1,13 +1,10 @@
use crate::Directory; use crate::Result;
use crate::core::Segment; use crate::core::Segment;
use crate::core::SegmentComponent; use crate::core::SegmentComponent;
use crate::directory::error::OpenWriteError;
use crate::directory::{DirectoryClone, RAMDirectory, TerminatingWrite, WritePtr};
use crate::fastfield::FastFieldSerializer; use crate::fastfield::FastFieldSerializer;
use crate::fieldnorm::FieldNormsSerializer; use crate::fieldnorm::FieldNormsSerializer;
use crate::postings::InvertedIndexSerializer; use crate::postings::InvertedIndexSerializer;
use crate::schema::Schema;
use crate::store::StoreWriter; use crate::store::StoreWriter;
/// Segment serializer is in charge of laying out on disk /// Segment serializer is in charge of laying out on disk
@@ -17,50 +14,25 @@ pub struct SegmentSerializer {
fast_field_serializer: FastFieldSerializer, fast_field_serializer: FastFieldSerializer,
fieldnorms_serializer: FieldNormsSerializer, fieldnorms_serializer: FieldNormsSerializer,
postings_serializer: InvertedIndexSerializer, postings_serializer: InvertedIndexSerializer,
bundle_writer: Option<(RAMDirectory, WritePtr)>,
}
pub(crate) struct SegmentSerializerWriters {
postings_wrt: WritePtr,
positions_skip_wrt: WritePtr,
positions_wrt: WritePtr,
terms_wrt: WritePtr,
fast_field_wrt: WritePtr,
fieldnorms_wrt: WritePtr,
store_wrt: WritePtr,
}
impl SegmentSerializerWriters {
pub(crate) fn for_segment(segment: &mut Segment) -> Result<Self, OpenWriteError> {
Ok(SegmentSerializerWriters {
postings_wrt: segment.open_write(SegmentComponent::POSTINGS)?,
positions_skip_wrt: segment.open_write(SegmentComponent::POSITIONS)?,
positions_wrt: segment.open_write(SegmentComponent::POSITIONSSKIP)?,
terms_wrt: segment.open_write(SegmentComponent::TERMS)?,
fast_field_wrt: segment.open_write(SegmentComponent::FASTFIELDS)?,
fieldnorms_wrt: segment.open_write(SegmentComponent::FIELDNORMS)?,
store_wrt: segment.open_write(SegmentComponent::STORE)?,
})
}
} }
impl SegmentSerializer { impl SegmentSerializer {
pub(crate) fn new(schema: Schema, writers: SegmentSerializerWriters) -> crate::Result<Self> { /// Creates a new `SegmentSerializer`.
let fast_field_serializer = FastFieldSerializer::from_write(writers.fast_field_wrt)?; pub fn for_segment(segment: &mut Segment) -> Result<SegmentSerializer> {
let fieldnorms_serializer = FieldNormsSerializer::from_write(writers.fieldnorms_wrt)?; let store_write = segment.open_write(SegmentComponent::STORE)?;
let postings_serializer = InvertedIndexSerializer::open(
schema, let fast_field_write = segment.open_write(SegmentComponent::FASTFIELDS)?;
writers.terms_wrt, let fast_field_serializer = FastFieldSerializer::from_write(fast_field_write)?;
writers.postings_wrt,
writers.positions_wrt, let fieldnorms_write = segment.open_write(SegmentComponent::FIELDNORMS)?;
writers.positions_skip_wrt, let fieldnorms_serializer = FieldNormsSerializer::from_write(fieldnorms_write)?;
);
let postings_serializer = InvertedIndexSerializer::open(segment)?;
Ok(SegmentSerializer { Ok(SegmentSerializer {
store_writer: StoreWriter::new(writers.store_wrt), store_writer: StoreWriter::new(store_write),
fast_field_serializer, fast_field_serializer,
fieldnorms_serializer, fieldnorms_serializer,
postings_serializer, postings_serializer,
bundle_writer: None,
}) })
} }
@@ -85,15 +57,11 @@ impl SegmentSerializer {
} }
/// Finalize the segment serialization. /// Finalize the segment serialization.
pub fn close(mut self) -> crate::Result<()> { pub fn close(self) -> Result<()> {
self.fast_field_serializer.close()?; self.fast_field_serializer.close()?;
self.postings_serializer.close()?; self.postings_serializer.close()?;
self.store_writer.close()?; self.store_writer.close()?;
self.fieldnorms_serializer.close()?; self.fieldnorms_serializer.close()?;
if let Some((ram_directory, mut bundle_wrt)) = self.bundle_writer.take() {
ram_directory.serialize_bundle(&mut bundle_wrt)?;
bundle_wrt.terminate()?;
}
Ok(()) Ok(())
} }
} }


@@ -6,35 +6,39 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::{Directory, DirectoryClone, GarbageCollectionResult}; use crate::directory::{Directory, DirectoryClone};
use crate::error::TantivyError;
use crate::indexer::delete_queue::DeleteCursor; use crate::indexer::delete_queue::DeleteCursor;
use crate::indexer::index_writer::advance_deletes; use crate::indexer::index_writer::advance_deletes;
use crate::indexer::merge_operation::MergeOperationInventory; use crate::indexer::merge_operation::MergeOperationInventory;
use crate::indexer::merger::IndexMerger; use crate::indexer::merger::IndexMerger;
use crate::indexer::segment_manager::SegmentsStatus;
use crate::indexer::segment_serializer::SegmentSerializerWriters;
use crate::indexer::stamper::Stamper; use crate::indexer::stamper::Stamper;
use crate::indexer::MergeOperation;
use crate::indexer::SegmentEntry; use crate::indexer::SegmentEntry;
use crate::indexer::SegmentSerializer; use crate::indexer::SegmentSerializer;
use crate::indexer::{DefaultMergePolicy, MergePolicy}; use crate::indexer::{DefaultMergePolicy, MergePolicy};
use crate::indexer::{MergeCandidate, MergeOperation};
use crate::schema::Schema; use crate::schema::Schema;
use crate::Opstamp; use crate::Opstamp;
use futures::channel::oneshot; use crate::Result;
use futures::executor::{ThreadPool, ThreadPoolBuilder}; use futures::oneshot;
use futures::future::Future; use futures::sync::oneshot::Receiver;
use futures::future::TryFutureExt; use futures::Future;
use futures_cpupool::Builder as CpuPoolBuilder;
use futures_cpupool::CpuFuture;
use futures_cpupool::CpuPool;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::io::Write; use std::io::Write;
use std::ops::Deref; use std::mem;
use std::ops::DerefMut;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::sync::RwLock; use std::sync::RwLock;
use std::thread;
const NUM_MERGE_THREADS: usize = 4; use std::thread::JoinHandle;
/// Save the index meta file. /// Save the index meta file.
/// This operation is atomic: /// This operation is atomic:
@@ -45,7 +49,7 @@ const NUM_MERGE_THREADS: usize = 4;
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::Result<()> { pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> Result<()> {
save_metas( save_metas(
&IndexMeta { &IndexMeta {
segments: Vec::new(), segments: Vec::new(),
@@ -66,7 +70,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut dyn Directory) -> crate::R
/// and flushed. /// and flushed.
/// ///
/// This method is not part of tantivy's public API /// This method is not part of tantivy's public API
fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result<()> { fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> Result<()> {
info!("save metas"); info!("save metas");
let mut buffer = serde_json::to_vec_pretty(metas)?; let mut buffer = serde_json::to_vec_pretty(metas)?;
// Just adding a new line at the end of the buffer. // Just adding a new line at the end of the buffer.
@@ -85,38 +89,21 @@ fn save_metas(metas: &IndexMeta, directory: &mut dyn Directory) -> crate::Result
// We voluntarily pass a merge_operation ref to guarantee that // We voluntarily pass a merge_operation ref to guarantee that
// the merge_operation is alive during the process // the merge_operation is alive during the process
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct SegmentUpdater(Arc<InnerSegmentUpdater>); pub struct SegmentUpdater(Arc<InnerSegmentUpdater>);
impl Deref for SegmentUpdater { fn perform_merge(
type Target = InnerSegmentUpdater; merge_operation: &MergeOperation,
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
async fn garbage_collect_files(
segment_updater: SegmentUpdater,
) -> crate::Result<GarbageCollectionResult> {
info!("Running garbage collection");
let mut index = segment_updater.index.clone();
index
.directory_mut()
.garbage_collect(move || segment_updater.list_files())
}
/// Merges the list of segments given in `segment_entries`.
/// This function runs on the calling thread and is computationally expensive.
fn merge(
index: &Index, index: &Index,
mut segment_entries: Vec<SegmentEntry>, mut segment_entries: Vec<SegmentEntry>,
target_opstamp: Opstamp, ) -> Result<SegmentEntry> {
) -> crate::Result<SegmentEntry> { let target_opstamp = merge_operation.target_opstamp();
// first we need to apply deletes to our segment. // first we need to apply deletes to our segment.
let mut merged_segment = index.new_segment(); let mut merged_segment = index.new_segment();
// First we apply all of the deletes to the merged segment, up to the target opstamp. // TODO add logging
let schema = index.schema();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = index.segment(segment_entry.meta().clone()); let segment = index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?; advance_deletes(segment, segment_entry, target_opstamp)?;
@@ -130,21 +117,22 @@ fn merge(
.collect(); .collect();
// An IndexMerger is like a "view" of our merged segments. // An IndexMerger is like a "view" of our merged segments.
let merger: IndexMerger = IndexMerger::open(index.schema(), &segments[..])?; let merger: IndexMerger = IndexMerger::open(schema, &segments[..])?;
// ... we just serialize this index merger in our new segment to merge the two segments. // ... we just serialize this index merger in our new segment
let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut merged_segment)?; // to merge the two segments.
let segment_serializer =
SegmentSerializer::new(merged_segment.schema(), segment_serializer_wrts)?; let segment_serializer = SegmentSerializer::for_segment(&mut merged_segment)?;
let num_docs = merger.write(segment_serializer)?; let num_docs = merger.write(segment_serializer)?;
let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs); let segment_meta = index.new_segment_meta(merged_segment.id(), num_docs);
Ok(SegmentEntry::new(segment_meta, delete_cursor, None)) let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None);
Ok(after_merge_segment_entry)
} }
pub(crate) struct InnerSegmentUpdater { struct InnerSegmentUpdater {
// we keep a copy of the current active IndexMeta to // we keep a copy of the current active IndexMeta to
// avoid loading the file every time we need it in the // avoid loading the file every time we need it in the
// `SegmentUpdater`. // `SegmentUpdater`.
@@ -152,12 +140,12 @@ pub(crate) struct InnerSegmentUpdater {
// This should be up to date as all updates happen through // This should be up to date as all updates happen through
// the unique active `SegmentUpdater`. // the unique active `SegmentUpdater`.
active_metas: RwLock<Arc<IndexMeta>>, active_metas: RwLock<Arc<IndexMeta>>,
pool: ThreadPool, pool: CpuPool,
merge_thread_pool: ThreadPool,
index: Index, index: Index,
segment_manager: SegmentManager, segment_manager: SegmentManager,
merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>, merge_policy: RwLock<Arc<Box<dyn MergePolicy>>>,
merging_thread_id: AtomicUsize,
merging_threads: RwLock<HashMap<usize, JoinHandle<Result<()>>>>,
killed: AtomicBool, killed: AtomicBool,
stamper: Stamper, stamper: Stamper,
merge_operations: MergeOperationInventory, merge_operations: MergeOperationInventory,
@@ -168,31 +156,22 @@ impl SegmentUpdater {
index: Index, index: Index,
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
) -> crate::Result<SegmentUpdater> { ) -> Result<SegmentUpdater> {
let segments = index.searchable_segment_metas()?; let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor); let segment_manager = SegmentManager::from_segments(segments, delete_cursor);
let pool = ThreadPoolBuilder::new() let pool = CpuPoolBuilder::new()
.name_prefix("segment_updater") .name_prefix("segment_updater")
.pool_size(1) .pool_size(1)
.create() .create();
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment updater thread".to_string())
})?;
let merge_thread_pool = ThreadPoolBuilder::new()
.name_prefix("merge_thread")
.pool_size(NUM_MERGE_THREADS)
.create()
.map_err(|_| {
crate::Error::SystemError("Failed to spawn segment merging thread".to_string())
})?;
let index_meta = index.load_metas()?; let index_meta = index.load_metas()?;
Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater { Ok(SegmentUpdater(Arc::new(InnerSegmentUpdater {
active_metas: RwLock::new(Arc::new(index_meta)), active_metas: RwLock::new(Arc::new(index_meta)),
pool, pool,
merge_thread_pool,
index, index,
segment_manager, segment_manager,
merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))), merge_policy: RwLock::new(Arc::new(Box::new(DefaultMergePolicy::default()))),
merging_thread_id: AtomicUsize::default(),
merging_threads: RwLock::new(HashMap::new()),
killed: AtomicBool::new(false), killed: AtomicBool::new(false),
stamper, stamper,
merge_operations: Default::default(), merge_operations: Default::default(),
@@ -200,82 +179,67 @@ impl SegmentUpdater {
} }
pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> { pub fn get_merge_policy(&self) -> Arc<Box<dyn MergePolicy>> {
self.merge_policy.read().unwrap().clone() self.0.merge_policy.read().unwrap().clone()
} }
pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) { pub fn set_merge_policy(&self, merge_policy: Box<dyn MergePolicy>) {
let arc_merge_policy = Arc::new(merge_policy); let arc_merge_policy = Arc::new(merge_policy);
*self.merge_policy.write().unwrap() = arc_merge_policy; *self.0.merge_policy.write().unwrap() = arc_merge_policy;
} }
fn schedule_future<T: 'static + Send, F: Future<Output = crate::Result<T>> + 'static + Send>( fn get_merging_thread_id(&self) -> usize {
self.0.merging_thread_id.fetch_add(1, Ordering::SeqCst)
}
fn run_async<T: 'static + Send, F: 'static + Send + FnOnce(SegmentUpdater) -> T>(
&self, &self,
f: F, f: F,
) -> impl Future<Output = crate::Result<T>> { ) -> CpuFuture<T, TantivyError> {
let (sender, receiver) = oneshot::channel(); let me_clone = self.clone();
if self.is_alive() { self.0.pool.spawn_fn(move || Ok(f(me_clone)))
self.pool.spawn_ok(async move {
let _ = sender.send(f.await);
});
} else {
let _ = sender.send(Err(crate::TantivyError::SystemError(
"Segment updater killed".to_string(),
)));
}
receiver.unwrap_or_else(|_| {
let err_msg =
"A segment_updater future did not success. This should never happen.".to_string();
Err(crate::Error::SystemError(err_msg))
})
} }
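Note: `schedule_future` above bridges a single-threaded `futures::executor::ThreadPool` and the caller by sending the result back over a oneshot channel, since `spawn_ok` only accepts futures with `Output = ()`. A standalone sketch of that bridge, assuming the futures 0.3 crate with its thread-pool feature enabled (the pool name and the computed value are illustrative):

    use futures::channel::oneshot;
    use futures::executor::{block_on, ThreadPoolBuilder};

    fn main() {
        // One dedicated worker, mirroring the pool_size(1) segment_updater pool above.
        let pool = ThreadPoolBuilder::new()
            .pool_size(1)
            .name_prefix("worker")
            .create()
            .expect("failed to create thread pool");

        let (sender, receiver) = oneshot::channel::<u32>();
        // spawn_ok takes a Future<Output = ()>, so the actual result travels
        // back through the oneshot channel instead of the spawned future.
        pool.spawn_ok(async move {
            let _ = sender.send(6 * 7);
        });

        let answer = block_on(receiver).expect("sender dropped before sending");
        assert_eq!(answer, 42);
    }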
pub fn schedule_add_segment( pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
&self, self.run_async(|segment_updater| {
segment_entry: SegmentEntry, segment_updater.0.segment_manager.add_segment(segment_entry);
) -> impl Future<Output = crate::Result<()>> { segment_updater.consider_merge_options();
let segment_updater = self.clone(); true
self.schedule_future(async move {
segment_updater.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options().await;
Ok(())
}) })
.forget();
true
} }
/// Orders `SegmentManager` to remove all segments /// Orders `SegmentManager` to remove all segments
pub(crate) fn remove_all_segments(&self) { pub(crate) fn remove_all_segments(&self) {
self.segment_manager.remove_all_segments(); self.0.segment_manager.remove_all_segments();
} }
pub fn kill(&mut self) { pub fn kill(&mut self) {
self.killed.store(true, Ordering::Release); self.0.killed.store(true, Ordering::Release);
} }
pub fn is_alive(&self) -> bool { pub fn is_alive(&self) -> bool {
!self.killed.load(Ordering::Acquire) !self.0.killed.load(Ordering::Acquire)
} }
/// Apply deletes up to the target opstamp to all segments. /// Apply deletes up to the target opstamp to all segments.
/// ///
/// The method returns copies of the segment entries, /// The method returns copies of the segment entries,
/// updated with the delete information. /// updated with the delete information.
fn purge_deletes(&self, target_opstamp: Opstamp) -> crate::Result<Vec<SegmentEntry>> { fn purge_deletes(&self, target_opstamp: Opstamp) -> Result<Vec<SegmentEntry>> {
let mut segment_entries = self.segment_manager.segment_entries(); let mut segment_entries = self.0.segment_manager.segment_entries();
for segment_entry in &mut segment_entries { for segment_entry in &mut segment_entries {
let segment = self.index.segment(segment_entry.meta().clone()); let segment = self.0.index.segment(segment_entry.meta().clone());
advance_deletes(segment, segment_entry, target_opstamp)?; advance_deletes(segment, segment_entry, target_opstamp)?;
} }
Ok(segment_entries) Ok(segment_entries)
} }
pub fn save_metas( pub fn save_metas(&self, opstamp: Opstamp, commit_message: Option<String>) {
&self,
opstamp: Opstamp,
commit_message: Option<String>,
) -> crate::Result<()> {
if self.is_alive() { if self.is_alive() {
let index = &self.index; let index = &self.0.index;
let directory = index.directory(); let directory = index.directory();
let mut commited_segment_metas = self.segment_manager.committed_segment_metas(); let mut commited_segment_metas = self.0.segment_manager.committed_segment_metas();
// We sort segment_readers by number of documents. // We sort segment_readers by number of documents.
// This is a heuristic to make multithreading more efficient. // This is a heuristic to make multithreading more efficient.
@@ -297,18 +261,16 @@ impl SegmentUpdater {
opstamp, opstamp,
payload: commit_message, payload: commit_message,
}; };
// TODO add context to the error. save_metas(&index_meta, directory.box_clone().borrow_mut())
save_metas(&index_meta, directory.box_clone().borrow_mut())?; .expect("Could not save metas.");
self.store_meta(&index_meta); self.store_meta(&index_meta);
} }
Ok(())
} }
pub fn schedule_garbage_collect( pub fn garbage_collect_files(&self) -> CpuFuture<(), TantivyError> {
&self, self.run_async(move |segment_updater| {
) -> impl Future<Output = crate::Result<GarbageCollectionResult>> { segment_updater.garbage_collect_files_exec();
let garbage_collect_future = garbage_collect_files(self.clone()); })
self.schedule_future(garbage_collect_future)
} }
/// List the files that are useful to the index. /// List the files that are useful to the index.
@@ -316,130 +278,148 @@ impl SegmentUpdater {
/// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector.
fn list_files(&self) -> HashSet<PathBuf> {
let mut files: HashSet<PathBuf> = self let mut files = HashSet::new();
.index
.list_all_segment_metas()
.into_iter()
.flat_map(|segment_meta| segment_meta.list_files())
.collect();
files.insert(META_FILEPATH.to_path_buf()); files.insert(META_FILEPATH.to_path_buf());
for segment_meta in self.0.index.list_all_segment_metas() {
files.extend(segment_meta.list_files());
}
files files
} }
pub fn schedule_commit( fn garbage_collect_files_exec(&self) {
&self, info!("Running garbage collection");
opstamp: Opstamp, let mut index = self.0.index.clone();
payload: Option<String>, index.directory_mut().garbage_collect(|| self.list_files());
) -> impl Future<Output = crate::Result<()>> { }
let segment_updater: SegmentUpdater = self.clone();
self.schedule_future(async move { pub fn commit(&self, opstamp: Opstamp, payload: Option<String>) -> Result<()> {
let segment_entries = segment_updater.purge_deletes(opstamp)?; self.run_async(move |segment_updater| {
segment_updater.segment_manager.commit(segment_entries); if segment_updater.is_alive() {
segment_updater.save_metas(opstamp, payload)?; let segment_entries = segment_updater
let _ = garbage_collect_files(segment_updater.clone()).await; .purge_deletes(opstamp)
segment_updater.consider_merge_options().await; .expect("Failed purge deletes");
Ok(()) segment_updater.0.segment_manager.commit(segment_entries);
segment_updater.save_metas(opstamp, payload);
segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options();
}
}) })
.wait()
}
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> Result<Receiver<SegmentMeta>> {
let commit_opstamp = self.load_metas().opstamp;
let merge_operation = MergeOperation::new(
&self.0.merge_operations,
commit_opstamp,
segment_ids.to_vec(),
);
self.run_async(move |segment_updater| segment_updater.start_merge_impl(merge_operation))
.wait()?
} }
fn store_meta(&self, index_meta: &IndexMeta) { fn store_meta(&self, index_meta: &IndexMeta) {
*self.active_metas.write().unwrap() = Arc::new(index_meta.clone()); *self.0.active_metas.write().unwrap() = Arc::new(index_meta.clone());
} }
fn load_metas(&self) -> Arc<IndexMeta> { fn load_metas(&self) -> Arc<IndexMeta> {
self.active_metas.read().unwrap().clone() self.0.active_metas.read().unwrap().clone()
} }
pub(crate) fn make_merge_operation(&self, segment_ids: &[SegmentId]) -> MergeOperation {
let commit_opstamp = self.load_metas().opstamp;
MergeOperation::new(&self.merge_operations, commit_opstamp, segment_ids.to_vec())
}
// Starts a merge operation. This function will block until the merge operation is effectively
// started. Note that it does not wait for the merge to terminate.
// The calling thread should not be block for a long time, as this only involve waiting for the
// `SegmentUpdater` queue which in turns only contains lightweight operations.
//
// The merge itself happens on a different thread.
//
// When successful, this function returns a `Future` for a `Result<SegmentMeta>` that represents
// the actual outcome of the merge operation.
//
// It returns an error if for some reason the merge operation could not be started.
//
// At this point an error is not necessarily the sign of a malfunction.
// (e.g. A rollback could have happened between the instant when the merge operation was
// suggested and the moment when it ended up being executed.)
//
// `segment_ids` is required to be non-empty. // `segment_ids` is required to be non-empty.
pub fn start_merge( fn start_merge_impl(&self, merge_operation: MergeOperation) -> Result<Receiver<SegmentMeta>> {
&self,
merge_operation: MergeOperation,
) -> crate::Result<impl Future<Output = crate::Result<SegmentMeta>>> {
assert!(
!merge_operation.segment_ids().is_empty(),
"Segment_ids cannot be empty."
);
let segment_updater = self.clone(); let segment_updater_clone = self.clone();
let segment_entries: Vec<SegmentEntry> = self let segment_entries: Vec<SegmentEntry> = self
.0
.segment_manager .segment_manager
.start_merge(merge_operation.segment_ids())?; .start_merge(merge_operation.segment_ids())?;
info!("Starting merge - {:?}", merge_operation.segment_ids()); // let segment_ids_vec = merge_operation.segment_ids.to_vec();
let (merging_future_send, merging_future_recv) = let merging_thread_id = self.get_merging_thread_id();
oneshot::channel::<crate::Result<SegmentMeta>>(); info!(
"Starting merge thread #{} - {:?}",
merging_thread_id,
merge_operation.segment_ids()
);
let (merging_future_send, merging_future_recv) = oneshot();
self.merge_thread_pool.spawn_ok(async move { // first we need to apply deletes to our segment.
// The fact that `merge_operation` is moved here is important. let merging_join_handle = thread::Builder::new()
// Its lifetime is used to track how many merging thread are currently running, .name(format!("mergingthread-{}", merging_thread_id))
// as well as which segment is currently in merge and therefore should not be .spawn(move || {
// candidate for another merge. // first we need to apply deletes to our segment.
match merge( let merge_result = perform_merge(
&segment_updater.index, &merge_operation,
segment_entries, &segment_updater_clone.0.index,
merge_operation.target_opstamp(), segment_entries,
) { );
Ok(after_merge_segment_entry) => {
let segment_meta = segment_updater match merge_result {
.end_merge(merge_operation, after_merge_segment_entry) Ok(after_merge_segment_entry) => {
.await; let merged_segment_meta = after_merge_segment_entry.meta().clone();
let _send_result = merging_future_send.send(segment_meta); segment_updater_clone
} .end_merge(merge_operation, after_merge_segment_entry)
Err(e) => { .expect("Segment updater thread is corrupted.");
warn!(
"Merge of {:?} was cancelled: {:?}", // the future may fail if the listener of the oneshot future
merge_operation.segment_ids().to_vec(), // has been destroyed.
e //
); // This is not a problem here, so we just ignore any
// ... cancel merge // possible error.
if cfg!(test) { let _merging_future_res = merging_future_send.send(merged_segment_meta);
panic!("Merge failed."); }
Err(e) => {
warn!(
"Merge of {:?} was cancelled: {:?}",
merge_operation.segment_ids(),
e
);
// ... cancel merge
if cfg!(test) {
panic!("Merge failed.");
}
// As `merge_operation` will be dropped, the segment in merge state will
// be available for merge again.
// `merging_future_send` will be dropped, sending an error to the future.
} }
} }
} segment_updater_clone
}); .0
.merging_threads
Ok(merging_future_recv .write()
.unwrap_or_else(|_| Err(crate::Error::SystemError("Merge failed".to_string())))) .unwrap()
.remove(&merging_thread_id);
Ok(())
})
.expect("Failed to spawn a thread.");
self.0
.merging_threads
.write()
.unwrap()
.insert(merging_thread_id, merging_join_handle);
Ok(merging_future_recv)
} }
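From a caller's perspective, the merge pipeline above is normally reached through `IndexWriter::merge`, which on the futures-based side of this diff resolves to the merged segment's metadata (this is what the `block_on(index_writer.merge(..))` call in the test near the end of this diff relies on). A hedged usage sketch; `force_merge` and the printed message are illustrative:

```rust
use futures::executor::block_on;
use tantivy::{IndexWriter, Result, SegmentId};

// Merge the given segments and block until the merged segment is registered.
fn force_merge(index_writer: &mut IndexWriter, segment_ids: &[SegmentId]) -> Result<()> {
    // `merge` schedules the work on the segment updater and returns a future
    // for the resulting segment's metadata.
    let merged_meta = block_on(index_writer.merge(segment_ids))?;
    println!("merged {} segments into {:?}", segment_ids.len(), merged_meta.id());
    Ok(())
}
```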
async fn consider_merge_options(&self) { fn consider_merge_options(&self) {
let merge_segment_ids: HashSet<SegmentId> = self.merge_operations.segment_in_merge(); let merge_segment_ids: HashSet<SegmentId> = self.0.merge_operations.segment_in_merge();
let (committed_segments, uncommitted_segments) = let (committed_segments, uncommitted_segments) =
get_mergeable_segments(&merge_segment_ids, &self.segment_manager); get_mergeable_segments(&merge_segment_ids, &self.0.segment_manager);
// Committed segments cannot be merged with uncommitted_segments.
// We therefore consider merges using these two sets of segments independently.
let merge_policy = self.get_merge_policy();
let current_opstamp = self.stamper.stamp(); let current_opstamp = self.0.stamper.stamp();
let mut merge_candidates: Vec<MergeOperation> = merge_policy
.compute_merge_candidates(&uncommitted_segments)
.into_iter()
.map(|merge_candidate| {
MergeOperation::new(&self.merge_operations, current_opstamp, merge_candidate.0) MergeOperation::new(&self.0.merge_operations, current_opstamp, merge_candidate.0)
}) })
.collect(); .collect();
@@ -447,18 +427,25 @@ impl SegmentUpdater {
let committed_merge_candidates = merge_policy let committed_merge_candidates = merge_policy
.compute_merge_candidates(&committed_segments) .compute_merge_candidates(&committed_segments)
.into_iter() .into_iter()
.map(|merge_candidate: MergeCandidate| { .map(|merge_candidate| {
MergeOperation::new(&self.merge_operations, commit_opstamp, merge_candidate.0) MergeOperation::new(&self.0.merge_operations, commit_opstamp, merge_candidate.0)
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter()); merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates { for merge_operation in merge_candidates {
if let Err(err) = self.start_merge(merge_operation) { match self.start_merge_impl(merge_operation) {
warn!( Ok(merge_future) => {
"Starting the merge failed for the following reason. This is not fatal. {}", if let Err(e) = merge_future.fuse().poll() {
err error!("The merge task failed quickly after starting: {:?}", e);
); }
}
Err(err) => {
warn!(
"Starting the merge failed for the following reason. This is not fatal. {}",
err
);
}
} }
} }
} }
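`consider_merge_options` asks the configured `MergePolicy` for merge candidates, so the external knob is `IndexWriter::set_merge_policy`. A small sketch, assuming the public `tantivy::merge_policy::NoMergePolicy` type used by the test at the end of this diff; the helper name is illustrative:

```rust
use tantivy::merge_policy::NoMergePolicy;
use tantivy::IndexWriter;

// Disable automatic merging so that only explicitly requested merges run.
fn disable_auto_merge(index_writer: &mut IndexWriter) {
    index_writer.set_merge_policy(Box::new(NoMergePolicy));
}
```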
@@ -467,17 +454,15 @@ impl SegmentUpdater {
&self, &self,
merge_operation: MergeOperation, merge_operation: MergeOperation,
mut after_merge_segment_entry: SegmentEntry, mut after_merge_segment_entry: SegmentEntry,
) -> impl Future<Output = crate::Result<SegmentMeta>> { ) -> Result<()> {
let segment_updater = self.clone(); self.run_async(move |segment_updater| {
let after_merge_segment_meta = after_merge_segment_entry.meta().clone();
let end_merge_future = self.schedule_future(async move {
info!("End merge {:?}", after_merge_segment_entry.meta());
{
let mut delete_cursor = after_merge_segment_entry.delete_cursor().clone();
if let Some(delete_operation) = delete_cursor.get() {
let committed_opstamp = segment_updater.load_metas().opstamp; let committed_opstamp = segment_updater.load_metas().opstamp;
if delete_operation.opstamp < committed_opstamp { if delete_operation.opstamp < committed_opstamp {
let index = &segment_updater.index; let index = &segment_updater.0.index;
let segment = index.segment(after_merge_segment_entry.meta().clone()); let segment = index.segment(after_merge_segment_entry.meta().clone());
if let Err(e) = advance_deletes( if let Err(e) = advance_deletes(
segment, segment,
@@ -495,26 +480,21 @@ impl SegmentUpdater {
// ... cancel merge // ... cancel merge
// `merge_operations` are tracked. As it is dropped, the // `merge_operations` are tracked. As it is dropped, the
// the segment_ids will be available again for merge. // the segment_ids will be available again for merge.
return Err(e); return;
} }
} }
} }
let previous_metas = segment_updater.load_metas(); let previous_metas = segment_updater.load_metas();
let segments_status = segment_updater segment_updater
.0
.segment_manager .segment_manager
.end_merge(merge_operation.segment_ids(), after_merge_segment_entry)?; .end_merge(merge_operation.segment_ids(), after_merge_segment_entry);
segment_updater.consider_merge_options();
if segments_status == SegmentsStatus::Committed { segment_updater.save_metas(previous_metas.opstamp, previous_metas.payload.clone());
segment_updater
.save_metas(previous_metas.opstamp, previous_metas.payload.clone())?;
}
segment_updater.consider_merge_options().await;
} // we drop all possible handle to a now useless `SegmentMeta`. } // we drop all possible handle to a now useless `SegmentMeta`.
let _ = garbage_collect_files(segment_updater).await; segment_updater.garbage_collect_files_exec();
Ok(()) })
}); .wait()
end_merge_future.map_ok(|_| after_merge_segment_meta)
} }
/// Wait for current merging threads. /// Wait for current merging threads.
@@ -532,9 +512,26 @@ impl SegmentUpdater {
/// ///
/// Obsolete files will eventually be cleaned up /// Obsolete files will eventually be cleaned up
/// by the directory garbage collector. /// by the directory garbage collector.
pub fn wait_merging_thread(&self) -> crate::Result<()> { pub fn wait_merging_thread(&self) -> Result<()> {
self.merge_operations.wait_until_empty(); loop {
Ok(()) let merging_threads: HashMap<usize, JoinHandle<Result<()>>> = {
let mut merging_threads = self.0.merging_threads.write().unwrap();
mem::replace(merging_threads.deref_mut(), HashMap::new())
};
if merging_threads.is_empty() {
return Ok(());
}
debug!("wait merging thread {}", merging_threads.len());
for (_, merging_thread_handle) in merging_threads {
merging_thread_handle
.join()
.map(|_| ())
.map_err(|_| TantivyError::ErrorInThread("Merging thread failed.".into()))?;
}
// Our merging thread may have queued their completed merged segment.
// Let's wait for that too.
self.run_async(move |_| {}).wait()?;
}
} }
} }
@@ -690,6 +687,7 @@ mod tests {
index_writer.segment_updater().remove_all_segments(); index_writer.segment_updater().remove_all_segments();
let seg_vec = index_writer let seg_vec = index_writer
.segment_updater() .segment_updater()
.0
.segment_manager .segment_manager
.segment_entries(); .segment_entries();
assert!(seg_vec.is_empty()); assert!(seg_vec.is_empty());


@@ -3,18 +3,17 @@ use crate::core::Segment;
use crate::core::SerializableSegment; use crate::core::SerializableSegment;
use crate::fastfield::FastFieldsWriter; use crate::fastfield::FastFieldsWriter;
use crate::fieldnorm::FieldNormsWriter; use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::{SegmentSerializer, SegmentSerializerWriters}; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldEntry;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::Term; use crate::schema::Term;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer; use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer; use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::PreTokenizedStream; use crate::tokenizer::{TokenStream, Tokenizer};
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::DocId; use crate::DocId;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
@@ -69,13 +68,14 @@ impl SegmentWriter {
schema: &Schema, schema: &Schema,
) -> Result<SegmentWriter> { ) -> Result<SegmentWriter> {
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer_wrts = SegmentSerializerWriters::for_segment(&mut segment)?; let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let segment_serializer = SegmentSerializer::new(segment.schema(), segment_serializer_wrts)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers =
.fields() schema
.map( .fields()
|(_, field_entry): (Field, &FieldEntry)| match field_entry.field_type() { .iter()
.map(FieldEntry::field_type)
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options FieldType::Str(ref text_options) => text_options
.get_indexing_options() .get_indexing_options()
.and_then(|text_index_option| { .and_then(|text_index_option| {
@@ -83,9 +83,8 @@ impl SegmentWriter {
segment.index().tokenizers().get(tokenizer_name) segment.index().tokenizers().get(tokenizer_name)
}), }),
_ => None, _ => None,
}, })
) .collect();
.collect();
Ok(SegmentWriter { Ok(SegmentWriter {
max_doc: 0, max_doc: 0,
multifield_postings, multifield_postings,
@@ -160,44 +159,26 @@ impl SegmentWriter {
} }
} }
FieldType::Str(_) => { FieldType::Str(_) => {
let mut token_streams: Vec<Box<dyn TokenStream>> = vec![]; let num_tokens = if let Some(ref mut tokenizer) =
let mut offsets = vec![]; self.tokenizers[field.0 as usize]
let mut total_offset = 0; {
let texts: Vec<&str> = field_values
for field_value in field_values { .iter()
match field_value.value() { .flat_map(|field_value| match *field_value.value() {
Value::PreTokStr(tok_str) => { Value::Str(ref text) => Some(text.as_str()),
offsets.push(total_offset); _ => None,
if let Some(last_token) = tok_str.tokens.last() { })
total_offset += last_token.offset_to; .collect();
} if texts.is_empty() {
0
token_streams } else {
.push(Box::new(PreTokenizedStream::from(tok_str.clone()))); let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
} self.multifield_postings
Value::Str(ref text) => { .index_text(doc_id, field, &mut token_stream)
if let Some(ref mut tokenizer) =
self.tokenizers[field.field_id() as usize]
{
offsets.push(total_offset);
total_offset += text.len();
token_streams.push(tokenizer.token_stream(text));
}
}
_ => (),
} }
}
let num_tokens = if token_streams.is_empty() {
0
} else { } else {
let mut token_stream: Box<dyn TokenStream> = 0
Box::new(TokenStreamChain::new(offsets, token_streams));
self.multifield_postings
.index_text(doc_id, field, &mut token_stream)
}; };
self.fieldnorms_writer.record(doc_id, field, num_tokens); self.fieldnorms_writer.record(doc_id, field, num_tokens);
} }
FieldType::U64(ref int_option) => { FieldType::U64(ref int_option) => {
@@ -250,7 +231,6 @@ impl SegmentWriter {
} }
} }
doc.filter_fields(|field| schema.get_field_entry(field).is_stored()); doc.filter_fields(|field| schema.get_field_entry(field).is_stored());
doc.prepare_for_store();
let doc_writer = self.segment_serializer.get_store_writer(); let doc_writer = self.segment_serializer.get_store_writer();
doc_writer.store(&doc)?; doc_writer.store(&doc)?;
self.max_doc += 1; self.max_doc += 1;

151
src/lib.rs Normal file → Executable file

@@ -160,6 +160,7 @@ pub use self::snippet::{Snippet, SnippetGenerator};
mod docset; mod docset;
pub use self::docset::{DocSet, SkipResult}; pub use self::docset::{DocSet, SkipResult};
pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; pub use crate::common::{f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
pub use crate::core::SegmentComponent; pub use crate::core::SegmentComponent;
pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta}; pub use crate::core::{Index, IndexMeta, Searcher, Segment, SegmentId, SegmentMeta};
@@ -169,58 +170,11 @@ pub use crate::indexer::IndexWriter;
pub use crate::postings::Postings; pub use crate::postings::Postings;
pub use crate::reader::LeasedItem; pub use crate::reader::LeasedItem;
pub use crate::schema::{Document, Term}; pub use crate::schema::{Document, Term};
use std::fmt;
use once_cell::sync::Lazy; /// Expose the current version of tantivy, as well
/// whether it was compiled with the simd compression.
/// Index format version. pub fn version() -> &'static str {
const INDEX_FORMAT_VERSION: u32 = 1; env!("CARGO_PKG_VERSION")
/// Structure version for the index.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
major: u32,
minor: u32,
patch: u32,
index_format_version: u32,
store_compression: String,
}
impl fmt::Debug for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
static VERSION: Lazy<Version> = Lazy::new(|| Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
index_format_version: INDEX_FORMAT_VERSION,
store_compression: crate::store::COMPRESSION.to_string(),
});
impl ToString for Version {
fn to_string(&self) -> String {
format!(
"tantivy v{}.{}.{}, index_format v{}, store_compression: {}",
self.major, self.minor, self.patch, self.index_format_version, self.store_compression
)
}
}
static VERSION_STRING: Lazy<String> = Lazy::new(|| VERSION.to_string());
/// Expose the current version of tantivy as found in Cargo.toml during compilation.
/// eg. "0.11.0" as well as the compression scheme used in the docstore.
pub fn version() -> &'static Version {
&VERSION
}
/// Exposes the complete version of tantivy as found in Cargo.toml during compilation as a string.
/// eg. "tantivy v0.11.0, index_format v1, store_compression: lz4".
pub fn version_string() -> &'static str {
VERSION_STRING.as_str()
} }
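The block above replaces the plain `&'static str` version with a structured `Version` plus a cached string rendering. A minimal usage sketch based on the `version()`/`version_string()` signatures shown here; the function name is illustrative:

```rust
// Render both forms of the version information described above.
fn print_tantivy_version() {
    let v = tantivy::version(); // &'static Version
    // Renders as "tantivy vX.Y.Z, index_format vN, store_compression: ...".
    println!("{}", v.to_string());
    // Same rendering, cached once as a &'static str.
    println!("{}", tantivy::version_string());
}
```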
/// Defines tantivy's merging strategy /// Defines tantivy's merging strategy
@@ -258,13 +212,15 @@ pub type Score = f32;
pub type SegmentLocalId = u32; pub type SegmentLocalId = u32;
impl DocAddress { impl DocAddress {
/// Return the segment ordinal id that identifies the segment /// Return the segment ordinal.
/// hosting the document in the `Searcher` it is called from. /// The segment ordinal is an id identifying the segment
/// hosting the document. It is only meaningful, in the context
/// of a searcher.
pub fn segment_ord(self) -> SegmentLocalId {
self.0
}
/// Return the segment-local `DocId` /// Return the segment local `DocId`
pub fn doc(self) -> DocId {
self.1
}
@@ -273,11 +229,11 @@ impl DocAddress {
/// `DocAddress` contains all the necessary information
/// to identify a document given a `Searcher` object.
///
/// It consists of an id identifying its segment, and /// It consists in an id identifying its segment, and
/// a segment-local `DocId`. /// its segment-local `DocId`.
/// ///
/// The id used for the segment is actually an ordinal /// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`. /// in the list of segment hold by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId);
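A tiny sketch of the `DocAddress` layout documented above, assuming the tuple-struct definition shown in this hunk; the concrete values are made up:

```rust
use tantivy::DocAddress;

// Address the 6th document of the first segment in a Searcher's segment list.
fn doc_address_demo() {
    let addr = DocAddress(0u32, 5u32);
    assert_eq!(addr.segment_ord(), 0); // ordinal of the segment within the Searcher
    assert_eq!(addr.doc(), 5);         // DocId local to that segment
}
```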
@@ -333,18 +289,6 @@ mod tests {
sample_with_seed(n, ratio, 4) sample_with_seed(n, ratio, 4)
} }
#[test]
#[cfg(not(feature = "lz4"))]
fn test_version_string() {
use regex::Regex;
let regex_ptn = Regex::new(
"tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}",
)
.unwrap();
let version = super::version().to_string();
assert!(regex_ptn.find(&version).is_some());
}
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() { fn test_indexing() {
@@ -940,73 +884,4 @@ mod tests {
assert_eq!(fast_field_reader.get(0), 4f64) assert_eq!(fast_field_reader.get(0), 4f64)
} }
} }
// motivated by #729
#[test]
fn test_update_via_delete_insert() {
use crate::collector::Count;
use crate::indexer::NoMergePolicy;
use crate::query::AllQuery;
use crate::SegmentId;
use futures::executor::block_on;
const DOC_COUNT: u64 = 2u64;
let mut schema_builder = SchemaBuilder::default();
let id = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
let index_reader = index.reader().unwrap();
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.set_merge_policy(Box::new(NoMergePolicy));
for doc_id in 0u64..DOC_COUNT {
index_writer.add_document(doc!(id => doc_id));
}
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
// update the 10 elements by deleting and re-adding
for doc_id in 0u64..DOC_COUNT {
index_writer.delete_term(Term::from_field_u64(id, doc_id));
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let doc = doc!(id => doc_id);
index_writer.add_document(doc);
index_writer.commit().unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
// The number of document should be stable.
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.into_iter()
.map(|reader| reader.segment_id())
.collect();
block_on(index_writer.merge(&segment_ids)).unwrap();
index_reader.reload().unwrap();
let searcher = index_reader.searcher();
assert_eq!(
searcher.search(&AllQuery, &Count).unwrap(),
DOC_COUNT as usize
);
}
} }


@@ -35,9 +35,9 @@
/// let likes = schema_builder.add_u64_field("num_u64", FAST); /// let likes = schema_builder.add_u64_field("num_u64", FAST);
/// let schema = schema_builder.build(); /// let schema = schema_builder.build();
/// let doc = doc!( /// let doc = doc!(
/// title => "Life Aquatic", /// title => "Life Aquatic",
/// author => "Wes Anderson", /// author => "Wes Anderson",
/// likes => 4u64 /// likes => 4u64
/// ); /// );
/// # } /// # }
/// ``` /// ```


@@ -36,10 +36,11 @@ struct Positions {
impl Positions { impl Positions {
pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions { pub fn new(position_source: ReadOnlySource, skip_source: ReadOnlySource) -> Positions {
let (body, footer) = skip_source.split_from_end(u32::SIZE_IN_BYTES); let skip_len = skip_source.len();
let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted"); let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
let (skip_source, long_skip_source) = let body_split = body.len() - u64::SIZE_IN_BYTES * (num_long_skips as usize);
body.split_from_end(u64::SIZE_IN_BYTES * (num_long_skips as usize)); let (skip_source, long_skip_source) = body.split(body_split);
Positions { Positions {
bit_packer: BitPacker4x::new(), bit_packer: BitPacker4x::new(),
skip_source, skip_source,


@@ -75,7 +75,7 @@ pub mod tests {
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut segment = index.new_segment(); let mut segment = index.new_segment();
let mut posting_serializer = InvertedIndexSerializer::for_segment(&mut segment).unwrap(); let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
{ {
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap(); let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
field_serializer.new_term("abc".as_bytes()).unwrap(); field_serializer.new_term("abc".as_bytes()).unwrap();
@@ -356,9 +356,9 @@ pub mod tests {
#[test] #[test]
fn test_skip_next() { fn test_skip_next() {
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0); let term_0 = Term::from_field_u64(Field(0), 0);
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1); let term_1 = Term::from_field_u64(Field(0), 1);
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2); let term_2 = Term::from_field_u64(Field(0), 2);
let num_docs = 300u32; let num_docs = 300u32;
@@ -511,19 +511,19 @@ pub mod tests {
} }
pub static TERM_A: Lazy<Term> = Lazy::new(|| { pub static TERM_A: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "a") Term::from_field_text(field, "a")
}); });
pub static TERM_B: Lazy<Term> = Lazy::new(|| { pub static TERM_B: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "b") Term::from_field_text(field, "b")
}); });
pub static TERM_C: Lazy<Term> = Lazy::new(|| { pub static TERM_C: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "c") Term::from_field_text(field, "c")
}); });
pub static TERM_D: Lazy<Term> = Lazy::new(|| { pub static TERM_D: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "d") Term::from_field_text(field, "d")
}); });


@@ -61,12 +61,12 @@ fn make_field_partition(
.iter() .iter()
.map(|(key, _, _)| Term::wrap(key).field()) .map(|(key, _, _)| Term::wrap(key).field())
.enumerate(); .enumerate();
let mut prev_field_opt = None; let mut prev_field = Field(u32::max_value());
let mut fields = vec![]; let mut fields = vec![];
let mut offsets = vec![]; let mut offsets = vec![];
for (offset, field) in term_offsets_it { for (offset, field) in term_offsets_it {
if Some(field) != prev_field_opt { if field != prev_field {
prev_field_opt = Some(field); prev_field = field;
fields.push(field); fields.push(field);
offsets.push(offset); offsets.push(offset);
} }
@@ -86,7 +86,8 @@ impl MultiFieldPostingsWriter {
let term_index = TermHashMap::new(table_bits); let term_index = TermHashMap::new(table_bits);
let per_field_postings_writers: Vec<_> = schema let per_field_postings_writers: Vec<_> = schema
.fields() .fields()
.map(|(_, field_entry)| posting_from_field_entry(field_entry)) .iter()
.map(|field_entry| posting_from_field_entry(field_entry))
.collect(); .collect();
MultiFieldPostingsWriter { MultiFieldPostingsWriter {
heap: MemoryArena::new(), heap: MemoryArena::new(),
@@ -106,8 +107,7 @@ impl MultiFieldPostingsWriter {
field: Field, field: Field,
token_stream: &mut dyn TokenStream, token_stream: &mut dyn TokenStream,
) -> u32 { ) -> u32 {
let postings_writer = let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
self.per_field_postings_writers[field.field_id() as usize].deref_mut();
postings_writer.index_text( postings_writer.index_text(
&mut self.term_index, &mut self.term_index,
doc, doc,
@@ -118,8 +118,7 @@ impl MultiFieldPostingsWriter {
} }
pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId { pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId {
let postings_writer = let postings_writer = self.per_field_postings_writers[term.field().0 as usize].deref_mut();
self.per_field_postings_writers[term.field().field_id() as usize].deref_mut();
postings_writer.subscribe(&mut self.term_index, doc, 0u32, term, &mut self.heap) postings_writer.subscribe(&mut self.term_index, doc, 0u32, term, &mut self.heap)
} }
@@ -161,7 +160,7 @@ impl MultiFieldPostingsWriter {
FieldType::Bytes => {} FieldType::Bytes => {}
} }
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize]; let postings_writer = &self.per_field_postings_writers[field.0 as usize];
let mut field_serializer = let mut field_serializer =
serializer.new_field(field, postings_writer.total_num_tokens())?; serializer.new_field(field, postings_writer.total_num_tokens())?;
postings_writer.serialize( postings_writer.serialize(


@@ -10,8 +10,8 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::DocId;
use crate::Result; use crate::Result;
use crate::{Directory, DocId};
use std::io::{self, Write}; use std::io::{self, Write};
/// `InvertedIndexSerializer` is in charge of serializing /// `InvertedIndexSerializer` is in charge of serializing
@@ -54,36 +54,33 @@ pub struct InvertedIndexSerializer {
} }
impl InvertedIndexSerializer { impl InvertedIndexSerializer {
pub(crate) fn for_segment(segment: &mut Segment) -> crate::Result<Self> { /// Open a new `InvertedIndexSerializer` for the given segment
let schema = segment.schema(); fn create(
use crate::core::SegmentComponent; terms_write: CompositeWrite<WritePtr>,
let terms_wrt = segment.open_write(SegmentComponent::TERMS)?; postings_write: CompositeWrite<WritePtr>,
let postings_wrt = segment.open_write(SegmentComponent::POSTINGS)?; positions_write: CompositeWrite<WritePtr>,
let positions_wrt = segment.open_write(SegmentComponent::POSITIONS)?; positionsidx_write: CompositeWrite<WritePtr>,
let positions_idx_wrt = segment.open_write(SegmentComponent::POSITIONSSKIP)?;
Ok(Self::open(
schema,
terms_wrt,
postings_wrt,
positions_wrt,
positions_idx_wrt,
))
}
/// Open a new `PostingsSerializer` for the given segment
pub(crate) fn open(
schema: Schema, schema: Schema,
terms_wrt: WritePtr, ) -> Result<InvertedIndexSerializer> {
postings_wrt: WritePtr, Ok(InvertedIndexSerializer {
positions_wrt: WritePtr, terms_write,
positions_idx_wrt: WritePtr, postings_write,
) -> InvertedIndexSerializer { positions_write,
InvertedIndexSerializer { positionsidx_write,
terms_write: CompositeWrite::wrap(terms_wrt),
postings_write: CompositeWrite::wrap(postings_wrt),
positions_write: CompositeWrite::wrap(positions_wrt),
positionsidx_write: CompositeWrite::wrap(positions_idx_wrt),
schema, schema,
} })
}
/// Open a new `PostingsSerializer` for the given segment
pub fn open(segment: &mut Segment) -> Result<InvertedIndexSerializer> {
use crate::SegmentComponent::{POSITIONS, POSITIONSSKIP, POSTINGS, TERMS};
InvertedIndexSerializer::create(
CompositeWrite::wrap(segment.open_write(TERMS)?),
CompositeWrite::wrap(segment.open_write(POSTINGS)?),
CompositeWrite::wrap(segment.open_write(POSITIONS)?),
CompositeWrite::wrap(segment.open_write(POSITIONSSKIP)?),
segment.schema(),
)
} }
/// Must be called before starting pushing terms of /// Must be called before starting pushing terms of


@@ -9,8 +9,7 @@ use crate::Result;
use crate::Searcher; use crate::Searcher;
use std::collections::BTreeSet; use std::collections::BTreeSet;
/// The boolean query returns a set of documents /// The boolean query combines a set of queries
/// that matches the Boolean combination of constituent subqueries.
/// ///
/// The documents matched by the boolean query are /// The documents matched by the boolean query are
/// those which /// those which
@@ -20,113 +19,6 @@ use std::collections::BTreeSet;
/// `MustNot` occurence. /// `MustNot` occurence.
/// * match at least one of the subqueries that is not /// * match at least one of the subqueries that is not
/// a `MustNot` occurence. /// a `MustNot` occurence.
///
///
/// You can combine other query types and their `Occur`ances into one `BooleanQuery`
///
/// ```rust
///use tantivy::collector::Count;
///use tantivy::doc;
///use tantivy::query::{BooleanQuery, Occur, PhraseQuery, Query, TermQuery};
///use tantivy::schema::{IndexRecordOption, Schema, TEXT};
///use tantivy::Term;
///use tantivy::{Index, Result};
///
///fn main() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let body = schema_builder.add_text_field("body", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// body => "hidden",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// body => "found",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// // Make TermQuery's for "girl" and "diary" in the title
/// let girl_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "girl"),
/// IndexRecordOption::Basic,
/// ));
/// let diary_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// ));
/// // A TermQuery with "found" in the body
/// let body_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(body, "found"),
/// IndexRecordOption::Basic,
/// ));
/// // TermQuery "diary" must and "girl" must not be present
/// let queries_with_occurs1 = vec![
/// (Occur::Must, diary_term_query.box_clone()),
/// (Occur::MustNot, girl_term_query),
/// ];
/// // Make a BooleanQuery equivalent to
/// // title:+diary title:-girl
/// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
/// assert_eq!(count1, 1);
///
/// // TermQuery for "cow" in the title
/// let cow_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "cow"),
/// IndexRecordOption::Basic,
/// ));
/// // "title:diary OR title:cow"
/// let title_diary_or_cow = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, cow_term_query),
/// ]);
/// let count2 = searcher.search(&title_diary_or_cow, &Count)?;
/// assert_eq!(count2, 4);
///
/// // Make a `PhraseQuery` from a vector of `Term`s
/// let phrase_query: Box<dyn Query> = Box::new(PhraseQuery::new(vec![
/// Term::from_field_text(title, "dairy"),
/// Term::from_field_text(title, "cow"),
/// ]));
/// // You can combine subqueries of different types into 1 BooleanQuery:
/// // `TermQuery` and `PhraseQuery`
/// // "title:diary OR "dairy cow"
/// let term_of_phrase_query = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, phrase_query.box_clone()),
/// ]);
/// let count3 = searcher.search(&term_of_phrase_query, &Count)?;
/// assert_eq!(count3, 4);
///
/// // You can nest one BooleanQuery inside another
/// // body:found AND ("title:diary OR "dairy cow")
/// let nested_query = BooleanQuery::from(vec![
/// (Occur::Must, body_term_query),
/// (Occur::Must, Box::new(term_of_phrase_query))
/// ]);
/// let count4 = searcher.search(&nested_query, &Count)?;
/// assert_eq!(count4, 1);
/// Ok(())
///}
/// ```
#[derive(Debug)] #[derive(Debug)]
pub struct BooleanQuery { pub struct BooleanQuery {
subqueries: Vec<(Occur, Box<dyn Query>)>, subqueries: Vec<(Occur, Box<dyn Query>)>,


@@ -54,21 +54,21 @@ where
match self.excluding_state { match self.excluding_state {
State::ExcludeOne(excluded_doc) => { State::ExcludeOne(excluded_doc) => {
if doc == excluded_doc { if doc == excluded_doc {
return false; false
} } else if excluded_doc > doc {
if excluded_doc > doc { true
return true; } else {
} match self.excluding_docset.skip_next(doc) {
match self.excluding_docset.skip_next(doc) { SkipResult::OverStep => {
SkipResult::OverStep => { self.excluding_state = State::ExcludeOne(self.excluding_docset.doc());
self.excluding_state = State::ExcludeOne(self.excluding_docset.doc()); true
true }
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
} }
SkipResult::End => {
self.excluding_state = State::Finished;
true
}
SkipResult::Reached => false,
} }
} }
State::Finished => true, State::Finished => true,


@@ -33,6 +33,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Result, Term}; /// use tantivy::{doc, Index, Result, Term};
/// ///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> { /// fn example() -> Result<()> {
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT); /// let title = schema_builder.add_text_field("title", TEXT);
@@ -58,6 +59,7 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
///
/// let term = Term::from_field_text(title, "Diary"); /// let term = Term::from_field_text(title, "Diary");
/// let query = FuzzyTermQuery::new(term, 1, true); /// let query = FuzzyTermQuery::new(term, 1, true);
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap(); /// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
@@ -67,7 +69,6 @@ static LEV_BUILDER: Lazy<HashMap<(u8, bool), LevenshteinAutomatonBuilder>> = Laz
/// ///
/// Ok(()) /// Ok(())
/// } /// }
/// # assert!(example().is_ok());
/// ``` /// ```
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct FuzzyTermQuery { pub struct FuzzyTermQuery {


@@ -40,7 +40,7 @@ impl PhraseQuery {
PhraseQuery::new_with_offset(terms_with_offset) PhraseQuery::new_with_offset(terms_with_offset)
} }
/// Creates a new `PhraseQuery` given a list of terms and their offsets. /// Creates a new `PhraseQuery` given a list of terms and there offsets.
/// ///
/// Can be used to provide custom offset for each term. /// Can be used to provide custom offset for each term.
pub fn new_with_offset(mut terms: Vec<(usize, Term)>) -> PhraseQuery { pub fn new_with_offset(mut terms: Vec<(usize, Term)>) -> PhraseQuery {
@@ -73,7 +73,7 @@ impl PhraseQuery {
.collect::<Vec<Term>>() .collect::<Vec<Term>>()
} }
/// Returns the `PhraseWeight` for the given phrase query given a specific `searcher`. /// Returns the `PhraseWeight` for the given phrase query given a specific `searcher`.
/// ///
/// This function is the same as `.weight(...)` except it returns /// This function is the same as `.weight(...)` except it returns
/// a specialized type `PhraseWeight` instead of a Boxed trait. /// a specialized type `PhraseWeight` instead of a Boxed trait.


@@ -4,7 +4,6 @@ use crate::postings::Postings;
use crate::query::bm25::BM25Weight; use crate::query::bm25::BM25Weight;
use crate::query::{Intersection, Scorer}; use crate::query::{Intersection, Scorer};
use crate::DocId; use crate::DocId;
use std::cmp::Ordering;
struct PostingsWithOffset<TPostings> { struct PostingsWithOffset<TPostings> {
offset: u32, offset: u32,
@@ -60,16 +59,12 @@ fn intersection_exists(left: &[u32], right: &[u32]) -> bool {
while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i];
let right_val = right[right_i];
match left_val.cmp(&right_val) { if left_val < right_val {
Ordering::Less => { left_i += 1;
left_i += 1; } else if right_val < left_val {
} right_i += 1;
Ordering::Equal => { } else {
return true; return true;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
false false
@@ -82,18 +77,14 @@ fn intersection_count(left: &[u32], right: &[u32]) -> usize {
while left_i < left.len() && right_i < right.len() { while left_i < left.len() && right_i < right.len() {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
match left_val.cmp(&right_val) { if left_val < right_val {
Ordering::Less => { left_i += 1;
left_i += 1; } else if right_val < left_val {
} right_i += 1;
Ordering::Equal => { } else {
count += 1; count += 1;
left_i += 1; left_i += 1;
right_i += 1; right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
count count
@@ -112,19 +103,15 @@ fn intersection(left: &mut [u32], right: &[u32]) -> usize {
while left_i < left_len && right_i < right_len { while left_i < left_len && right_i < right_len {
let left_val = left[left_i]; let left_val = left[left_i];
let right_val = right[right_i]; let right_val = right[right_i];
match left_val.cmp(&right_val) { if left_val < right_val {
Ordering::Less => { left_i += 1;
left_i += 1; } else if right_val < left_val {
} right_i += 1;
Ordering::Equal => { } else {
left[count] = left_val; left[count] = left_val;
count += 1; count += 1;
left_i += 1; left_i += 1;
right_i += 1; right_i += 1;
}
Ordering::Greater => {
right_i += 1;
}
} }
} }
count count
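The three hunks above rewrite the two-pointer intersection helpers with an explicit `match` on `Ordering`. For reference, a standalone restatement of the counting variant on plain sorted slices (not the crate's internal function):

```rust
use std::cmp::Ordering;

/// Count the values present in both sorted, deduplicated slices.
fn intersection_count(left: &[u32], right: &[u32]) -> usize {
    let (mut left_i, mut right_i, mut count) = (0, 0, 0);
    while left_i < left.len() && right_i < right.len() {
        match left[left_i].cmp(&right[right_i]) {
            Ordering::Less => left_i += 1,
            Ordering::Greater => right_i += 1,
            Ordering::Equal => {
                count += 1;
                left_i += 1;
                right_i += 1;
            }
        }
    }
    count
}

// e.g. intersection_count(&[1, 3, 5, 7], &[3, 4, 5]) == 2
```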


@@ -8,7 +8,7 @@ use crate::query::PhraseQuery;
use crate::query::Query; use crate::query::Query;
use crate::query::RangeQuery; use crate::query::RangeQuery;
use crate::query::TermQuery; use crate::query::TermQuery;
use crate::schema::{Facet, IndexRecordOption}; use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema}; use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term}; use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager; use crate::tokenizer::TokenizerManager;
@@ -319,10 +319,7 @@ impl QueryParser {
)) ))
} }
} }
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => Ok(vec![(0, Term::from_field_text(field, phrase))]),
let facet = Facet::from_text(phrase);
Ok(vec![(0, Term::from_field_text(field, facet.encoded_str()))])
}
FieldType::Bytes => { FieldType::Bytes => {
let field_name = self.schema.get_field_name(field).to_string(); let field_name = self.schema.get_field_name(field).to_string();
Err(QueryParserError::FieldNotIndexed(field_name)) Err(QueryParserError::FieldNotIndexed(field_name))
@@ -557,7 +554,6 @@ mod test {
schema_builder.add_text_field("with_stop_words", text_options); schema_builder.add_text_field("with_stop_words", text_options);
schema_builder.add_date_field("date", INDEXED); schema_builder.add_date_field("date", INDEXED);
schema_builder.add_f64_field("float", INDEXED); schema_builder.add_f64_field("float", INDEXED);
schema_builder.add_facet_field("facet");
let schema = schema_builder.build(); let schema = schema_builder.build();
let default_fields = vec![title, text]; let default_fields = vec![title, text];
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
@@ -592,13 +588,9 @@ mod test {
} }
#[test] #[test]
pub fn test_parse_query_facet() { pub fn test_parse_query_simple() {
let query_parser = make_query_parser(); let query_parser = make_query_parser();
let query = query_parser.parse_query("facet:/root/branch/leaf").unwrap(); assert!(query_parser.parse_query("toto").is_ok());
assert_eq!(
format!("{:?}", query),
"TermQuery(Term(field=11,bytes=[114, 111, 111, 116, 0, 98, 114, 97, 110, 99, 104, 0, 108, 101, 97, 102]))"
);
} }
#[test] #[test]
@@ -682,19 +674,13 @@ mod test {
test_parse_query_to_logical_ast_helper( test_parse_query_to_logical_ast_helper(
"signed:-2324", "signed:-2324",
&format!( &format!("{:?}", Term::from_field_i64(Field(2u32), -2324)),
"{:?}",
Term::from_field_i64(Field::from_field_id(2u32), -2324)
),
false, false,
); );
test_parse_query_to_logical_ast_helper( test_parse_query_to_logical_ast_helper(
"float:2.5", "float:2.5",
&format!( &format!("{:?}", Term::from_field_f64(Field(10u32), 2.5)),
"{:?}",
Term::from_field_f64(Field::from_field_id(10u32), 2.5)
),
false, false,
); );
} }


@@ -38,33 +38,41 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # Example /// # Example
/// ///
/// ```rust /// ```rust
/// use tantivy::collector::Count; /// # use tantivy::collector::Count;
/// use tantivy::query::RangeQuery; /// # use tantivy::query::RangeQuery;
/// use tantivy::schema::{Schema, INDEXED}; /// # use tantivy::schema::{Schema, INDEXED};
/// use tantivy::{doc, Index}; /// # use tantivy::{doc, Index, Result};
/// # fn test() -> tantivy::Result<()> { /// #
/// let mut schema_builder = Schema::builder(); /// # fn run() -> Result<()> {
/// let year_field = schema_builder.add_u64_field("year", INDEXED); /// # let mut schema_builder = Schema::builder();
/// let schema = schema_builder.build(); /// # let year_field = schema_builder.add_u64_field("year", INDEXED);
/// /// # let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema); /// #
/// let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?; /// # let index = Index::create_in_ram(schema);
/// for year in 1950u64..2017u64 { /// # {
/// let num_docs_within_year = 10 + (year - 1950) * (year - 1950); /// # let mut index_writer = index.writer_with_num_threads(1, 6_000_000).unwrap();
/// for _ in 0..num_docs_within_year { /// # for year in 1950u64..2017u64 {
/// index_writer.add_document(doc!(year_field => year)); /// # let num_docs_within_year = 10 + (year - 1950) * (year - 1950);
/// } /// # for _ in 0..num_docs_within_year {
/// } /// # index_writer.add_document(doc!(year_field => year));
/// index_writer.commit()?; /// # }
/// /// # }
/// let reader = index.reader()?; /// # index_writer.commit().unwrap();
/// # }
/// # let reader = index.reader()?;
/// let searcher = reader.searcher(); /// let searcher = reader.searcher();
///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970); /// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?; /// let num_60s_books = searcher.search(&docs_in_the_sixties, &Count)?;
/// assert_eq!(num_60s_books, 2285); ///
/// Ok(()) /// # assert_eq!(num_60s_books, 2285);
/// # Ok(())
/// # }
/// #
/// # fn main() {
/// # run().unwrap()
/// # } /// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RangeQuery { pub struct RangeQuery {


@@ -15,40 +15,40 @@ use tantivy_fst::Regex;
/// use tantivy::collector::Count; /// use tantivy::collector::Count;
/// use tantivy::query::RegexQuery; /// use tantivy::query::RegexQuery;
/// use tantivy::schema::{Schema, TEXT}; /// use tantivy::schema::{Schema, TEXT};
/// use tantivy::{doc, Index, Term}; /// use tantivy::{doc, Index, Result, Term};
/// ///
/// # fn test() -> tantivy::Result<()> { /// # fn main() { example().unwrap(); }
/// let mut schema_builder = Schema::builder(); /// fn example() -> Result<()> {
/// let title = schema_builder.add_text_field("title", TEXT); /// let mut schema_builder = Schema::builder();
/// let schema = schema_builder.build(); /// let title = schema_builder.add_text_field("title", TEXT);
/// let index = Index::create_in_ram(schema); /// let schema = schema_builder.build();
/// { /// let index = Index::create_in_ram(schema);
/// let mut index_writer = index.writer(3_000_000)?; /// {
/// index_writer.add_document(doc!( /// let mut index_writer = index.writer(3_000_000)?;
/// title => "The Name of the Wind", /// index_writer.add_document(doc!(
/// )); /// title => "The Name of the Wind",
/// index_writer.add_document(doc!( /// ));
/// title => "The Diary of Muadib", /// index_writer.add_document(doc!(
/// )); /// title => "The Diary of Muadib",
/// index_writer.add_document(doc!( /// ));
/// title => "A Dairy Cow", /// index_writer.add_document(doc!(
/// )); /// title => "A Dairy Cow",
/// index_writer.add_document(doc!( /// ));
/// title => "The Diary of a Young Girl", /// index_writer.add_document(doc!(
/// )); /// title => "The Diary of a Young Girl",
/// index_writer.commit().unwrap(); /// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// } /// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::from_pattern("d[ai]{2}ry", title)?;
/// let count = searcher.search(&query, &Count)?;
/// assert_eq!(count, 3);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RegexQuery { pub struct RegexQuery {


@@ -118,7 +118,7 @@ mod tests {
#[test] #[test]
fn test_term_query_debug() { fn test_term_query_debug() {
let term_query = TermQuery::new( let term_query = TermQuery::new(
Term::from_field_text(Field::from_field_id(1), "hello"), Term::from_field_text(Field(1), "hello"),
IndexRecordOption::WithFreqs, IndexRecordOption::WithFreqs,
); );
assert_eq!( assert_eq!(


@@ -23,39 +23,42 @@ use std::fmt;
/// use tantivy::collector::{Count, TopDocs}; /// use tantivy::collector::{Count, TopDocs};
/// use tantivy::query::TermQuery; /// use tantivy::query::TermQuery;
/// use tantivy::schema::{Schema, TEXT, IndexRecordOption}; /// use tantivy::schema::{Schema, TEXT, IndexRecordOption};
/// use tantivy::{doc, Index, Term}; /// use tantivy::{doc, Index, Result, Term};
/// # fn test() -> tantivy::Result<()> { ///
/// let mut schema_builder = Schema::builder(); /// # fn main() { example().unwrap(); }
/// let title = schema_builder.add_text_field("title", TEXT); /// fn example() -> Result<()> {
/// let schema = schema_builder.build(); /// let mut schema_builder = Schema::builder();
/// let index = Index::create_in_ram(schema); /// let title = schema_builder.add_text_field("title", TEXT);
/// { /// let schema = schema_builder.build();
/// let mut index_writer = index.writer(3_000_000)?; /// let index = Index::create_in_ram(schema);
/// index_writer.add_document(doc!( /// {
/// title => "The Name of the Wind", /// let mut index_writer = index.writer(3_000_000)?;
/// )); /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "The Name of the Wind",
/// title => "The Diary of Muadib", /// ));
/// )); /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "The Diary of Muadib",
/// title => "A Dairy Cow", /// ));
/// )); /// index_writer.add_document(doc!(
/// index_writer.add_document(doc!( /// title => "A Dairy Cow",
/// title => "The Diary of a Young Girl", /// ));
/// )); /// index_writer.add_document(doc!(
/// index_writer.commit()?; /// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit()?;
/// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count)).unwrap();
/// assert_eq!(count, 2);
///
/// Ok(())
/// } /// }
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
/// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// );
/// let (top_docs, count) = searcher.search(&query, &(TopDocs::with_limit(2), Count))?;
/// assert_eq!(count, 2);
/// Ok(())
/// # }
/// # assert!(test().is_ok());
/// ``` /// ```
#[derive(Clone)] #[derive(Clone)]
pub struct TermQuery { pub struct TermQuery {


@@ -162,11 +162,6 @@ pub struct IndexReader {
} }
impl IndexReader { impl IndexReader {
#[cfg(test)]
pub(crate) fn index(&self) -> Index {
self.inner.index.clone()
}
/// Update searchers so that they reflect the state of the last /// Update searchers so that they reflect the state of the last
/// `.commit()`. /// `.commit()`.
/// ///


@@ -167,7 +167,7 @@ mod tests {
use super::Pool; use super::Pool;
use super::Queue; use super::Queue;
use std::{iter, mem}; use std::iter;
#[test] #[test]
fn test_pool() { fn test_pool() {
@@ -197,67 +197,33 @@ mod tests {
fn test_pool_dont_panic_on_empty_pop() { fn test_pool_dont_panic_on_empty_pop() {
// When the object pool is exhausted, it shouldn't panic on pop() // When the object pool is exhausted, it shouldn't panic on pop()
use std::sync::Arc; use std::sync::Arc;
use std::thread; use std::{thread, time};
// Wrap the pool in an Arc, same way as its used in `core/index.rs` // Wrap the pool in an Arc, same way as its used in `core/index.rs`
let pool1 = Arc::new(Pool::new()); let pool = Arc::new(Pool::new());
// clone pools outside the move scope of each new thread // clone pools outside the move scope of each new thread
let pool2 = Arc::clone(&pool1); let pool1 = Arc::clone(&pool);
let pool3 = Arc::clone(&pool1); let pool2 = Arc::clone(&pool);
let elements_for_pool = vec![1, 2]; let elements_for_pool = vec![1, 2];
pool1.publish_new_generation(elements_for_pool); pool.publish_new_generation(elements_for_pool);
let mut threads = vec![]; let mut threads = vec![];
let sleep_dur = time::Duration::from_millis(10);
// spawn one more thread than there are elements in the pool // spawn one more thread than there are elements in the pool
let (start_1_send, start_1_recv) = crossbeam::bounded(0);
let (start_2_send, start_2_recv) = crossbeam::bounded(0);
let (start_3_send, start_3_recv) = crossbeam::bounded(0);
let (event_send1, event_recv) = crossbeam::unbounded();
let event_send2 = event_send1.clone();
let event_send3 = event_send1.clone();
threads.push(thread::spawn(move || { threads.push(thread::spawn(move || {
assert_eq!(start_1_recv.recv(), Ok("start")); // leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool.acquire();
thread::sleep(sleep_dur);
}));
threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool1.acquire(); let _leased_searcher = &pool1.acquire();
assert!(event_send1.send("1 acquired").is_ok()); thread::sleep(sleep_dur);
assert_eq!(start_1_recv.recv(), Ok("stop"));
assert!(event_send1.send("1 stopped").is_ok());
mem::drop(_leased_searcher);
})); }));
threads.push(thread::spawn(move || { threads.push(thread::spawn(move || {
assert_eq!(start_2_recv.recv(), Ok("start")); // leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool2.acquire(); let _leased_searcher = &pool2.acquire();
assert!(event_send2.send("2 acquired").is_ok()); thread::sleep(sleep_dur);
assert_eq!(start_2_recv.recv(), Ok("stop"));
mem::drop(_leased_searcher);
assert!(event_send2.send("2 stopped").is_ok());
})); }));
threads.push(thread::spawn(move || {
assert_eq!(start_3_recv.recv(), Ok("start"));
let _leased_searcher = &pool3.acquire();
assert!(event_send3.send("3 acquired").is_ok());
assert_eq!(start_3_recv.recv(), Ok("stop"));
mem::drop(_leased_searcher);
assert!(event_send3.send("3 stopped").is_ok());
}));
assert!(start_1_send.send("start").is_ok());
assert_eq!(event_recv.recv(), Ok("1 acquired"));
assert!(start_2_send.send("start").is_ok());
assert_eq!(event_recv.recv(), Ok("2 acquired"));
assert!(start_3_send.send("start").is_ok());
assert!(event_recv.try_recv().is_err());
assert!(start_1_send.send("stop").is_ok());
assert_eq!(event_recv.recv(), Ok("1 stopped"));
assert_eq!(event_recv.recv(), Ok("3 acquired"));
assert!(start_3_send.send("stop").is_ok());
assert_eq!(event_recv.recv(), Ok("3 stopped"));
assert!(start_2_send.send("stop").is_ok());
assert_eq!(event_recv.recv(), Ok("2 stopped"));
} }
} }


@@ -1,7 +1,6 @@
use super::*; use super::*;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::VInt; use crate::common::VInt;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime; use crate::DateTime;
use itertools::Itertools; use itertools::Itertools;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
@@ -30,8 +29,8 @@ impl From<Vec<FieldValue>> for Document {
impl PartialEq for Document { impl PartialEq for Document {
fn eq(&self, other: &Document) -> bool { fn eq(&self, other: &Document) -> bool {
// super slow, but only here for tests // super slow, but only here for tests
let mut self_field_values: Vec<&_> = self.field_values.iter().collect(); let mut self_field_values = self.field_values.clone();
let mut other_field_values: Vec<&_> = other.field_values.iter().collect(); let mut other_field_values = other.field_values.clone();
self_field_values.sort(); self_field_values.sort();
other_field_values.sort(); other_field_values.sort();
self_field_values.eq(&other_field_values) self_field_values.eq(&other_field_values)
@@ -79,16 +78,6 @@ impl Document {
self.add(FieldValue::new(field, value)); self.add(FieldValue::new(field, value));
} }
/// Add a pre-tokenized text field.
pub fn add_pre_tokenized_text(
&mut self,
field: Field,
pre_tokenized_text: &PreTokenizedString,
) {
let value = Value::PreTokStr(pre_tokenized_text.clone());
self.add(FieldValue::new(field, value));
}
/// Add a u64 field /// Add a u64 field
pub fn add_u64(&mut self, field: Field, value: u64) { pub fn add_u64(&mut self, field: Field, value: u64) {
self.add(FieldValue::new(field, Value::U64(value))); self.add(FieldValue::new(field, Value::U64(value)));
@@ -155,21 +144,6 @@ impl Document {
.find(|field_value| field_value.field() == field) .find(|field_value| field_value.field() == field)
.map(FieldValue::value) .map(FieldValue::value)
} }
/// Prepares Document for being stored in the document store
///
/// Method transforms PreTokenizedString values into String
/// values.
pub fn prepare_for_store(&mut self) {
for field_value in &mut self.field_values {
if let Value::PreTokStr(pre_tokenized_text) = field_value.value() {
*field_value = FieldValue::new(
field_value.field(),
Value::Str(pre_tokenized_text.text.clone()), //< TODO somehow remove .clone()
);
}
}
}
} }
impl BinarySerializable for Document { impl BinarySerializable for Document {
@@ -195,7 +169,6 @@ impl BinarySerializable for Document {
mod tests { mod tests {
use crate::schema::*; use crate::schema::*;
use crate::tokenizer::{PreTokenizedString, Token};
#[test] #[test]
fn test_doc() { fn test_doc() {
@@ -205,38 +178,4 @@ mod tests {
doc.add_text(text_field, "My title"); doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1); assert_eq!(doc.field_values().len(), 1);
} }
#[test]
fn test_prepare_for_store() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("title", TEXT);
let mut doc = Document::default();
let pre_tokenized_text = PreTokenizedString {
text: String::from("A"),
tokens: vec![Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
}],
};
doc.add_pre_tokenized_text(text_field, &pre_tokenized_text);
doc.add_text(text_field, "title");
doc.prepare_for_store();
assert_eq!(doc.field_values().len(), 2);
match doc.field_values()[0].value() {
Value::Str(ref text) => assert_eq!(text, "A"),
_ => panic!("Incorrect variant of Value"),
}
match doc.field_values()[1].value() {
Value::Str(ref text) => assert_eq!(text, "title"),
_ => panic!("Incorrect variant of Value"),
}
}
} }


@@ -3,23 +3,14 @@ use std::io;
use std::io::Read; use std::io::Read;
use std::io::Write; use std::io::Write;
/// `Field` is represented by an unsigned 32-bit integer type /// `Field` is actually a `u8` identifying a `Field`
/// The schema holds the mapping between field names and `Field` objects. /// The schema is in charge of holding mapping between field names
/// to `Field` objects.
///
/// Because the field id is a `u8`, tantivy can only have at most `255` fields.
/// Value 255 is reserved.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)] #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Field(u32); pub struct Field(pub u32);
impl Field {
/// Create a new field object for the given FieldId.
pub fn from_field_id(field_id: u32) -> Field {
Field(field_id)
}
/// Returns a u32 identifying uniquely a field within a schema.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn field_id(&self) -> u32 {
self.0
}
}
impl BinarySerializable for Field { impl BinarySerializable for Field {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
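A minimal usage sketch of the accessor pair introduced in the hunk above (valid only on the side of the diff where `Field::from_field_id` and `Field::field_id` exist):

```rust
use tantivy::schema::Field;

fn main() {
    // `Field` is a lightweight handle wrapping the id the schema assigned to a field.
    let field = Field::from_field_id(1);
    assert_eq!(field.field_id(), 1);
}
```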


@@ -1,12 +1,11 @@
use base64::decode; use base64::decode;
use crate::schema::{IntOptions, TextOptions};
use crate::schema::Facet; use crate::schema::Facet;
use crate::schema::IndexRecordOption; use crate::schema::IndexRecordOption;
use crate::schema::TextFieldIndexing; use crate::schema::TextFieldIndexing;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{IntOptions, TextOptions};
use crate::tokenizer::PreTokenizedString;
use chrono::{FixedOffset, Utc};
use serde_json::Value as JsonValue; use serde_json::Value as JsonValue;
/// Possible error that may occur while parsing a field value /// Possible error that may occur while parsing a field value
@@ -125,20 +124,13 @@ impl FieldType {
pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> { pub fn value_from_json(&self, json: &JsonValue) -> Result<Value, ValueParsingError> {
match *json { match *json {
JsonValue::String(ref field_text) => match *self { JsonValue::String(ref field_text) => match *self {
FieldType::Date(_) => {
let dt_with_fixed_tz: chrono::DateTime<FixedOffset> =
chrono::DateTime::parse_from_rfc3339(field_text).map_err(|err|
ValueParsingError::TypeError(format!(
"Failed to parse date from JSON. Expected rfc3339 format, got {}. {:?}",
field_text, err
))
)?;
Ok(Value::Date(dt_with_fixed_tz.with_timezone(&Utc)))
}
FieldType::Str(_) => Ok(Value::Str(field_text.clone())), FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) => Err( FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {
ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)), Err(ValueParsingError::TypeError(format!(
), "Expected an integer, got {:?}",
json
)))
}
FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))), FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| { FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
ValueParsingError::InvalidBase64(format!( ValueParsingError::InvalidBase64(format!(
@@ -177,28 +169,6 @@ impl FieldType {
Err(ValueParsingError::TypeError(msg)) Err(ValueParsingError::TypeError(msg))
} }
}, },
JsonValue::Object(_) => match *self {
FieldType::Str(_) => {
if let Ok(tok_str_val) =
serde_json::from_value::<PreTokenizedString>(json.clone())
{
Ok(Value::PreTokStr(tok_str_val))
} else {
let msg = format!(
"Json value {:?} cannot be translated to PreTokenizedString.",
json
);
Err(ValueParsingError::TypeError(msg))
}
}
_ => {
let msg = format!(
"Json value not supported error {:?}. Expected {:?}",
json, self
);
Err(ValueParsingError::TypeError(msg))
}
},
_ => { _ => {
let msg = format!( let msg = format!(
"Json value not supported error {:?}. Expected {:?}", "Json value not supported error {:?}. Expected {:?}",
@@ -214,37 +184,7 @@ impl FieldType {
mod tests { mod tests {
use super::FieldType; use super::FieldType;
use crate::schema::field_type::ValueParsingError; use crate::schema::field_type::ValueParsingError;
use crate::schema::TextOptions;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{Schema, INDEXED};
use crate::tokenizer::{PreTokenizedString, Token};
use crate::{DateTime, Document};
use chrono::{NaiveDate, NaiveDateTime, NaiveTime, Utc};
#[test]
fn test_deserialize_json_date() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build();
let doc_json = r#"{"date": "2019-10-12T07:20:50.52+02:00"}"#;
let doc = schema.parse_document(doc_json).unwrap();
let date = doc.get_first(date_field).unwrap();
assert_eq!(format!("{:?}", date), "Date(2019-10-12T05:20:50.520Z)");
}
#[test]
fn test_serialize_json_date() {
let mut doc = Document::new();
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build();
let naive_date = NaiveDate::from_ymd(1982, 9, 17);
let naive_time = NaiveTime::from_hms(13, 20, 00);
let date_time = DateTime::from_utc(NaiveDateTime::new(naive_date, naive_time), Utc);
doc.add_date(date_field, &date_time);
let doc_json = schema.to_json(&doc);
assert_eq!(doc_json, r#"{"date":["1982-09-17T13:20:00+00:00"]}"#);
}
#[test] #[test]
fn test_bytes_value_from_json() { fn test_bytes_value_from_json() {
@@ -265,71 +205,4 @@ mod tests {
_ => panic!("Expected parse failure for invalid base64"), _ => panic!("Expected parse failure for invalid base64"),
} }
} }
#[test]
fn test_pre_tok_str_value_from_json() {
let pre_tokenized_string_json = r#"{
"text": "The Old Man",
"tokens": [
{
"offset_from": 0,
"offset_to": 3,
"position": 0,
"text": "The",
"position_length": 1
},
{
"offset_from": 4,
"offset_to": 7,
"position": 1,
"text": "Old",
"position_length": 1
},
{
"offset_from": 8,
"offset_to": 11,
"position": 2,
"text": "Man",
"position_length": 1
}
]
}"#;
let expected_value = Value::PreTokStr(PreTokenizedString {
text: String::from("The Old Man"),
tokens: vec![
Token {
offset_from: 0,
offset_to: 3,
position: 0,
text: String::from("The"),
position_length: 1,
},
Token {
offset_from: 4,
offset_to: 7,
position: 1,
text: String::from("Old"),
position_length: 1,
},
Token {
offset_from: 8,
offset_to: 11,
position: 2,
text: String::from("Man"),
position_length: 1,
},
],
});
let deserialized_value = FieldType::Str(TextOptions::default())
.value_from_json(&serde_json::from_str(pre_tokenized_string_json).unwrap())
.unwrap();
assert_eq!(deserialized_value, expected_value);
let serialized_value_json = serde_json::to_string_pretty(&expected_value).unwrap();
assert_eq!(serialized_value_json, pre_tokenized_string_json);
}
} }


@@ -53,7 +53,7 @@ where
fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output { fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output {
SchemaFlagList { SchemaFlagList {
head: head.head, head: head.head,
tail: self, tail: self.clone(),
} }
} }
} }


@@ -44,7 +44,7 @@ We can split the problem of generating a search result page into two phases :
the search results page. (`doc_ids[] -> Document[]`) the search results page. (`doc_ids[] -> Document[]`)
In the first phase, the ability to search for documents by the given field is determined by the In the first phase, the ability to search for documents by the given field is determined by the
[`IndexRecordOption`](enum.IndexRecordOption.html) of our [`TextIndexingOptions`](enum.TextIndexingOptions.html) of our
[`TextOptions`](struct.TextOptions.html). [`TextOptions`](struct.TextOptions.html).
The effect of each possible setting is described more in detail The effect of each possible setting is described more in detail
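As a hedged illustration of the setting discussed above, a schema with an explicit `IndexRecordOption` might be declared as follows (the field name and tokenizer are illustrative, not part of this diff):

```rust
use tantivy::schema::{IndexRecordOption, Schema, TextFieldIndexing, TextOptions};

fn main() {
    // Frequencies and positions in the postings serve the first (search) phase,
    // including phrase queries; `set_stored()` serves the second (retrieval) phase.
    let indexing = TextFieldIndexing::default()
        .set_tokenizer("default")
        .set_index_option(IndexRecordOption::WithFreqsAndPositions);
    let text_options = TextOptions::default()
        .set_indexing_options(indexing)
        .set_stored();

    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("body", text_options);
    let _schema = schema_builder.build();
}
```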


@@ -166,8 +166,8 @@ impl SchemaBuilder {
} }
/// Adds a field entry to the schema in build. /// Adds a field entry to the schema in build.
pub fn add_field(&mut self, field_entry: FieldEntry) -> Field { fn add_field(&mut self, field_entry: FieldEntry) -> Field {
let field = Field::from_field_id(self.fields.len() as u32); let field = Field(self.fields.len() as u32);
let field_name = field_entry.name().to_string(); let field_name = field_entry.name().to_string();
self.fields.push(field_entry); self.fields.push(field_entry);
self.fields_map.insert(field_name, field); self.fields_map.insert(field_name, field);
@@ -223,7 +223,7 @@ pub struct Schema(Arc<InnerSchema>);
impl Schema { impl Schema {
/// Return the `FieldEntry` associated to a `Field`. /// Return the `FieldEntry` associated to a `Field`.
pub fn get_field_entry(&self, field: Field) -> &FieldEntry { pub fn get_field_entry(&self, field: Field) -> &FieldEntry {
&self.0.fields[field.field_id() as usize] &self.0.fields[field.0 as usize]
} }
/// Return the field name for a given `Field`. /// Return the field name for a given `Field`.
@@ -232,12 +232,8 @@ impl Schema {
} }
/// Return the list of all the `Field`s. /// Return the list of all the `Field`s.
pub fn fields(&self) -> impl Iterator<Item = (Field, &FieldEntry)> { pub fn fields(&self) -> &[FieldEntry] {
self.0 &self.0.fields
.fields
.iter()
.enumerate()
.map(|(field_id, field_entry)| (Field::from_field_id(field_id as u32), field_entry))
} }
/// Creates a new builder. /// Creates a new builder.
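A hedged sketch of consuming the iterator-returning `fields()` shown on the left of this hunk (field names are illustrative):

```rust
use tantivy::schema::{Schema, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("title", TEXT);
    schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // The iterator-returning signature pairs every entry with its `Field` handle.
    for (field, field_entry) in schema.fields() {
        println!("field {} -> {}", field.field_id(), field_entry.name());
    }
}
```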
@@ -401,7 +397,6 @@ pub enum DocParsingError {
mod tests { mod tests {
use crate::schema::field_type::ValueParsingError; use crate::schema::field_type::ValueParsingError;
use crate::schema::int_options::Cardinality::SingleValue;
use crate::schema::schema::DocParsingError::NotJSON; use crate::schema::schema::DocParsingError::NotJSON;
use crate::schema::*; use crate::schema::*;
use matches::{assert_matches, matches}; use matches::{assert_matches, matches};
@@ -490,32 +485,13 @@ mod tests {
let schema: Schema = serde_json::from_str(expected).unwrap(); let schema: Schema = serde_json::from_str(expected).unwrap();
let mut fields = schema.fields(); let mut fields = schema.fields().iter();
{
let (field, field_entry) = fields.next().unwrap(); assert_eq!("title", fields.next().unwrap().name());
assert_eq!("title", field_entry.name()); assert_eq!("author", fields.next().unwrap().name());
assert_eq!(0, field.field_id()); assert_eq!("count", fields.next().unwrap().name());
} assert_eq!("popularity", fields.next().unwrap().name());
{ assert_eq!("score", fields.next().unwrap().name());
let (field, field_entry) = fields.next().unwrap();
assert_eq!("author", field_entry.name());
assert_eq!(1, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("count", field_entry.name());
assert_eq!(2, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("popularity", field_entry.name());
assert_eq!(3, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("score", field_entry.name());
assert_eq!(4, field.field_id());
}
assert!(fields.next().is_none()); assert!(fields.next().is_none());
} }
@@ -716,94 +692,4 @@ mod tests {
assert_matches!(json_err, Err(NotJSON(_))); assert_matches!(json_err, Err(NotJSON(_)));
} }
} }
#[test]
pub fn test_schema_add_field() {
let mut schema_builder = SchemaBuilder::default();
let id_options = TextOptions::default().set_stored().set_indexing_options(
TextFieldIndexing::default()
.set_tokenizer("raw")
.set_index_option(IndexRecordOption::Basic),
);
let timestamp_options = IntOptions::default()
.set_stored()
.set_indexed()
.set_fast(SingleValue);
schema_builder.add_text_field("_id", id_options);
schema_builder.add_date_field("_timestamp", timestamp_options);
let schema_content = r#"[
{
"name": "text",
"type": "text",
"options": {
"indexing": {
"record": "position",
"tokenizer": "default"
},
"stored": false
}
},
{
"name": "popularity",
"type": "i64",
"options": {
"indexed": false,
"fast": "single",
"stored": true
}
}
]"#;
let tmp_schema: Schema =
serde_json::from_str(&schema_content).expect("error while reading json");
for (_field, field_entry) in tmp_schema.fields() {
schema_builder.add_field(field_entry.clone());
}
let schema = schema_builder.build();
let schema_json = serde_json::to_string_pretty(&schema).unwrap();
let expected = r#"[
{
"name": "_id",
"type": "text",
"options": {
"indexing": {
"record": "basic",
"tokenizer": "raw"
},
"stored": true
}
},
{
"name": "_timestamp",
"type": "date",
"options": {
"indexed": true,
"fast": "single",
"stored": true
}
},
{
"name": "text",
"type": "text",
"options": {
"indexing": {
"record": "position",
"tokenizer": "default"
},
"stored": false
}
},
{
"name": "popularity",
"type": "i64",
"options": {
"indexed": false,
"fast": "single",
"stored": true
}
}
]"#;
assert_eq!(schema_json, expected);
}
} }


@@ -105,7 +105,7 @@ impl Term {
if self.0.len() < 4 { if self.0.len() < 4 {
self.0.resize(4, 0u8); self.0.resize(4, 0u8);
} }
BigEndian::write_u32(&mut self.0[0..4], field.field_id()); BigEndian::write_u32(&mut self.0[0..4], field.0);
} }
/// Sets a u64 value in the term. /// Sets a u64 value in the term.
@@ -157,7 +157,7 @@ where
/// Returns the field. /// Returns the field.
pub fn field(&self) -> Field { pub fn field(&self) -> Field {
Field::from_field_id(BigEndian::read_u32(&self.0.as_ref()[..4])) Field(BigEndian::read_u32(&self.0.as_ref()[..4]))
} }
/// Returns the `u64` value stored in a term. /// Returns the `u64` value stored in a term.
@@ -227,7 +227,7 @@ impl fmt::Debug for Term {
write!( write!(
f, f,
"Term(field={},bytes={:?})", "Term(field={},bytes={:?})",
self.field().field_id(), self.field().0,
self.value_bytes() self.value_bytes()
) )
} }


@@ -1,5 +1,4 @@
use crate::schema::Facet; use crate::schema::Facet;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime; use crate::DateTime;
use serde::de::Visitor; use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -11,8 +10,6 @@ use std::{cmp::Ordering, fmt};
pub enum Value { pub enum Value {
/// The str type is used for any text information. /// The str type is used for any text information.
Str(String), Str(String),
/// Pre-tokenized str type,
PreTokStr(PreTokenizedString),
/// Unsigned 64-bits Integer `u64` /// Unsigned 64-bits Integer `u64`
U64(u64), U64(u64),
/// Signed 64-bits Integer `i64` /// Signed 64-bits Integer `i64`
@@ -32,7 +29,6 @@ impl Ord for Value {
fn cmp(&self, other: &Self) -> Ordering { fn cmp(&self, other: &Self) -> Ordering {
match (self, other) { match (self, other) {
(Value::Str(l), Value::Str(r)) => l.cmp(r), (Value::Str(l), Value::Str(r)) => l.cmp(r),
(Value::PreTokStr(l), Value::PreTokStr(r)) => l.cmp(r),
(Value::U64(l), Value::U64(r)) => l.cmp(r), (Value::U64(l), Value::U64(r)) => l.cmp(r),
(Value::I64(l), Value::I64(r)) => l.cmp(r), (Value::I64(l), Value::I64(r)) => l.cmp(r),
(Value::Date(l), Value::Date(r)) => l.cmp(r), (Value::Date(l), Value::Date(r)) => l.cmp(r),
@@ -48,8 +44,6 @@ impl Ord for Value {
} }
(Value::Str(_), _) => Ordering::Less, (Value::Str(_), _) => Ordering::Less,
(_, Value::Str(_)) => Ordering::Greater, (_, Value::Str(_)) => Ordering::Greater,
(Value::PreTokStr(_), _) => Ordering::Less,
(_, Value::PreTokStr(_)) => Ordering::Greater,
(Value::U64(_), _) => Ordering::Less, (Value::U64(_), _) => Ordering::Less,
(_, Value::U64(_)) => Ordering::Greater, (_, Value::U64(_)) => Ordering::Greater,
(Value::I64(_), _) => Ordering::Less, (Value::I64(_), _) => Ordering::Less,
@@ -71,11 +65,10 @@ impl Serialize for Value {
{ {
match *self { match *self {
Value::Str(ref v) => serializer.serialize_str(v), Value::Str(ref v) => serializer.serialize_str(v),
Value::PreTokStr(ref v) => v.serialize(serializer),
Value::U64(u) => serializer.serialize_u64(u), Value::U64(u) => serializer.serialize_u64(u),
Value::I64(u) => serializer.serialize_i64(u), Value::I64(u) => serializer.serialize_i64(u),
Value::F64(u) => serializer.serialize_f64(u), Value::F64(u) => serializer.serialize_f64(u),
Value::Date(ref date) => serializer.serialize_str(&date.to_rfc3339()), Value::Date(ref date) => serializer.serialize_i64(date.timestamp()),
Value::Facet(ref facet) => facet.serialize(serializer), Value::Facet(ref facet) => facet.serialize(serializer),
Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes), Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes),
} }
@@ -96,14 +89,14 @@ impl<'de> Deserialize<'de> for Value {
formatter.write_str("a string or u32") formatter.write_str("a string or u32")
} }
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
Ok(Value::I64(v))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> { fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
Ok(Value::U64(v)) Ok(Value::U64(v))
} }
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> {
Ok(Value::I64(v))
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> { fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> {
Ok(Value::F64(v)) Ok(Value::F64(v))
} }
@@ -131,15 +124,6 @@ impl Value {
} }
} }
/// Returns the tokenized text, provided the value is of the `PreTokStr` type.
/// (Returns None if the value is not of the `PreTokStr` type).
pub fn tokenized_text(&self) -> Option<&PreTokenizedString> {
match *self {
Value::PreTokStr(ref tok_text) => Some(tok_text),
_ => None,
}
}
/// Returns the u64-value, provided the value is of the `U64` type. /// Returns the u64-value, provided the value is of the `U64` type.
/// ///
/// # Panics /// # Panics
@@ -209,8 +193,8 @@ impl From<f64> for Value {
} }
} }
impl From<crate::DateTime> for Value { impl From<DateTime> for Value {
fn from(date_time: crate::DateTime) -> Value { fn from(date_time: DateTime) -> Value {
Value::Date(date_time) Value::Date(date_time)
} }
} }
@@ -233,17 +217,10 @@ impl From<Vec<u8>> for Value {
} }
} }
impl From<PreTokenizedString> for Value {
fn from(pretokenized_string: PreTokenizedString) -> Value {
Value::PreTokStr(pretokenized_string)
}
}
mod binary_serialize { mod binary_serialize {
use super::Value; use super::Value;
use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable}; use crate::common::{f64_to_u64, u64_to_f64, BinarySerializable};
use crate::schema::Facet; use crate::schema::Facet;
use crate::tokenizer::PreTokenizedString;
use chrono::{TimeZone, Utc}; use chrono::{TimeZone, Utc};
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
@@ -254,11 +231,6 @@ mod binary_serialize {
const BYTES_CODE: u8 = 4; const BYTES_CODE: u8 = 4;
const DATE_CODE: u8 = 5; const DATE_CODE: u8 = 5;
const F64_CODE: u8 = 6; const F64_CODE: u8 = 6;
const EXT_CODE: u8 = 7;
// extended types
const TOK_STR_CODE: u8 = 0;
impl BinarySerializable for Value { impl BinarySerializable for Value {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> { fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -267,18 +239,6 @@ mod binary_serialize {
TEXT_CODE.serialize(writer)?; TEXT_CODE.serialize(writer)?;
text.serialize(writer) text.serialize(writer)
} }
Value::PreTokStr(ref tok_str) => {
EXT_CODE.serialize(writer)?;
TOK_STR_CODE.serialize(writer)?;
if let Ok(text) = serde_json::to_string(tok_str) {
text.serialize(writer)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Failed to dump Value::PreTokStr(_) to json.",
))
}
}
Value::U64(ref val) => { Value::U64(ref val) => {
U64_CODE.serialize(writer)?; U64_CODE.serialize(writer)?;
val.serialize(writer) val.serialize(writer)
@@ -330,30 +290,6 @@ mod binary_serialize {
} }
HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)), HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)),
BYTES_CODE => Ok(Value::Bytes(Vec::<u8>::deserialize(reader)?)), BYTES_CODE => Ok(Value::Bytes(Vec::<u8>::deserialize(reader)?)),
EXT_CODE => {
let ext_type_code = u8::deserialize(reader)?;
match ext_type_code {
TOK_STR_CODE => {
let str_val = String::deserialize(reader)?;
if let Ok(value) = serde_json::from_str::<PreTokenizedString>(&str_val)
{
Ok(Value::PreTokStr(value))
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Failed to parse string data as Value::PreTokStr(_).",
))
}
}
_ => Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"No extended field type is associated with code {:?}",
ext_type_code
),
)),
}
}
_ => Err(io::Error::new( _ => Err(io::Error::new(
io::ErrorKind::InvalidData, io::ErrorKind::InvalidData,
format!("No field type is associated with code {:?}", type_code), format!("No field type is associated with code {:?}", type_code),
@@ -362,17 +298,3 @@ mod binary_serialize {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::Value;
use crate::DateTime;
use std::str::FromStr;
#[test]
fn test_serialize_date() {
let value = Value::Date(DateTime::from_str("1996-12-20T00:39:57+00:00").unwrap());
let serialized_value_json = serde_json::to_string_pretty(&value).unwrap();
assert_eq!(serialized_value_json, r#""1996-12-20T00:39:57+00:00""#);
}
}


@@ -331,8 +331,9 @@ mod tests {
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::iter::Iterator; use std::iter::Iterator;
const TEST_TEXT: &'static str = r#"Rust is a systems programming language sponsored by const TEST_TEXT: &'static str =
Mozilla which describes it as a "safe, concurrent, practical language", supporting functional and r#"Rust is a systems programming language sponsored by Mozilla which
describes it as a "safe, concurrent, practical language", supporting functional and
imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?], imperative-procedural paradigms. Rust is syntactically similar to C++[according to whom?],
but its designers intend it to provide better memory safety while still maintaining but its designers intend it to provide better memory safety while still maintaining
performance. performance.
@@ -362,13 +363,13 @@ Survey in 2016, 2017, and 2018."#;
let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT); let snippet = select_best_fragment_combination(&fragments[..], &TEST_TEXT);
assert_eq!( assert_eq!(
snippet.fragments, snippet.fragments,
"Rust is a systems programming language sponsored by\n\ "Rust is a systems programming language sponsored by \
Mozilla which describes it as a \"safe" Mozilla which\ndescribes it as a \"safe"
); );
assert_eq!( assert_eq!(
snippet.to_html(), snippet.to_html(),
"<b>Rust</b> is a systems programming <b>language</b> \ "<b>Rust</b> is a systems programming <b>language</b> \
sponsored by\nMozilla which describes it as a &quot;safe" sponsored by Mozilla which\ndescribes it as a &quot;safe"
) )
} }


@@ -1,9 +1,6 @@
use std::io::{self, Read, Write}; extern crate lz4;
/// Name of the compression scheme used in the doc store. use std::io::{self, Read, Write};
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &'static str = "lz4";
pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> { pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
compressed.clear(); compressed.clear();


@@ -2,11 +2,6 @@ use snap;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
/// Name of the compression scheme used in the doc store.
///
/// This name is appended to the version string of tantivy.
pub const COMPRESSION: &str = "snappy";
pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> { pub fn compress(uncompressed: &[u8], compressed: &mut Vec<u8>) -> io::Result<()> {
compressed.clear(); compressed.clear();
let mut encoder = snap::Writer::new(compressed); let mut encoder = snap::Writer::new(compressed);


@@ -42,22 +42,18 @@ pub use self::writer::StoreWriter;
#[cfg(feature = "lz4")] #[cfg(feature = "lz4")]
mod compression_lz4; mod compression_lz4;
#[cfg(feature = "lz4")] #[cfg(feature = "lz4")]
pub use self::compression_lz4::COMPRESSION; use self::compression_lz4::*;
#[cfg(feature = "lz4")]
use self::compression_lz4::{compress, decompress};
#[cfg(not(feature = "lz4"))] #[cfg(not(feature = "lz4"))]
mod compression_snap; mod compression_snap;
#[cfg(not(feature = "lz4"))] #[cfg(not(feature = "lz4"))]
pub use self::compression_snap::COMPRESSION; use self::compression_snap::*;
#[cfg(not(feature = "lz4"))]
use self::compression_snap::{compress, decompress};
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::schema::Document; use crate::schema::Document;
use crate::schema::FieldValue; use crate::schema::FieldValue;
use crate::schema::Schema; use crate::schema::Schema;


@@ -36,7 +36,7 @@ pub use self::termdict::{TermDictionary, TermDictionaryBuilder};
mod tests { mod tests {
use super::{TermDictionary, TermDictionaryBuilder, TermStreamer}; use super::{TermDictionary, TermDictionaryBuilder, TermStreamer};
use crate::core::Index; use crate::core::Index;
use crate::directory::{Directory, RAMDirectory, ReadOnlyDirectory, ReadOnlySource}; use crate::directory::{Directory, RAMDirectory, ReadOnlySource};
use crate::postings::TermInfo; use crate::postings::TermInfo;
use crate::schema::{Document, FieldType, Schema, TEXT}; use crate::schema::{Document, FieldType, Schema, TEXT};
use std::path::PathBuf; use std::path::PathBuf;


@@ -2,6 +2,8 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//!
//! let tokenizer = RawTokenizer //! let tokenizer = RawTokenizer
//! .filter(AlphaNumOnlyFilter); //! .filter(AlphaNumOnlyFilter);
//! //!
@@ -18,6 +20,7 @@
//! assert!(stream.next().is_some()); //! assert!(stream.next().is_some());
//! // the "emoji" is dropped because it's not an alphanum //! // the "emoji" is dropped because it's not an alphanum
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! # }
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};


@@ -7,6 +7,7 @@
//! ```rust //! ```rust
//! use tantivy::schema::*; //! use tantivy::schema::*;
//! //!
//! # fn main() {
//! let mut schema_builder = Schema::builder(); //! let mut schema_builder = Schema::builder();
//! //!
//! let text_options = TextOptions::default() //! let text_options = TextOptions::default()
@@ -30,6 +31,7 @@
//! schema_builder.add_text_field("uuid", id_options); //! schema_builder.add_text_field("uuid", id_options);
//! //!
//! let schema = schema_builder.build(); //! let schema = schema_builder.build();
//! # }
//! ``` //! ```
//! //!
//! By default, `tantivy` offers the following tokenizers: //! By default, `tantivy` offers the following tokenizers:
@@ -64,10 +66,12 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//! let en_stem = SimpleTokenizer //! let en_stem = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(40)) //! .filter(RemoveLongFilter::limit(40))
//! .filter(LowerCaser) //! .filter(LowerCaser)
//! .filter(Stemmer::new(Language::English)); //! .filter(Stemmer::new(Language::English));
//! # }
//! ``` //! ```
//! //!
//! Once your tokenizer is defined, you need to //! Once your tokenizer is defined, you need to
@@ -77,12 +81,13 @@
//! # use tantivy::schema::Schema; //! # use tantivy::schema::Schema;
//! # use tantivy::tokenizer::*; //! # use tantivy::tokenizer::*;
//! # use tantivy::Index; //! # use tantivy::Index;
//! # //! # fn main() {
//! let custom_en_tokenizer = SimpleTokenizer; //! # let custom_en_tokenizer = SimpleTokenizer;
//! # let schema = Schema::builder().build(); //! # let schema = Schema::builder().build();
//! let index = Index::create_in_ram(schema); //! let index = Index::create_in_ram(schema);
//! index.tokenizers() //! index.tokenizers()
//! .register("custom_en", custom_en_tokenizer); //! .register("custom_en", custom_en_tokenizer);
//! # }
//! ``` //! ```
//! //!
//! If you built your schema programmatically, a complete example //! If you built your schema programmatically, a complete example
@@ -97,6 +102,7 @@
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! use tantivy::Index; //! use tantivy::Index;
//! //!
//! # fn main() {
//! let mut schema_builder = Schema::builder(); //! let mut schema_builder = Schema::builder();
//! let text_field_indexing = TextFieldIndexing::default() //! let text_field_indexing = TextFieldIndexing::default()
//! .set_tokenizer("custom_en") //! .set_tokenizer("custom_en")
@@ -115,6 +121,8 @@
//! index //! index
//! .tokenizers() //! .tokenizers()
//! .register("custom_en", custom_en_tokenizer); //! .register("custom_en", custom_en_tokenizer);
//! // ...
//! # }
//! ``` //! ```
//! //!
mod alphanum_only; mod alphanum_only;
@@ -128,7 +136,6 @@ mod simple_tokenizer;
mod stemmer; mod stemmer;
mod stop_word_filter; mod stop_word_filter;
mod token_stream_chain; mod token_stream_chain;
mod tokenized_string;
mod tokenizer; mod tokenizer;
mod tokenizer_manager; mod tokenizer_manager;
@@ -145,9 +152,7 @@ pub use self::stop_word_filter::StopWordFilter;
pub(crate) use self::token_stream_chain::TokenStreamChain; pub(crate) use self::token_stream_chain::TokenStreamChain;
pub use self::tokenizer::BoxedTokenizer; pub use self::tokenizer::BoxedTokenizer;
pub use self::tokenized_string::{PreTokenizedStream, PreTokenizedString};
pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer}; pub use self::tokenizer::{Token, TokenFilter, TokenStream, Tokenizer};
pub use self::tokenizer_manager::TokenizerManager; pub use self::tokenizer_manager::TokenizerManager;
/// Maximum authorized len (in bytes) for a token. /// Maximum authorized len (in bytes) for a token.


@@ -31,7 +31,7 @@ use super::{Token, TokenStream, Tokenizer};
/// ///
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// /// # fn main() {
/// let tokenizer = NgramTokenizer::new(2, 3, false); /// let tokenizer = NgramTokenizer::new(2, 3, false);
/// let mut stream = tokenizer.token_stream("hello"); /// let mut stream = tokenizer.token_stream("hello");
/// { /// {
@@ -77,6 +77,7 @@ use super::{Token, TokenStream, Tokenizer};
/// assert_eq!(token.offset_to, 5); /// assert_eq!(token.offset_to, 5);
/// } /// }
/// assert!(stream.next().is_none()); /// assert!(stream.next().is_none());
/// # }
/// ``` /// ```
#[derive(Clone)] #[derive(Clone)]
pub struct NgramTokenizer { pub struct NgramTokenizer {


@@ -2,6 +2,8 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//!
//! let tokenizer = SimpleTokenizer //! let tokenizer = SimpleTokenizer
//! .filter(RemoveLongFilter::limit(5)); //! .filter(RemoveLongFilter::limit(5));
//! //!
@@ -10,6 +12,7 @@
//! // out of the token stream. //! // out of the token stream.
//! assert_eq!(stream.next().unwrap().text, "nice"); //! assert_eq!(stream.next().unwrap().text, "nice");
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! # }
//! ``` //! ```
//! //!
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};


@@ -15,7 +15,6 @@ pub enum Language {
Greek, Greek,
Hungarian, Hungarian,
Italian, Italian,
Norwegian,
Portuguese, Portuguese,
Romanian, Romanian,
Russian, Russian,
@@ -39,7 +38,6 @@ impl Language {
Greek => Algorithm::Greek, Greek => Algorithm::Greek,
Hungarian => Algorithm::Hungarian, Hungarian => Algorithm::Hungarian,
Italian => Algorithm::Italian, Italian => Algorithm::Italian,
Norwegian => Algorithm::Norwegian,
Portuguese => Algorithm::Portuguese, Portuguese => Algorithm::Portuguese,
Romanian => Algorithm::Romanian, Romanian => Algorithm::Romanian,
Russian => Algorithm::Russian, Russian => Algorithm::Russian,


@@ -2,6 +2,7 @@
//! ```rust //! ```rust
//! use tantivy::tokenizer::*; //! use tantivy::tokenizer::*;
//! //!
//! # fn main() {
//! let tokenizer = SimpleTokenizer //! let tokenizer = SimpleTokenizer
//! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()])); //! .filter(StopWordFilter::remove(vec!["the".to_string(), "is".to_string()]));
//! //!
@@ -9,6 +10,7 @@
//! assert_eq!(stream.next().unwrap().text, "fox"); //! assert_eq!(stream.next().unwrap().text, "fox");
//! assert_eq!(stream.next().unwrap().text, "crafty"); //! assert_eq!(stream.next().unwrap().text, "crafty");
//! assert!(stream.next().is_none()); //! assert!(stream.next().is_none());
//! # }
//! ``` //! ```
use super::{Token, TokenFilter, TokenStream}; use super::{Token, TokenFilter, TokenStream};
use fnv::FnvHasher; use fnv::FnvHasher;
@@ -44,7 +46,7 @@ impl StopWordFilter {
"there", "these", "they", "this", "to", "was", "will", "with", "there", "these", "they", "this", "to", "was", "will", "with",
]; ];
StopWordFilter::remove(words.iter().map(|&s| s.to_string()).collect()) StopWordFilter::remove(words.iter().map(|s| s.to_string()).collect())
} }
} }


@@ -1,189 +0,0 @@
use crate::tokenizer::{Token, TokenStream, TokenStreamChain};
use std::cmp::Ordering;
/// Struct representing pre-tokenized text
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct PreTokenizedString {
/// Original text
pub text: String,
/// Tokens derived from the text
pub tokens: Vec<Token>,
}
impl Ord for PreTokenizedString {
fn cmp(&self, other: &Self) -> Ordering {
self.text.cmp(&other.text)
}
}
impl PartialOrd for PreTokenizedString {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
/// TokenStream implementation which wraps PreTokenizedString
pub struct PreTokenizedStream {
tokenized_string: PreTokenizedString,
current_token: i64,
}
impl From<PreTokenizedString> for PreTokenizedStream {
fn from(s: PreTokenizedString) -> PreTokenizedStream {
PreTokenizedStream {
tokenized_string: s,
current_token: -1,
}
}
}
impl PreTokenizedStream {
/// Creates a TokenStream from an array of PreTokenizedStrings
pub fn chain_tokenized_strings<'a>(
tok_strings: &'a [&'a PreTokenizedString],
) -> Box<dyn TokenStream + 'a> {
if tok_strings.len() == 1 {
Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()))
} else {
let mut offsets = vec![];
let mut total_offset = 0;
for &tok_string in tok_strings {
offsets.push(total_offset);
if let Some(last_token) = tok_string.tokens.last() {
total_offset += last_token.offset_to;
}
}
let token_streams: Vec<_> = tok_strings
.iter()
.map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
.collect();
Box::new(TokenStreamChain::new(offsets, token_streams))
}
}
}
impl TokenStream for PreTokenizedStream {
fn advance(&mut self) -> bool {
self.current_token += 1;
self.current_token < self.tokenized_string.tokens.len() as i64
}
fn token(&self) -> &Token {
assert!(
self.current_token >= 0,
"TokenStream not initialized. You should call advance() at least once."
);
&self.tokenized_string.tokens[self.current_token as usize]
}
fn token_mut(&mut self) -> &mut Token {
assert!(
self.current_token >= 0,
"TokenStream not initialized. You should call advance() at least once."
);
&mut self.tokenized_string.tokens[self.current_token as usize]
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::Token;
#[test]
fn test_tokenized_stream() {
let tok_text = PreTokenizedString {
text: String::from("A a"),
tokens: vec![
Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 2,
offset_to: 3,
position: 1,
text: String::from("a"),
position_length: 1,
},
],
};
let mut token_stream = PreTokenizedStream::from(tok_text.clone());
for expected_token in tok_text.tokens {
assert!(token_stream.advance());
assert_eq!(token_stream.token(), &expected_token);
}
assert!(!token_stream.advance());
}
#[test]
fn test_chain_tokenized_strings() {
let tok_text = PreTokenizedString {
text: String::from("A a"),
tokens: vec![
Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 2,
offset_to: 3,
position: 1,
text: String::from("a"),
position_length: 1,
},
],
};
let chain_parts = vec![&tok_text, &tok_text];
let mut token_stream = PreTokenizedStream::chain_tokenized_strings(&chain_parts[..]);
let expected_tokens = vec![
Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 2,
offset_to: 3,
position: 1,
text: String::from("a"),
position_length: 1,
},
Token {
offset_from: 3,
offset_to: 4,
position: 3,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 5,
offset_to: 6,
position: 4,
text: String::from("a"),
position_length: 1,
},
];
for expected_token in expected_tokens {
assert!(token_stream.advance());
assert_eq!(token_stream.token(), &expected_token);
}
assert!(!token_stream.advance());
}
}
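For context on the file removed above, a minimal sketch of driving the `PreTokenizedStream` it defined (this only compiles on the side of the diff that still exports these types from `tantivy::tokenizer`):

```rust
use tantivy::tokenizer::{PreTokenizedStream, PreTokenizedString, Token, TokenStream};

fn main() {
    // One hand-built token covering the whole text.
    let pre_tok = PreTokenizedString {
        text: String::from("hello"),
        tokens: vec![Token {
            offset_from: 0,
            offset_to: 5,
            position: 0,
            text: String::from("hello"),
            position_length: 1,
        }],
    };

    // Drive the stream like any other TokenStream.
    let mut stream = PreTokenizedStream::from(pre_tok);
    while let Some(token) = stream.next() {
        println!("{} @ {}..{}", token.text, token.offset_from, token.offset_to);
    }
}
```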


@@ -4,7 +4,7 @@ use crate::tokenizer::TokenStreamChain;
use std::borrow::{Borrow, BorrowMut}; use std::borrow::{Borrow, BorrowMut};
/// Token /// Token
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] #[derive(Debug, Clone)]
pub struct Token { pub struct Token {
/// Offset (byte index) of the first character of the token. /// Offset (byte index) of the first character of the token.
/// Offsets shall not be modified by token filters. /// Offsets shall not be modified by token filters.
@@ -58,10 +58,12 @@ pub trait Tokenizer<'a>: Sized + Clone {
/// ```rust /// ```rust
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// # fn main() {
/// let en_stem = SimpleTokenizer /// let en_stem = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser) /// .filter(LowerCaser)
/// .filter(Stemmer::default()); /// .filter(Stemmer::default());
/// # }
/// ``` /// ```
/// ///
fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self> fn filter<NewFilter>(self, new_filter: NewFilter) -> ChainTokenizer<NewFilter, Self>
@@ -186,6 +188,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// ``` /// ```
/// use tantivy::tokenizer::*; /// use tantivy::tokenizer::*;
/// ///
/// # fn main() {
/// let tokenizer = SimpleTokenizer /// let tokenizer = SimpleTokenizer
/// .filter(RemoveLongFilter::limit(40)) /// .filter(RemoveLongFilter::limit(40))
/// .filter(LowerCaser); /// .filter(LowerCaser);
@@ -204,6 +207,7 @@ impl<'b> TokenStream for Box<dyn TokenStream + 'b> {
/// assert_eq!(token.offset_to, 12); /// assert_eq!(token.offset_to, 12);
/// assert_eq!(token.position, 1); /// assert_eq!(token.position, 1);
/// } /// }
/// # }
/// ``` /// ```
/// ///
pub trait TokenStream { pub trait TokenStream {
@@ -223,15 +227,17 @@ pub trait TokenStream {
/// and `.token()`. /// and `.token()`.
/// ///
/// ``` /// ```
/// use tantivy::tokenizer::*; /// # use tantivy::tokenizer::*;
/// /// #
/// let tokenizer = SimpleTokenizer /// # fn main() {
/// .filter(RemoveLongFilter::limit(40)) /// # let tokenizer = SimpleTokenizer
/// .filter(LowerCaser); /// # .filter(RemoveLongFilter::limit(40))
/// # .filter(LowerCaser);
/// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer"); /// let mut token_stream = tokenizer.token_stream("Hello, happy tax payer");
/// while let Some(token) = token_stream.next() { /// while let Some(token) = token_stream.next() {
/// println!("Token {:?}", token.text); /// println!("Token {:?}", token.text);
/// } /// }
/// # }
/// ``` /// ```
fn next(&mut self) -> Option<&Token> { fn next(&mut self) -> Option<&Token> {
if self.advance() { if self.advance() {


@@ -1,8 +1,7 @@
use fail; use fail;
use std::io::Write;
use std::path::Path; use std::path::Path;
use tantivy::directory::{ use tantivy::directory::{Directory, ManagedDirectory, RAMDirectory, TerminatingWrite};
Directory, ManagedDirectory, RAMDirectory, ReadOnlyDirectory, TerminatingWrite,
};
use tantivy::doc; use tantivy::doc;
use tantivy::schema::{Schema, TEXT}; use tantivy::schema::{Schema, TEXT};
use tantivy::{Index, Term}; use tantivy::{Index, Term};
@@ -30,11 +29,11 @@ fn test_failpoints_managed_directory_gc_if_delete_fails() {
// The initial 1*off is there to allow for the removal of the // The initial 1*off is there to allow for the removal of the
// lock file. // lock file.
fail::cfg("RAMDirectory::delete", "1*off->1*return").unwrap(); fail::cfg("RAMDirectory::delete", "1*off->1*return").unwrap();
assert!(managed_directory.garbage_collect(Default::default).is_ok()); managed_directory.garbage_collect(Default::default);
assert!(managed_directory.exists(test_path)); assert!(managed_directory.exists(test_path));
// running the gc a second time should remove the file. // running the gc a second time should remove the file.
assert!(managed_directory.garbage_collect(Default::default).is_ok()); managed_directory.garbage_collect(Default::default);
assert!( assert!(
!managed_directory.exists(test_path), !managed_directory.exists(test_path),
"The file should have been deleted" "The file should have been deleted"