Compare commits


43 Commits

Author SHA1 Message Date
Paul Masurel
4c846b1202 Added NRT directory kinda working 2019-04-05 10:07:29 +09:00
Paul Masurel
fac0013454 Added flush 2019-04-04 09:36:06 +09:00
Paul Masurel
95db5d9999 Merge branch 'master' into softcommits 2019-03-23 18:08:07 +09:00
Paul Masurel
e3abb4481b broken link 2019-03-22 09:58:28 +09:00
Paul Masurel
bfa61d2f2f Added patreon button 2019-03-22 09:51:00 +09:00
Paul Masurel
6c0e621fdb Added bench info in README 2019-03-21 09:35:04 +09:00
Paul Masurel
a8cc5208f1 Linear simd (#519)
* linear simd search within block
2019-03-20 22:10:05 +09:00
Paul Masurel
83eb0d0cb7 Disabling tests on Android 2019-03-20 10:24:17 +09:00
Paul Masurel
ee6e273365 cleanup for nodefaultfeatures 2019-03-20 10:04:42 +09:00
Paul Masurel
6ea34b3d53 Fix version 2019-03-20 09:39:24 +09:00
Paul Masurel
22cf1004bd Reenabled test on android 2019-03-20 08:54:52 +09:00
Paul Masurel
5768d93171 Rename try to attempt as try is becoming a keyword in rust 2019-03-20 08:54:19 +09:00
Paul Masurel
663dd89c05 Feature/reader (#517)
Adding IndexReader to the API. Making it possible to watch for changes.

* Closes #500
2019-03-20 08:39:22 +09:00
barrotsteindev
a934577168 WIP: date field (#487)
* initial version, still a work in progress

* remove redudant or

* add chrono::DateTime and index i64

* add more tests

* fix tests

* pass DateTime by ptr

* remove println!

* document query_parser rfc 3339 date support

* added some more docs about implementation to schema.rs

* enforce DateTime is UTC, and re-export chrono

* added DateField to changelog

* fixed conflict

* use INDEXED instead of INT_INDEXED for date fields
2019-03-15 22:10:37 +09:00
Paul Masurel
94f1885334 Issue/513 (#514)
* Closes #513

* Clean up and doc

* Updated changelog
2019-03-07 09:39:30 +09:00
Jonathan Fok kan
2ccfdb97b5 WIP: compiling to wasm (#512)
* First work to enable compile to wasm

* Added back fst-regex/mmap to mmap feature

* Removed fst-regex. Forced uuid version 0.7.2.
2019-03-06 10:40:54 +09:00
Paul Masurel
e67883138d Cargo fmt 2019-03-06 10:31:00 +09:00
Paul Masurel
f5c65f1f60 Added comment on the constructor fo TopDocSByField 2019-03-06 10:30:37 +09:00
Mauri de Souza Nunes
ec73a9a284 Remove note about panicking in get_field docs (#503)
Since get_field rely on calling get on the underlying InnerSchema HashMap
it shouldn't fail if the field was not found, it simply returns None.
2019-02-28 09:23:00 +09:00
Thomas Schaller
a814a31f1e Remove semicolon from doc! expansion (#509) 2019-02-28 09:20:43 +09:00
Paul Masurel
9acadb3756 Code cleaning 2019-02-26 10:50:36 +09:00
Paul Masurel
774fcecf23 cargo fmt 2019-02-26 10:44:59 +09:00
Paul Masurel
27c9fa6028 Jannickj prove bug with facets (#508)
* prove bug with facets

* Closing #505

Introduce a term id in the TermHashMap
2019-02-25 22:33:17 +09:00
Paul Masurel
fdefea9e26 Removed path reference to tantivy-fst 2019-02-23 10:42:44 +09:00
Paul Masurel
b422f9c389 Partially addresses #500 (#502)
Using `tantivy_fst`. Storing `Weak<Mmap>` in the Mmap cache.
2019-02-23 10:33:59 +09:00
petr-tik
9451fd5b09 MsQueue to channel (#495)
* Format

Made the docstring consistent
remove empty line

* Move matches to dev deps

* Replace MsQueue with an unbounded crossbeam-channel

Questions:
queue.push ignores Result return

How to test pop() calls, if they block

* Format

Made the docstring consistent
remove empty line

* Unwrap the Result of queue.pop

* Addressed Paul's review

wrap the Result-returning send call with expect()

implemented the test not to fail after popping from empty queue

removed references to the Michael-Scott Queue

formatted
2019-02-23 09:06:50 +09:00
Jason Goldberger
788b3803d9 updated changelog (#501)
* updated changelog

* Update CHANGELOG.md

* Update CHANGELOG.md
2019-02-19 00:25:18 +09:00
Paul Masurel
7f0372fa97 reader 2019-02-16 16:09:16 +09:00
Paul Masurel
f8fdf68fcb unit test 2019-02-16 15:49:22 +09:00
Paul Masurel
c00e95cd04 Uncommited is not SegmentRegisters 2019-02-15 22:44:14 +09:00
Paul Masurel
a623d8f6d9 Added SegmentAvailable readonly view 2019-02-15 08:58:08 +09:00
Paul Masurel
5b11228083 Merge branch 'master' of github.com:tantivy-search/tantivy 2019-02-15 08:30:55 +09:00
Paul Masurel
515adff644 Merge branch 'hotfix/0.8.2' 2019-02-15 08:30:27 +09:00
Paul Masurel
e70a45426a 0.8.2 release
Backporting a fix for non x86_64 platforms
2019-02-14 09:16:27 +09:00
Paul Masurel
b3ede2dd7e softcommits 2019-02-13 21:29:54 +09:00
Paul Masurel
b68686f040 opstamp constraint 2019-02-12 18:14:07 +09:00
Paul Masurel
629d3fb37f Added opstamp 2019-02-12 08:49:23 +09:00
Paul Masurel
f513f10e05 fmt 2019-02-08 15:04:35 +09:00
Paul Masurel
f262d4cc22 code cleaning 2019-02-08 14:54:34 +09:00
Paul Masurel
91e89714f4 Added soft commits 2019-02-08 14:42:52 +09:00
Paul Masurel
6fd3cb1254 Renaming 2019-02-06 05:48:15 +01:00
Paul Masurel
549b4e66e5 Using the new API 2019-02-06 00:17:56 +01:00
Paul Masurel
d9b2bf98e2 First stab 2019-02-05 21:23:07 +01:00
98 changed files with 3106 additions and 1342 deletions

View File

@@ -29,7 +29,7 @@ addons:
matrix: matrix:
include: include:
# Android # Android
- env: TARGET=aarch64-linux-android DISABLE_TESTS=1 - env: TARGET=aarch64-linux-android DISABLE_TESTS
#- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1 #- env: TARGET=arm-linux-androideabi DISABLE_TESTS=1
#- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1 #- env: TARGET=armv7-linux-androideabi DISABLE_TESTS=1
#- env: TARGET=i686-linux-android DISABLE_TESTS=1 #- env: TARGET=i686-linux-android DISABLE_TESTS=1
@@ -77,4 +77,4 @@ before_cache:
notifications: notifications:
email: email:
on_success: never on_success: never

View File

@@ -2,10 +2,27 @@ Tantivy 0.9.0
 =====================
 *0.9.0 index format is not compatible with the
 previous index format.*
+- MAJOR BUGFIX:
+  Some `Mmap` objects were being leaked, and would never get released. (@fulmicoton)
 - Removed most unsafe (@fulmicoton)
 - Indexer memory footprint improved. (VInt comp, inlining the first block.) (@fulmicoton)
 - Stemming in other languages possible (@pentlander)
 - Segments with no docs are deleted earlier (@barrotsteindev)
+- Added grouped add and delete operations.
+  They are guaranteed to happen together (i.e. they cannot be split by a commit).
+  In addition, adds are guaranteed to happen on the same segment. (@elbow-jason)
+- Removed `INT_STORED` and `INT_INDEXED`. It is now possible to use `STORED` and `INDEXED`
+  for int fields. (@fulmicoton)
+- Added DateTime field (@barrotsteindev)
+- Added IndexReader. By default, the index is reloaded automatically upon new commits (@fulmicoton)
+- SIMD linear search within blocks (@fulmicoton)
+
+Tantivy 0.8.2
+=====================
+Fixing build for x86_64 platforms. (#496)
+No need to update from 0.8.1 if tantivy
+is building on your platform.
+
 Tantivy 0.8.1
 =====================
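Taken together, the 0.9.0 entries above change the two things most callers touch: integer fields now use the plain `INDEXED`/`STORED` flags, and searchers come from an `IndexReader` rather than `Index::load_searchers()`/`Index::searcher()`. A minimal sketch of the new flow (my own illustration based on the API shown in the diffs further down, not code from this changeset):

#[macro_use]
extern crate tantivy;

use tantivy::schema::{Schema, INDEXED};
use tantivy::{Index, ReloadPolicy};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // `INDEXED` replaces the removed `INT_INDEXED` flag for integer fields.
    let year = schema_builder.add_u64_field("year", INDEXED);
    let index = Index::create_in_ram(schema_builder.build());

    // One long-lived reader per index; it replaces load_searchers()/searcher().
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::OnCommit)
        .try_into()?;

    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    writer.add_document(doc!(year => 2019u64));
    writer.commit()?;

    // Under OnCommit the reload happens in the background shortly after the
    // commit; calling reload() makes the new documents visible immediately.
    reader.reload()?;
    assert_eq!(reader.searcher().num_docs(), 1);
    Ok(())
}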

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tantivy" name = "tantivy"
version = "0.9.0-dev" version = "0.9.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"] authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT" license = "MIT"
categories = ["database-implementations", "data-structures"] categories = ["database-implementations", "data-structures"]
@@ -16,8 +16,8 @@ base64 = "0.10.0"
byteorder = "1.0" byteorder = "1.0"
lazy_static = "1" lazy_static = "1"
regex = "1.0" regex = "1.0"
fst = {version="0.3", default-features=false} tantivy-fst = "0.1"
fst-regex = { version="0.2" } memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true} lz4 = {version="1.20", optional=true}
snap = {version="0.2"} snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true} atomicwrites = {version="0.2.2", optional=true}
@@ -32,8 +32,9 @@ num_cpus = "1.2"
fs2={version="0.4", optional=true} fs2={version="0.4", optional=true}
itertools = "0.8" itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]} levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true}
bit-set = "0.5" bit-set = "0.5"
uuid = { version = "0.7", features = ["v4", "serde"] } uuid = { version = "0.7.2", features = ["v4", "serde"] }
crossbeam = "0.5" crossbeam = "0.5"
futures = "0.1" futures = "0.1"
futures-cpupool = "0.1" futures-cpupool = "0.1"
@@ -41,7 +42,6 @@ owning_ref = "0.4"
stable_deref_trait = "1.0.0" stable_deref_trait = "1.0.0"
rust-stemmers = "1.1" rust-stemmers = "1.1"
downcast-rs = { version="1.0" } downcast-rs = { version="1.0" }
matches = "0.1"
bitpacking = "0.6" bitpacking = "0.6"
census = "0.2" census = "0.2"
fnv = "1.0.6" fnv = "1.0.6"
@@ -51,6 +51,7 @@ htmlescape = "0.3.1"
fail = "0.2" fail = "0.2"
scoped-pool = "1.0" scoped-pool = "1.0"
murmurhash32 = "0.2" murmurhash32 = "0.2"
chrono = "0.4"
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]
winapi = "0.2" winapi = "0.2"
@@ -58,6 +59,8 @@ winapi = "0.2"
[dev-dependencies] [dev-dependencies]
rand = "0.6" rand = "0.6"
maplit = "1" maplit = "1"
matches = "0.1.8"
time = "0.1.42"
[profile.release] [profile.release]
opt-level = 3 opt-level = 3
@@ -71,12 +74,11 @@ overflow-checks = true
[features] [features]
# by default no-fail is disabled. We manually enable it when running test. # by default no-fail is disabled. We manually enable it when running test.
default = ["mmap", "no_fail"] default = ["mmap", "no_fail"]
mmap = ["fst/mmap", "atomicwrites", "fs2"] mmap = ["atomicwrites", "fs2", "memmap", "notify"]
lz4-compression = ["lz4"] lz4-compression = ["lz4"]
no_fail = ["fail/no_fail"] no_fail = ["fail/no_fail"]
unstable = [] # useful for benches. unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"]
[badges] [badges]
travis-ci = { repository = "tantivy-search/tantivy" } travis-ci = { repository = "tantivy-search/tantivy" }

View File

@@ -17,6 +17,7 @@
 [![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/6)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/6)
 [![](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/images/7)](https://sourcerer.io/fame/fulmicoton/tantivy-search/tantivy/links/7)
+[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
 **Tantivy** is a **full text search engine library** written in rust.
@@ -27,9 +28,18 @@ to build such a search engine.
 Tantivy is, in fact, strongly inspired by Lucene's design.
+# Benchmark
+Tantivy is typically faster than Lucene, but the results will depend on
+the nature of the queries in your workload.
+The following [benchmark](https://tantivy-search.github.io/bench/) breaks down
+performance for different types of queries / collections.
 # Features
 - Full-text search
+- Configurable tokenizer (stemming available for 17 Latin languages; third-party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter))
 - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
 - Tiny startup time (<10ms), perfect for command line tools
 - BM25 scoring (the same as lucene)
@@ -41,6 +51,7 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 - SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
 - Single valued and multivalued u64 and i64 fast fields (equivalent of doc values in Lucene)
 - `&[u8]` fast fields
+- Text, i64, u64, dates and hierarchical facet fields
 - LZ4 compressed document store
 - Range queries
 - Faceted search
@@ -85,6 +96,14 @@ To check out and run tests, you can simply run :
 Some tests will not run with just `cargo test` because of `fail-rs`.
 To run the tests exhaustively, run `./run-tests.sh`.
-# Contribute
-Send me an email (paul.masurel at gmail.com) if you want to contribute to tantivy.
+# How can I support this project ?
+There are many ways to support this project.
+- If you use tantivy, tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
+- Report bugs
+- Write a blog post
+- Complete documentation
+- Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy))
+- Talk about tantivy around you
+- Drop a word on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)

View File

@@ -20,6 +20,7 @@ use tantivy::collector::TopDocs;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::Index; use tantivy::Index;
use tantivy::ReloadPolicy;
use tempdir::TempDir; use tempdir::TempDir;
fn main() -> tantivy::Result<()> { fn main() -> tantivy::Result<()> {
@@ -170,24 +171,33 @@ fn main() -> tantivy::Result<()> {
// //
// ### Searcher // ### Searcher
// //
// Let's search our index. Start by reloading // A reader is required to get search the index.
// searchers in the index. This should be done // It acts as a `Searcher` pool that reloads itself,
// after every `commit()`. // depending on a `ReloadPolicy`.
index.load_searchers()?; //
// For a search server you will typically create one reader for the entire lifetime of your
// program, and acquire a new searcher for every single request.
//
// In the code below, we rely on the 'ON_COMMIT' policy: the reader
// will reload the index automatically after each commit.
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;
// We now need to acquire a searcher. // We now need to acquire a searcher.
// Some search experience might require more than
// one query.
// //
// The searcher ensure that we get to work // A searcher points to snapshotted, immutable version of the index.
// with a consistent version of the index. //
// Some search experience might require more than
// one query. Using the same searcher ensures that all of these queries will run on the
// same version of the index.
// //
// Acquiring a `searcher` is very cheap. // Acquiring a `searcher` is very cheap.
// //
// You should acquire a searcher every time you // You should acquire a searcher every time you start processing a request and
// start processing a request and
// and release it right after your query is finished. // and release it right after your query is finished.
let searcher = index.searcher(); let searcher = reader.searcher();
// ### Query // ### Query
@@ -224,7 +234,6 @@ fn main() -> tantivy::Result<()> {
// Since the body field was not configured as stored, // Since the body field was not configured as stored,
// the document returned will only contain // the document returned will only contain
// a title. // a title.
for (_score, doc_address) in top_docs { for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?; let retrieved_doc = searcher.doc(doc_address)?;
println!("{}", schema.to_json(&retrieved_doc)); println!("{}", schema.to_json(&retrieved_doc));
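Condensed, the rewritten comments describe this pattern: one long-lived `IndexReader`, one cheap `Searcher` per request. A rough sketch under that assumption; the helper name `search_titles` is illustrative, not part of the example:

use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{DocAddress, Index, IndexReader, Score};

fn search_titles(
    index: &Index,
    reader: &IndexReader,
    title: Field,
    query_str: &str,
) -> tantivy::Result<Vec<(Score, DocAddress)>> {
    // A searcher pins a consistent, immutable snapshot of the index;
    // acquire one per request and drop it when the request is done.
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(index, vec![title]);
    let query = query_parser.parse_query(query_str)?;
    searcher.search(&query, &TopDocs::with_limit(10))
}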

View File

@@ -17,7 +17,7 @@ use tantivy::collector::{Collector, SegmentCollector};
use tantivy::fastfield::FastFieldReader; use tantivy::fastfield::FastFieldReader;
use tantivy::query::QueryParser; use tantivy::query::QueryParser;
use tantivy::schema::Field; use tantivy::schema::Field;
use tantivy::schema::{Schema, FAST, INT_INDEXED, TEXT}; use tantivy::schema::{Schema, FAST, INDEXED, TEXT};
use tantivy::Index; use tantivy::Index;
use tantivy::SegmentReader; use tantivy::SegmentReader;
@@ -137,7 +137,7 @@ fn main() -> tantivy::Result<()> {
// products, and with a name, a description, and a price. // products, and with a name, a description, and a price.
let product_name = schema_builder.add_text_field("name", TEXT); let product_name = schema_builder.add_text_field("name", TEXT);
let product_description = schema_builder.add_text_field("description", TEXT); let product_description = schema_builder.add_text_field("description", TEXT);
let price = schema_builder.add_u64_field("price", INT_INDEXED | FAST); let price = schema_builder.add_u64_field("price", INDEXED | FAST);
let schema = schema_builder.build(); let schema = schema_builder.build();
// # Indexing documents // # Indexing documents
@@ -170,9 +170,9 @@ fn main() -> tantivy::Result<()> {
price => 5_200u64 price => 5_200u64
)); ));
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher(); let reader = index.reader()?;
let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]); let query_parser = QueryParser::for_index(&index, vec![product_name, product_description]);
// here we want to get a hit on the 'ken' in Frankenstein // here we want to get a hit on the 'ken' in Frankenstein

View File

@@ -91,9 +91,9 @@ fn main() -> tantivy::Result<()> {
increasing confidence in the success of my undertaking."# increasing confidence in the success of my undertaking."#
)); ));
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?;
let searcher = index.searcher(); let reader = index.reader()?;
let searcher = reader.searcher();
// The query parser can interpret human queries. // The query parser can interpret human queries.
// Here, if the user does not specify which // Here, if the user does not specify which

View File

@@ -14,12 +14,16 @@ use tantivy::collector::TopDocs;
use tantivy::query::TermQuery; use tantivy::query::TermQuery;
use tantivy::schema::*; use tantivy::schema::*;
use tantivy::Index; use tantivy::Index;
use tantivy::IndexReader;
// A simple helper function to fetch a single document // A simple helper function to fetch a single document
// given its id from our index. // given its id from our index.
// It will be helpful to check our work. // It will be helpful to check our work.
fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Option<Document>> { fn extract_doc_given_isbn(
let searcher = index.searcher(); reader: &IndexReader,
isbn_term: &Term,
) -> tantivy::Result<Option<Document>> {
let searcher = reader.searcher();
// This is the simplest query you can think of. // This is the simplest query you can think of.
// It matches all of the documents containing a specific term. // It matches all of the documents containing a specific term.
@@ -85,12 +89,12 @@ fn main() -> tantivy::Result<()> {
isbn => "978-9176370711", isbn => "978-9176370711",
)); ));
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?; let reader = index.reader()?;
let frankenstein_isbn = Term::from_field_text(isbn, "978-9176370711"); let frankenstein_isbn = Term::from_field_text(isbn, "978-9176370711");
// Oops our frankenstein doc seems mispelled // Oops our frankenstein doc seems mispelled
let frankenstein_doc_misspelled = extract_doc_given_isbn(&index, &frankenstein_isbn)?.unwrap(); let frankenstein_doc_misspelled = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!( assert_eq!(
schema.to_json(&frankenstein_doc_misspelled), schema.to_json(&frankenstein_doc_misspelled),
r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#, r#"{"isbn":["978-9176370711"],"title":["Frankentein"]}"#,
@@ -129,10 +133,10 @@ fn main() -> tantivy::Result<()> {
// Everything happened as if the document was updated. // Everything happened as if the document was updated.
index_writer.commit()?; index_writer.commit()?;
// We reload our searcher to make our change available to clients. // We reload our searcher to make our change available to clients.
index.load_searchers()?; reader.reload()?;
// No more typo! // No more typo!
let frankenstein_new_doc = extract_doc_given_isbn(&index, &frankenstein_isbn)?.unwrap(); let frankenstein_new_doc = extract_doc_given_isbn(&reader, &frankenstein_isbn)?.unwrap();
assert_eq!( assert_eq!(
schema.to_json(&frankenstein_new_doc), schema.to_json(&frankenstein_new_doc),
r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#, r#"{"isbn":["978-9176370711"],"title":["Frankenstein"]}"#,
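The `reader.reload()` call above is what makes the delete-then-add "update" visible to searchers. A compressed sketch of that cycle, reusing the example's `isbn`/`title` fields and assuming `#[macro_use] extern crate tantivy;` for the `doc!` macro (the helper name `update_title` is made up):

use tantivy::schema::{Field, Term};
use tantivy::{IndexReader, IndexWriter};

fn update_title(
    writer: &mut IndexWriter,
    reader: &IndexReader,
    isbn: Field,
    title: Field,
) -> tantivy::Result<()> {
    // Deletes and adds queued on the same writer are applied in order,
    // so this behaves like an update of the document with this ISBN.
    writer.delete_term(Term::from_field_text(isbn, "978-9176370711"));
    writer.add_document(doc!(
        title => "Frankenstein",
        isbn => "978-9176370711",
    ));
    writer.commit()?;
    // Make the change visible to searchers handed out by this reader.
    reader.reload()?;
    Ok(())
}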

View File

@@ -55,9 +55,9 @@ fn main() -> tantivy::Result<()> {
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?; let reader = index.reader()?;
let searcher = index.searcher(); let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field(tags); let mut facet_collector = FacetCollector::for_field(tags);
facet_collector.add_facet("/pools"); facet_collector.add_facet("/pools");

View File

@@ -7,17 +7,19 @@
extern crate tantivy; extern crate tantivy;
use tantivy::collector::Count; use tantivy::collector::Count;
use tantivy::query::RangeQuery; use tantivy::query::RangeQuery;
use tantivy::schema::{Schema, INT_INDEXED}; use tantivy::schema::{Schema, INDEXED};
use tantivy::Index; use tantivy::Index;
use tantivy::Result; use tantivy::Result;
fn run() -> Result<()> { fn run() -> Result<()> {
// For the sake of simplicity, this schema will only have 1 field // For the sake of simplicity, this schema will only have 1 field
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
// INT_INDEXED is shorthand for such fields
let year_field = schema_builder.add_u64_field("year", INT_INDEXED); // `INDEXED` is a short-hand to indicate that our field should be "searchable".
let year_field = schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index.reader()?;
{ {
let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?; let mut index_writer = index.writer_with_num_threads(1, 6_000_000)?;
for year in 1950u64..2019u64 { for year in 1950u64..2019u64 {
@@ -26,8 +28,8 @@ fn run() -> Result<()> {
index_writer.commit()?; index_writer.commit()?;
// The index will be a range of years // The index will be a range of years
} }
index.load_searchers()?; reader.reload()?;
let searcher = index.searcher(); let searcher = reader.searcher();
// The end is excluded i.e. here we are searching up to 1969 // The end is excluded i.e. here we are searching up to 1969
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970); let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
// Uses a Count collector to sum the total number of docs in the range // Uses a Count collector to sum the total number of docs in the range
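As the comment notes, the upper bound of the range is excluded, so `1960..1970` matches the years 1960 through 1969. A small generalization of the same idea, assuming the same kind of `INDEXED` u64 field (the `count_decade` helper is illustrative):

use tantivy::collector::Count;
use tantivy::query::RangeQuery;
use tantivy::schema::Field;
use tantivy::Searcher;

fn count_decade(searcher: &Searcher, year_field: Field, decade_start: u64) -> tantivy::Result<usize> {
    // `start..start + 10` is end-exclusive, so 1960 covers 1960..=1969.
    let query = RangeQuery::new_u64(year_field, decade_start..decade_start + 10);
    searcher.search(&query, &Count)
}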

View File

@@ -33,9 +33,9 @@ fn main() -> tantivy::Result<()> {
index_writer.add_document(doc!(title => "The modern Promotheus")); index_writer.add_document(doc!(title => "The modern Promotheus"));
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?; let reader = index.reader()?;
let searcher = index.searcher(); let searcher = reader.searcher();
// A tantivy index is actually a collection of segments. // A tantivy index is actually a collection of segments.
// Similarly, a searcher just wraps a list `segment_reader`. // Similarly, a searcher just wraps a list `segment_reader`.

View File

@@ -48,9 +48,8 @@ fn main() -> tantivy::Result<()> {
// ... // ...
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?; let reader = index.reader()?;
let searcher = reader.searcher();
let searcher = index.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, body]); let query_parser = QueryParser::for_index(&index, vec![title, body]);
let query = query_parser.parse_query("sycamore spring")?; let query = query_parser.parse_query("sycamore spring")?;

View File

@@ -96,9 +96,9 @@ fn main() -> tantivy::Result<()> {
index_writer.commit()?; index_writer.commit()?;
index.load_searchers()?; let reader = index.reader()?;
let searcher = index.searcher(); let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, body]); let query_parser = QueryParser::for_index(&index, vec![title, body]);

View File

@@ -12,7 +12,7 @@ fn main() -> tantivy::Result<()> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED); schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT); schema_builder.add_text_field("body", TEXT);
schema_builder.add_u64_field("year", INT_INDEXED); schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
// Let's assume we have a json-serialized document. // Let's assume we have a json-serialized document.

View File

@@ -40,8 +40,8 @@ use SegmentReader;
/// index_writer.commit().unwrap(); /// index_writer.commit().unwrap();
/// } /// }
/// ///
/// index.load_searchers()?; /// let reader = index.reader()?;
/// let searcher = index.searcher(); /// let searcher = reader.searcher();
/// ///
/// { /// {
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let query_parser = QueryParser::for_index(&index, vec![title]);

View File

@@ -122,17 +122,16 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// facet => Facet::from("/lang/en"), /// facet => Facet::from("/lang/en"),
/// facet => Facet::from("/category/biography") /// facet => Facet::from("/category/biography")
/// )); /// ));
/// index_writer.commit().unwrap(); /// index_writer.commit()?;
/// } /// }
/// /// let reader = index.reader()?;
/// index.load_searchers()?; /// let searcher = reader.searcher();
/// let searcher = index.searcher();
/// ///
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/lang"); /// facet_collector.add_facet("/lang");
/// facet_collector.add_facet("/category"); /// facet_collector.add_facet("/category");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap(); /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
/// // This lists all of the facet counts /// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = facet_counts /// let facets: Vec<(&Facet, u64)> = facet_counts
@@ -147,7 +146,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap(); /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
/// // This lists all of the facet counts /// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = facet_counts /// let facets: Vec<(&Facet, u64)> = facet_counts
@@ -163,7 +162,7 @@ fn facet_depth(facet_bytes: &[u8]) -> usize {
/// { /// {
/// let mut facet_collector = FacetCollector::for_field(facet); /// let mut facet_collector = FacetCollector::for_field(facet);
/// facet_collector.add_facet("/category/fiction"); /// facet_collector.add_facet("/category/fiction");
/// let facet_counts = searcher.search(&AllQuery, &facet_collector).unwrap(); /// let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
/// ///
/// // This lists all of the facet counts /// // This lists all of the facet counts
/// let facets: Vec<(&Facet, u64)> = facet_counts.top_k("/category/fiction", 1); /// let facets: Vec<(&Facet, u64)> = facet_counts.top_k("/category/fiction", 1);
@@ -483,8 +482,8 @@ mod tests {
index_writer.add_document(doc); index_writer.add_document(doc);
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top1")); facet_collector.add_facet(Facet::from("/top1"));
let counts = searcher.search(&AllQuery, &facet_collector).unwrap(); let counts = searcher.search(&AllQuery, &facet_collector).unwrap();
@@ -532,8 +531,8 @@ mod tests {
facet_field => Facet::from_text(&"/subjects/B/b"), facet_field => Facet::from_text(&"/subjects/B/b"),
)); ));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 1); assert_eq!(searcher.num_docs(), 1);
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/subjects"); facet_collector.add_facet("/subjects");
@@ -579,9 +578,7 @@ mod tests {
index_writer.add_document(doc); index_writer.add_document(doc);
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field); let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet("/facet"); facet_collector.add_facet("/facet");
@@ -635,8 +632,7 @@ mod bench {
index_writer.add_document(doc); index_writer.add_document(doc);
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
b.iter(|| { b.iter(|| {
let searcher = index.searcher(); let searcher = index.searcher();
let facet_collector = FacetCollector::for_field(facet_field); let facet_collector = FacetCollector::for_field(facet_field);
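The doc-comment examples above boil down to: point a `FacetCollector` at a facet field, register the prefixes you care about, search with `AllQuery`, then read the counts back. A short sketch along those lines, assuming a facet field populated with paths such as `/category/fiction` (the helper name is made up):

use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::schema::Field;
use tantivy::Searcher;

fn print_top_categories(searcher: &Searcher, facet: Field) -> tantivy::Result<()> {
    let mut facet_collector = FacetCollector::for_field(facet);
    // Only count facets under the "/category" prefix.
    facet_collector.add_facet("/category");
    let facet_counts = searcher.search(&AllQuery, &facet_collector)?;
    // e.g. ("/category/fiction", 3): the three most frequent child facets.
    for (facet_path, count) in facet_counts.top_k("/category", 3) {
        println!("{:?} => {}", facet_path, count);
    }
    Ok(())
}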

View File

@@ -101,8 +101,7 @@ mod tests {
assert_eq!(index_writer.commit().unwrap(), 10u64); assert_eq!(index_writer.commit().unwrap(), 10u64);
} }
index.load_searchers().unwrap(); let searcher = index.reader().searcher();
let searcher = index.searcher();
let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64); let mut ffvf_i64: IntFacetCollector<I64FastFieldReader> = IntFacetCollector::new(num_field_i64);
let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64); let mut ffvf_u64: IntFacetCollector<U64FastFieldReader> = IntFacetCollector::new(num_field_u64);

View File

@@ -53,9 +53,9 @@ use tantivy::collector::{Count, TopDocs};
# index_writer.add_document(doc!( # index_writer.add_document(doc!(
# title => "The Diary of Muadib", # title => "The Diary of Muadib",
# )); # ));
# index_writer.commit().unwrap(); # index_writer.commit()?;
# index.load_searchers()?; # let reader = index.reader()?;
# let searcher = index.searcher(); # let searcher = reader.searcher();
# let query_parser = QueryParser::for_index(&index, vec![title]); # let query_parser = QueryParser::for_index(&index, vec![title]);
# let query = query_parser.parse_query("diary")?; # let query = query_parser.parse_query("diary")?;
let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) = let (doc_count, top_docs): (usize, Vec<(Score, DocAddress)>) =
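The hidden setup above ends with a tuple destructuring, which is the point of the example: collectors can be combined as a tuple so one search pass yields several results. A sketch completing that idea with the same `Count` and `TopDocs` collectors (assumed, since the closing lines of the doc example are not shown here):

use tantivy::collector::{Count, TopDocs};
use tantivy::query::Query;
use tantivy::{DocAddress, Score, Searcher};

fn count_and_top2(
    searcher: &Searcher,
    query: &Query,
) -> tantivy::Result<(usize, Vec<(Score, DocAddress)>)> {
    // Each collector in the tuple produces its own fruit; a single pass
    // over the matching documents feeds both of them.
    let (doc_count, top_docs) = searcher.search(query, &(Count, TopDocs::with_limit(2)))?;
    Ok((doc_count, top_docs))
}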

View File

@@ -134,8 +134,8 @@ impl<TFruit: Fruit> FruitHandle<TFruit> {
/// index_writer.commit().unwrap(); /// index_writer.commit().unwrap();
/// } /// }
/// ///
/// index.load_searchers()?; /// let reader = index.reader()?;
/// let searcher = index.searcher(); /// let searcher = reader.searcher();
/// ///
/// let mut collectors = MultiCollector::new(); /// let mut collectors = MultiCollector::new();
/// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2)); /// let top_docs_handle = collectors.add_collector(TopDocs::with_limit(2));
@@ -278,8 +278,7 @@ mod tests {
index_writer.add_document(doc!(text=>"abc")); index_writer.add_document(doc!(text=>"abc"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let term = Term::from_field_text(text, "abc"); let term = Term::from_field_text(text, "abc");
let query = TermQuery::new(term, IndexRecordOption::Basic); let query = TermQuery::new(term, IndexRecordOption::Basic);
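The test above builds its query by hand; those two lines are the whole recipe for exact-term lookups. A standalone sketch, with an illustrative helper name:

use tantivy::collector::TopDocs;
use tantivy::query::TermQuery;
use tantivy::schema::{Field, IndexRecordOption, Term};
use tantivy::{DocAddress, Score, Searcher};

fn docs_with_exact_term(
    searcher: &Searcher,
    field: Field,
    value: &str,
) -> tantivy::Result<Vec<(Score, DocAddress)>> {
    // Matches every document containing this exact term in `field`.
    let term = Term::from_field_text(field, value);
    let query = TermQuery::new(term, IndexRecordOption::Basic);
    searcher.search(&query, &TopDocs::with_limit(10))
}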

View File

@@ -23,15 +23,16 @@ use SegmentReader;
/// # use tantivy::schema::{Schema, Field, FAST, TEXT}; /// # use tantivy::schema::{Schema, Field, FAST, TEXT};
/// # use tantivy::{Index, Result, DocAddress}; /// # use tantivy::{Index, Result, DocAddress};
/// # use tantivy::query::{Query, QueryParser}; /// # use tantivy::query::{Query, QueryParser};
/// use tantivy::Searcher;
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// ///
/// # fn main() { /// # fn main() -> tantivy::Result<()> {
/// # let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
/// # let title = schema_builder.add_text_field("title", TEXT); /// # let title = schema_builder.add_text_field("title", TEXT);
/// # let rating = schema_builder.add_u64_field("rating", FAST); /// # let rating = schema_builder.add_u64_field("rating", FAST);
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
/// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); /// # let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
/// # index_writer.add_document(doc!( /// # index_writer.add_document(doc!(
/// # title => "The Name of the Wind", /// # title => "The Name of the Wind",
/// # rating => 92u64, /// # rating => 92u64,
@@ -39,13 +40,14 @@ use SegmentReader;
/// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64)); /// # index_writer.add_document(doc!(title => "The Diary of Muadib", rating => 97u64));
/// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64)); /// # index_writer.add_document(doc!(title => "A Dairy Cow", rating => 63u64));
/// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64)); /// # index_writer.add_document(doc!(title => "The Diary of a Young Girl", rating => 80u64));
/// # index_writer.commit().unwrap(); /// # index_writer.commit()?;
/// # index.load_searchers().unwrap(); /// # let reader = index.reader()?;
/// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary").unwrap(); /// # let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;
/// # let top_docs = docs_sorted_by_rating(&index, &query, rating).unwrap(); /// # let top_docs = docs_sorted_by_rating(&reader.searcher(), &query, rating)?;
/// # assert_eq!(top_docs, /// # assert_eq!(top_docs,
/// # vec![(97u64, DocAddress(0u32, 1)), /// # vec![(97u64, DocAddress(0u32, 1)),
/// # (80u64, DocAddress(0u32, 3))]); /// # (80u64, DocAddress(0u32, 3))]);
/// # Ok(())
/// # } /// # }
/// # /// #
/// /// Searches the document matching the given query, and /// /// Searches the document matching the given query, and
@@ -53,7 +55,9 @@ use SegmentReader;
/// /// given in argument. /// /// given in argument.
/// /// /// ///
/// /// `field` is required to be a FAST field. /// /// `field` is required to be a FAST field.
/// fn docs_sorted_by_rating(index: &Index, query: &Query, sort_by_field: Field) /// fn docs_sorted_by_rating(searcher: &Searcher,
/// query: &Query,
/// sort_by_field: Field)
/// -> Result<Vec<(u64, DocAddress)>> { /// -> Result<Vec<(u64, DocAddress)>> {
/// ///
/// // This is where we build our collector! /// // This is where we build our collector!
@@ -61,8 +65,7 @@ use SegmentReader;
/// ///
/// // ... and here is our documents. Not this is a simple vec. /// // ... and here is our documents. Not this is a simple vec.
/// // The `u64` in the pair is the value of our fast field for each documents. /// // The `u64` in the pair is the value of our fast field for each documents.
/// index.searcher() /// searcher.search(query, &top_docs_by_rating)
/// .search(query, &top_docs_by_rating)
/// } /// }
/// ``` /// ```
pub struct TopDocsByField<T> { pub struct TopDocsByField<T> {
@@ -76,6 +79,12 @@ impl<T: FastValue + PartialOrd + Clone> TopDocsByField<T> {
/// The given field name must be a fast field, otherwise the collector have an error while /// The given field name must be a fast field, otherwise the collector have an error while
/// collecting results. /// collecting results.
/// ///
/// This constructor is crate-private. Clients are expected to build a
/// `TopDocsByField` object through the `TopDocs` API.
///
/// e.g.:
/// `TopDocs::with_limit(2).order_by_field(sort_by_field)`
///
/// # Panics /// # Panics
/// The method panics if limit is 0 /// The method panics if limit is 0
pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> { pub(crate) fn new(field: Field, limit: usize) -> TopDocsByField<T> {
@@ -171,7 +180,7 @@ mod tests {
size => 16u64, size => 16u64,
)); ));
}); });
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_field(size); let top_collector = TopDocs::with_limit(4).order_by_field(size);
let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap(); let top_docs: Vec<(u64, DocAddress)> = searcher.search(&query, &top_collector).unwrap();
@@ -198,7 +207,7 @@ mod tests {
size => 12u64, size => 12u64,
)); ));
}); });
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2)); let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(Field(2));
let segment_reader = searcher.segment_reader(0u32); let segment_reader = searcher.segment_reader(0u32);
top_collector top_collector
@@ -218,7 +227,7 @@ mod tests {
size => 12u64, size => 12u64,
)); ));
}); });
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let segment = searcher.segment_reader(0); let segment = searcher.segment_reader(0);
let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size); let top_collector: TopDocsByField<u64> = TopDocs::with_limit(4).order_by_field(size);
assert_matches!( assert_matches!(
@@ -241,8 +250,6 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
doc_adder(&mut index_writer); doc_adder(&mut index_writer);
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap();
let query_parser = QueryParser::for_index(&index, vec![query_field]); let query_parser = QueryParser::for_index(&index, vec![query_field]);
let query = query_parser.parse_query(query).unwrap(); let query = query_parser.parse_query(query).unwrap();
(index, query) (index, query)
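The new constructor comment points callers at the public entry point: build the collector through `TopDocs::with_limit(..).order_by_field(..)`. A sketch of sorting results by a FAST u64 field, mirroring the `docs_sorted_by_rating` doc example above (helper name illustrative):

use tantivy::collector::TopDocs;
use tantivy::query::Query;
use tantivy::schema::Field;
use tantivy::{DocAddress, Searcher};

fn top_rated(
    searcher: &Searcher,
    query: &Query,
    rating: Field,
) -> tantivy::Result<Vec<(u64, DocAddress)>> {
    // `rating` must be declared as a FAST u64 field; the collector reads its
    // fast-field value for each hit and keeps the two largest.
    let by_rating = TopDocs::with_limit(2).order_by_field(rating);
    searcher.search(query, &by_rating)
}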

View File

@@ -51,8 +51,8 @@ use SegmentReader;
/// index_writer.commit().unwrap(); /// index_writer.commit().unwrap();
/// } /// }
/// ///
/// index.load_searchers()?; /// let reader = index.reader()?;
/// let searcher = index.searcher(); /// let searcher = reader.searcher();
/// ///
/// let query_parser = QueryParser::for_index(&index, vec![title]); /// let query_parser = QueryParser::for_index(&index, vec![title]);
/// let query = query_parser.parse_query("diary")?; /// let query = query_parser.parse_query("diary")?;
@@ -148,7 +148,6 @@ mod tests {
index_writer.add_document(doc!(text_field=>"I like Droopy")); index_writer.add_document(doc!(text_field=>"I like Droopy"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap();
index index
} }
@@ -159,6 +158,8 @@ mod tests {
let query_parser = QueryParser::for_index(&index, vec![field]); let query_parser = QueryParser::for_index(&index, vec![field]);
let text_query = query_parser.parse_query("droopy tax").unwrap(); let text_query = query_parser.parse_query("droopy tax").unwrap();
let score_docs: Vec<(Score, DocAddress)> = index let score_docs: Vec<(Score, DocAddress)> = index
.reader()
.unwrap()
.searcher() .searcher()
.search(&text_query, &TopDocs::with_limit(4)) .search(&text_query, &TopDocs::with_limit(4))
.unwrap(); .unwrap();
@@ -179,6 +180,8 @@ mod tests {
let query_parser = QueryParser::for_index(&index, vec![field]); let query_parser = QueryParser::for_index(&index, vec![field]);
let text_query = query_parser.parse_query("droopy tax").unwrap(); let text_query = query_parser.parse_query("droopy tax").unwrap();
let score_docs: Vec<(Score, DocAddress)> = index let score_docs: Vec<(Score, DocAddress)> = index
.reader()
.unwrap()
.searcher() .searcher()
.search(&text_query, &TopDocs::with_limit(2)) .search(&text_query, &TopDocs::with_limit(2))
.unwrap(); .unwrap();

View File

@@ -80,7 +80,7 @@ where
(1u64 << num_bits) - 1u64 (1u64 << num_bits) - 1u64
}; };
BitUnpacker { BitUnpacker {
num_bits: num_bits as u64, num_bits: u64::from(num_bits),
mask, mask,
data, data,
} }

View File

@@ -13,7 +13,11 @@ pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt}; pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use byteorder::LittleEndian as Endianness; pub use byteorder::LittleEndian as Endianness;
use std::io;
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
/// We do not allow segments with more than `MAX_DOC_LIMIT` documents.
pub const MAX_DOC_LIMIT: u32 = 1 << 31;
/// Computes the number of bits that will be used for bitpacking. /// Computes the number of bits that will be used for bitpacking.
/// ///
@@ -52,11 +56,6 @@ pub(crate) fn is_power_of_2(n: usize) -> bool {
(n > 0) && (n & (n - 1) == 0) (n > 0) && (n & (n - 1) == 0)
} }
/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
}
/// Has length trait /// Has length trait
pub trait HasLen { pub trait HasLen {
/// Return length /// Return length
@@ -134,4 +133,11 @@ pub(crate) mod test {
assert_eq!(compute_num_bits(256), 9u8); assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8); assert_eq!(compute_num_bits(5_000_000_000), 33u8);
} }
#[test]
fn test_max_doc() {
// this is the first time I write a unit test for a constant.
assert!(((super::MAX_DOC_LIMIT - 1) as i32) >= 0);
assert!((super::MAX_DOC_LIMIT as i32) < 0);
}
} }

View File

@@ -123,15 +123,14 @@ mod tests {
} }
} }
} #[test]
fn test_map_multithread() {
#[test] let result: Vec<usize> = Executor::multi_thread(3, "search-test")
fn test_map_multithread() { .map(|i| Ok(i * 2), 0..10)
let result: Vec<usize> = Executor::multi_thread(3, "search-test") .unwrap();
.map(|i| Ok(i * 2), 0..10) assert_eq!(result.len(), 10);
.unwrap(); for i in 0..10 {
assert_eq!(result.len(), 10); assert_eq!(result[i], i * 2);
for i in 0..10 { }
assert_eq!(result[i], i * 2);
} }
} }

View File

@@ -1,19 +1,14 @@
use super::pool::LeasedItem;
use super::pool::Pool;
use super::segment::create_segment; use super::segment::create_segment;
use super::segment::Segment; use super::segment::Segment;
use core::searcher::Searcher;
use core::Executor; use core::Executor;
use core::IndexMeta; use core::IndexMeta;
use core::SegmentId; use core::SegmentId;
use core::SegmentMeta; use core::SegmentMeta;
use core::SegmentReader;
use core::META_FILEPATH; use core::META_FILEPATH;
use directory::ManagedDirectory; use directory::ManagedDirectory;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use directory::MmapDirectory; use directory::MmapDirectory;
use directory::INDEX_WRITER_LOCK; use directory::INDEX_WRITER_LOCK;
use directory::META_LOCK;
use directory::{Directory, RAMDirectory}; use directory::{Directory, RAMDirectory};
use error::DataCorruption; use error::DataCorruption;
use error::TantivyError; use error::TantivyError;
@@ -21,14 +16,16 @@ use indexer::index_writer::open_index_writer;
use indexer::index_writer::HEAP_SIZE_MIN; use indexer::index_writer::HEAP_SIZE_MIN;
use indexer::segment_updater::save_new_metas; use indexer::segment_updater::save_new_metas;
use num_cpus; use num_cpus;
use reader::IndexReader;
use reader::IndexReaderBuilder;
use schema::Field; use schema::Field;
use schema::FieldType; use schema::FieldType;
use schema::Schema; use schema::Schema;
use serde_json; use serde_json;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::fmt; use std::fmt;
#[cfg(feature = "mmap")]
use std::path::Path; use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use tokenizer::BoxedTokenizer; use tokenizer::BoxedTokenizer;
use tokenizer::TokenizerManager; use tokenizer::TokenizerManager;
@@ -49,11 +46,10 @@ fn load_metas(directory: &Directory) -> Result<IndexMeta> {
} }
/// Search Index /// Search Index
#[derive(Clone)]
pub struct Index { pub struct Index {
directory: ManagedDirectory, directory: ManagedDirectory,
schema: Schema, schema: Schema,
num_searchers: Arc<AtomicUsize>,
searcher_pool: Arc<Pool<Searcher>>,
executor: Arc<Executor>, executor: Arc<Executor>,
tokenizers: TokenizerManager, tokenizers: TokenizerManager,
} }
@@ -158,16 +154,12 @@ impl Index {
/// Creates a new index given a directory and an `IndexMeta`. /// Creates a new index given a directory and an `IndexMeta`.
fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> { fn create_from_metas(directory: ManagedDirectory, metas: &IndexMeta) -> Result<Index> {
let schema = metas.schema.clone(); let schema = metas.schema.clone();
let n_cpus = num_cpus::get();
let index = Index { let index = Index {
directory, directory,
schema, schema,
num_searchers: Arc::new(AtomicUsize::new(n_cpus)),
searcher_pool: Arc::new(Pool::new()),
tokenizers: TokenizerManager::default(), tokenizers: TokenizerManager::default(),
executor: Arc::new(Executor::single_thread()), executor: Arc::new(Executor::single_thread()),
}; };
index.load_searchers()?;
Ok(index) Ok(index)
} }
@@ -197,6 +189,22 @@ impl Index {
} }
} }
/// Create a default `IndexReader` for the given index.
///
/// See [`Index.reader_builder()`](#method.reader_builder).
pub fn reader(&self) -> Result<IndexReader> {
self.reader_builder().try_into()
}
/// Create an `IndexReader` for the given index.
///
/// Most projects should create at most one reader for a given index.
/// This method is typically called only once per `Index` instance,
/// over the lifetime of the program.
pub fn reader_builder(&self) -> IndexReaderBuilder {
IndexReaderBuilder::new(self.clone())
}
/// Opens a new directory from an index path. /// Opens a new directory from an index path.
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> { pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> {
@@ -335,53 +343,6 @@ impl Index {
.map(|segment_meta| segment_meta.id()) .map(|segment_meta| segment_meta.id())
.collect()) .collect())
} }
/// Sets the number of searchers to use
///
/// Only works after the next call to `load_searchers`
pub fn set_num_searchers(&mut self, num_searchers: usize) {
self.num_searchers.store(num_searchers, Ordering::Release);
}
/// Update searchers so that they reflect the state of the last
/// `.commit()`.
///
/// If indexing happens in the same process as searching,
/// you most likely want to call `.load_searchers()` right after each
/// successful call to `.commit()`.
///
/// If indexing and searching happen in different processes, the way to
/// get the freshest `index` at all time, is to watch `meta.json` and
/// call `load_searchers` whenever a changes happen.
pub fn load_searchers(&self) -> Result<()> {
let _meta_lock = self.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
let segment_readers: Vec<SegmentReader> = searchable_segments
.iter()
.map(SegmentReader::open)
.collect::<Result<_>>()?;
let schema = self.schema();
let num_searchers: usize = self.num_searchers.load(Ordering::Acquire);
let searchers = (0..num_searchers)
.map(|_| Searcher::new(schema.clone(), self.clone(), segment_readers.clone()))
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
/// Returns a searcher
///
/// This method should be called every single time a search
/// query is performed.
/// The searchers are taken from a pool of `num_searchers` searchers.
/// If no searcher is available
/// this may block.
///
/// The same searcher must be used for a given query, as it ensures
/// the use of a consistent segment set.
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
} }
impl fmt::Debug for Index { impl fmt::Debug for Index {
@@ -390,29 +351,22 @@ impl fmt::Debug for Index {
} }
} }
impl Clone for Index {
fn clone(&self) -> Index {
Index {
directory: self.directory.clone(),
schema: self.schema.clone(),
num_searchers: Arc::clone(&self.num_searchers),
searcher_pool: Arc::clone(&self.searcher_pool),
tokenizers: self.tokenizers.clone(),
executor: self.executor.clone(),
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use directory::RAMDirectory; use directory::RAMDirectory;
use schema::{Schema, INT_INDEXED, TEXT}; use schema::Field;
use schema::{Schema, INDEXED, TEXT};
use std::thread;
use std::time::Duration;
use Index; use Index;
use IndexReader;
use IndexWriter;
use ReloadPolicy;
#[test] #[test]
fn test_indexer_for_field() { fn test_indexer_for_field() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let num_likes_field = schema_builder.add_u64_field("num_likes", INT_INDEXED); let num_likes_field = schema_builder.add_u64_field("num_likes", INDEXED);
let body_field = schema_builder.add_text_field("body", TEXT); let body_field = schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -470,7 +424,117 @@ mod tests {
fn throw_away_schema() -> Schema { fn throw_away_schema() -> Schema {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let _ = schema_builder.add_u64_field("num_likes", INT_INDEXED); let _ = schema_builder.add_u64_field("num_likes", INDEXED);
schema_builder.build() schema_builder.build()
} }
#[test]
fn test_index_on_commit_reload_policy() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let index = Index::create_in_ram(schema);
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
}
#[cfg(feature = "mmap")]
mod mmap_specific {
use super::*;
use std::path::PathBuf;
use tempdir::TempDir;
#[test]
fn test_index_on_commit_reload_policy_mmap() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
}
#[test]
fn test_index_manual_policy_mmap() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let index = Index::create_from_tempdir(schema).unwrap();
let mut writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
writer.commit().unwrap();
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64));
writer.commit().unwrap();
thread::sleep(Duration::from_millis(500));
assert_eq!(reader.searcher().num_docs(), 0);
reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 1);
}
#[test]
fn test_index_on_commit_reload_policy_different_directories() {
let schema = throw_away_schema();
let field = schema.get_field("num_likes").unwrap();
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let write_index = Index::create_in_dir(&tempdir_path, schema).unwrap();
let read_index = Index::open_in_dir(&tempdir_path).unwrap();
let reader = read_index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0);
let mut writer = write_index.writer_with_num_threads(1, 3_000_000).unwrap();
test_index_on_commit_reload_policy_aux(field, &mut writer, &reader);
}
}
fn test_index_on_commit_reload_policy_aux(
field: Field,
writer: &mut IndexWriter,
reader: &IndexReader,
) {
assert_eq!(reader.searcher().num_docs(), 0);
writer.add_document(doc!(field=>1u64));
writer.commit().unwrap();
let mut count = 0;
for _ in 0..100 {
count = reader.searcher().num_docs();
if count > 0 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 1);
writer.add_document(doc!(field=>2u64));
writer.commit().unwrap();
let mut count = 0;
for _ in 0..10 {
count = reader.searcher().num_docs();
if count > 1 {
break;
}
thread::sleep(Duration::from_millis(100));
}
assert_eq!(count, 2);
}
} }

View File

@@ -2,7 +2,6 @@ mod executor;
pub mod index; pub mod index;
mod index_meta; mod index_meta;
mod inverted_index_reader; mod inverted_index_reader;
mod pool;
pub mod searcher; pub mod searcher;
mod segment; mod segment;
mod segment_component; mod segment_component;
@@ -25,6 +24,7 @@ pub use self::segment_reader::SegmentReader;
use std::path::PathBuf; use std::path::PathBuf;
lazy_static! { lazy_static! {
/// The meta file contains all the information about the list of segments and the schema /// The meta file contains all the information about the list of segments and the schema
/// of the index. /// of the index.
pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json"); pub static ref META_FILEPATH: PathBuf = PathBuf::from("meta.json");

View File

@@ -19,7 +19,7 @@ pub struct SegmentId(Uuid);
#[cfg(test)] #[cfg(test)]
lazy_static! { lazy_static! {
static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default(); static ref AUTO_INC_COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::default();
static ref EMPTY_ARR: [u8; 8] = [0u8; 8]; static ref ZERO_ARRAY: [u8; 8] = [0u8; 8];
} }
// During tests, we generate the segment id in a autoincrement manner // During tests, we generate the segment id in a autoincrement manner
@@ -30,7 +30,7 @@ lazy_static! {
#[cfg(test)] #[cfg(test)]
fn create_uuid() -> Uuid { fn create_uuid() -> Uuid {
let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst); let new_auto_inc_id = (*AUTO_INC_COUNTER).fetch_add(1, atomic::Ordering::SeqCst);
Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*EMPTY_ARR).unwrap() Uuid::from_fields(new_auto_inc_id as u32, 0, 0, &*ZERO_ARRAY).unwrap()
} }
#[cfg(not(test))] #[cfg(not(test))]

View File

@@ -477,9 +477,7 @@ mod test {
        // ok, now we should have a deleted doc
        index_writer2.commit().unwrap();
    }
-    index.load_searchers().unwrap();
-    let searcher = index.searcher();
    let searcher = index.reader().unwrap().searcher();
    let docs: Vec<DocId> = searcher.segment_reader(0).doc_ids_alive().collect();
    assert_eq!(vec![0u32, 2u32], docs);
}

View File

@@ -1,6 +1,8 @@
use directory::directory_lock::Lock;
use directory::error::LockError;
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::WatchCallback;
use directory::WatchHandle;
use directory::{ReadOnlySource, WritePtr};
use std::fmt;
use std::io;
@@ -187,6 +189,32 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
        }
    }
}
/// Registers a callback that will be called whenever a change to `meta.json`
/// made using the `atomic_write` API is detected.
///
/// The behavior when using `.watch()` on a file using `.open_write(...)` is, on the other
/// hand, undefined.
///
/// The file will be watched for the lifetime of the returned `WatchHandle`. The caller is
/// required to keep it.
/// It does not override previous callbacks. When the file is modified, all the callbacks that are
/// registered (and whose `WatchHandle` is still alive) are triggered.
///
/// Internally, tantivy only uses this API to detect new commits to implement the
/// `OnCommit` `ReloadPolicy`. Not implementing watch in a `Directory` only prevents the
/// `OnCommit` `ReloadPolicy` from working properly.
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle;
/// Ensure that all volatile files are persisted (in directories where that makes sense).
///
/// In order to make Near Real Time efficient, tantivy introduced the notion of soft_commit vs
/// commit. Commit will call `.flush()`, while softcommit won't.
///
/// `meta.json` should be the last file to be flushed.
fn flush(&self) -> io::Result<()> {
Ok(())
}
}
/// DirectoryClone
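As a rough usage sketch of the `watch` API documented above (this example is not part of the changeset; the counter logic and the function name are made up, only `Directory::watch`, `WatchCallback` and `WatchHandle` come from the crate):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    // Counts how many times meta.json changes while the returned handle stays alive.
    fn count_meta_changes<D: Directory>(directory: &D) -> (Arc<AtomicUsize>, WatchHandle) {
        let counter = Arc::new(AtomicUsize::new(0));
        let counter_clone = counter.clone();
        // The callback is a boxed Fn() + Send + Sync; it fires on every detected change to meta.json.
        let handle = directory.watch(Box::new(move || {
            counter_clone.fetch_add(1, Ordering::SeqCst);
        }));
        // Dropping the handle unregisters the callback, so the caller must keep it.
        (counter, handle)
    }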

View File

@@ -43,7 +43,7 @@ lazy_static! {
    is_blocking: false
};
/// The meta lock file is here to protect the segment files being opened by
-/// `.load_searchers()` from being garbage collected.
/// `IndexReader::reload()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have prefered `RWLock` semantics
/// here, but it is difficult to achieve on Windows.

View File

@@ -73,6 +73,14 @@ pub enum OpenDirectoryError {
DoesNotExist(PathBuf), DoesNotExist(PathBuf),
/// The path exists but is not a directory. /// The path exists but is not a directory.
NotADirectory(PathBuf), NotADirectory(PathBuf),
/// IoError
IoError(io::Error),
}
impl From<io::Error> for OpenDirectoryError {
fn from(io_err: io::Error) -> Self {
OpenDirectoryError::IoError(io_err)
}
} }
impl fmt::Display for OpenDirectoryError { impl fmt::Display for OpenDirectoryError {
@@ -84,6 +92,11 @@ impl fmt::Display for OpenDirectoryError {
OpenDirectoryError::NotADirectory(ref path) => { OpenDirectoryError::NotADirectory(ref path) => {
write!(f, "the path '{:?}' exists but is not a directory", path) write!(f, "the path '{:?}' exists but is not a directory", path)
} }
OpenDirectoryError::IoError(ref err) => write!(
f,
"IOError while trying to open/create the directory. {:?}",
err
),
} }
} }
} }

View File

@@ -4,6 +4,7 @@ use directory::DirectoryLock;
use directory::Lock; use directory::Lock;
use directory::META_LOCK; use directory::META_LOCK;
use directory::{ReadOnlySource, WritePtr}; use directory::{ReadOnlySource, WritePtr};
use directory::{WatchCallback, WatchHandle};
use error::DataCorruption; use error::DataCorruption;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
@@ -241,6 +242,10 @@ impl Directory for ManagedDirectory {
fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> { fn acquire_lock(&self, lock: &Lock) -> result::Result<DirectoryLock, LockError> {
self.directory.acquire_lock(lock) self.directory.acquire_lock(lock)
} }
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.directory.watch(watch_callback)
}
} }
impl Clone for ManagedDirectory { impl Clone for ManagedDirectory {
@@ -255,95 +260,98 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use directory::MmapDirectory; mod mmap_specific {
use std::io::Write;
use std::path::Path;
use tempdir::TempDir;
lazy_static! { use super::super::*;
static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test"); use std::path::Path;
static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2"); use tempdir::TempDir;
}
#[test] lazy_static! {
#[cfg(feature = "mmap")] static ref TEST_PATH1: &'static Path = Path::new("some_path_for_test");
fn test_managed_directory() { static ref TEST_PATH2: &'static Path = Path::new("some_path_for_test2");
let tempdir = TempDir::new("index").unwrap(); }
let tempdir_path = PathBuf::from(tempdir.path());
{ use directory::MmapDirectory;
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); use std::io::Write;
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
#[test]
fn test_managed_directory() {
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
{ {
let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
write_file.flush().unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
{
let mut write_file = managed_directory.open_write(*TEST_PATH1).unwrap();
write_file.flush().unwrap();
}
{
managed_directory
.atomic_write(*TEST_PATH2, &vec![0u8, 1u8])
.unwrap();
}
{
assert!(managed_directory.exists(*TEST_PATH1));
assert!(managed_directory.exists(*TEST_PATH2));
}
{
let living_files: HashSet<PathBuf> =
[TEST_PATH1.to_owned()].into_iter().cloned().collect();
managed_directory.garbage_collect(|| living_files);
}
{
assert!(managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
}
} }
{ {
managed_directory let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
.atomic_write(*TEST_PATH2, &vec![0u8, 1u8]) let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
.unwrap(); {
} assert!(managed_directory.exists(*TEST_PATH1));
{ assert!(!managed_directory.exists(*TEST_PATH2));
assert!(managed_directory.exists(*TEST_PATH1)); }
assert!(managed_directory.exists(*TEST_PATH2)); {
} let living_files: HashSet<PathBuf> = HashSet::new();
{ managed_directory.garbage_collect(|| living_files);
let living_files: HashSet<PathBuf> = }
[TEST_PATH1.to_owned()].into_iter().cloned().collect(); {
managed_directory.garbage_collect(|| living_files); assert!(!managed_directory.exists(*TEST_PATH1));
} assert!(!managed_directory.exists(*TEST_PATH2));
{ }
assert!(managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
} }
} }
{
#[test]
fn test_managed_directory_gc_while_mmapped() {
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
{ managed_directory
assert!(managed_directory.exists(*TEST_PATH1)); .atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
assert!(!managed_directory.exists(*TEST_PATH2)); .unwrap();
}
{
let living_files: HashSet<PathBuf> = HashSet::new();
managed_directory.garbage_collect(|| living_files);
}
{
assert!(!managed_directory.exists(*TEST_PATH1));
assert!(!managed_directory.exists(*TEST_PATH2));
}
}
}
#[test]
#[cfg(feature = "mmap ")]
fn test_managed_directory_gc_while_mmapped() {
let tempdir = TempDir::new("index").unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let living_files = HashSet::new();
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
managed_directory
.atomic_write(*TEST_PATH1, &vec![0u8, 1u8])
.unwrap();
assert!(managed_directory.exists(*TEST_PATH1));
let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
managed_directory.garbage_collect(|| living_files.clone());
if cfg!(target_os = "windows") {
// On Windows, gc should try and fail the file as it is mmapped.
assert!(managed_directory.exists(*TEST_PATH1)); assert!(managed_directory.exists(*TEST_PATH1));
// unmap should happen here.
drop(_mmap_read); let _mmap_read = managed_directory.open_read(*TEST_PATH1).unwrap();
// The file should still be in the list of managed file and managed_directory.garbage_collect(|| living_files.clone());
// eventually be deleted once mmap is released. if cfg!(target_os = "windows") {
managed_directory.garbage_collect(|| living_files); // On Windows, gc should try and fail the file as it is mmapped.
assert!(!managed_directory.exists(*TEST_PATH1)); assert!(managed_directory.exists(*TEST_PATH1));
} else { // unmap should happen here.
assert!(!managed_directory.exists(*TEST_PATH1)); drop(_mmap_read);
// The file should still be in the list of managed file and
// eventually be deleted once mmap is released.
managed_directory.garbage_collect(|| living_files);
assert!(!managed_directory.exists(*TEST_PATH1));
} else {
assert!(!managed_directory.exists(*TEST_PATH1));
}
} }
} }
} }

View File

@@ -1,18 +1,24 @@
extern crate fs2;
extern crate notify;
use self::fs2::FileExt;
use self::notify::RawEvent;
use self::notify::RecursiveMode;
use self::notify::Watcher;
use atomicwrites;
-use common::make_io_err;
use core::META_FILEPATH;
use directory::error::LockError;
use directory::error::{DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
-use directory::shared_vec_slice::SharedVecSlice;
use directory::read_only_source::BoxedData;
use directory::Directory;
use directory::DirectoryLock;
use directory::Lock;
use directory::ReadOnlySource;
use directory::WatchCallback;
use directory::WatchCallbackList;
use directory::WatchHandle;
use directory::WritePtr;
-use fst::raw::MmapReadOnly;
-use std::collections::hash_map::Entry as HashMapEntry;
use memmap::Mmap;
use std::collections::HashMap;
use std::convert::From;
use std::fmt;
@@ -22,14 +28,22 @@ use std::io::{self, Seek, SeekFrom};
use std::io::{BufWriter, Read, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::RwLock;
use std::sync::Weak;
use std::thread;
use tempdir::TempDir;
/// Create a default io error given a string.
pub(crate) fn make_io_err(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
}
/// Returns None iff the file exists, can be read, but is empty (and hence
-/// cannot be mmapped).
-///
-fn open_mmap(full_path: &Path) -> result::Result<Option<MmapReadOnly>, OpenReadError> {
/// cannot be mmapped)
fn open_mmap(full_path: &Path) -> result::Result<Option<Mmap>, OpenReadError> {
let file = File::open(full_path).map_err(|e| { let file = File::open(full_path).map_err(|e| {
if e.kind() == io::ErrorKind::NotFound { if e.kind() == io::ErrorKind::NotFound {
OpenReadError::FileDoesNotExist(full_path.to_owned()) OpenReadError::FileDoesNotExist(full_path.to_owned())
@@ -48,7 +62,7 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<MmapReadOnly>, OpenReadE
        return Ok(None);
    }
    unsafe {
-        MmapReadOnly::open(&file)
        memmap::Mmap::map(&file)
            .map(Some)
            .map_err(|e| From::from(IOError::with_path(full_path.to_owned(), e)))
    }
@@ -71,7 +85,7 @@ pub struct CacheInfo {
struct MmapCache {
    counters: CacheCounters,
-    cache: HashMap<PathBuf, MmapReadOnly>,
    cache: HashMap<PathBuf, Weak<BoxedData>>,
}
impl Default for MmapCache { impl Default for MmapCache {
@@ -84,12 +98,7 @@ impl Default for MmapCache {
} }
impl MmapCache { impl MmapCache {
-    /// Removes a `MmapReadOnly` entry from the mmap cache.
-    fn discard_from_cache(&mut self, full_path: &Path) -> bool {
-        self.cache.remove(full_path).is_some()
-    }
-    fn get_info(&mut self) -> CacheInfo {
    fn get_info(&self) -> CacheInfo {
        let paths: Vec<PathBuf> = self.cache.keys().cloned().collect();
        CacheInfo {
            counters: self.counters.clone(),
@@ -97,26 +106,108 @@ impl MmapCache {
        }
    }
-    fn get_mmap(&mut self, full_path: &Path) -> Result<Option<MmapReadOnly>, OpenReadError> {
-        Ok(match self.cache.entry(full_path.to_owned()) {
-            HashMapEntry::Occupied(occupied_entry) => {
-                let mmap = occupied_entry.get();
-                self.counters.hit += 1;
-                Some(mmap.clone())
-            }
-            HashMapEntry::Vacant(vacant_entry) => {
-                self.counters.miss += 1;
-                if let Some(mmap) = open_mmap(full_path)? {
-                    vacant_entry.insert(mmap.clone());
-                    Some(mmap)
-                } else {
-                    None
-                }
-            }
-        })
-    }
    fn remove_weak_ref(&mut self) {
        let keys_to_remove: Vec<PathBuf> = self
            .cache
            .iter()
            .filter(|(_, mmap_weakref)| mmap_weakref.upgrade().is_none())
            .map(|(key, _)| key.clone())
            .collect();
        for key in keys_to_remove {
            self.cache.remove(&key);
        }
    }
    // Returns None if the file exists but as a len of 0 (and hence is not mmappable).
    fn get_mmap(&mut self, full_path: &Path) -> Result<Option<Arc<BoxedData>>, OpenReadError> {
        if let Some(mmap_weak) = self.cache.get(full_path) {
            if let Some(mmap_arc) = mmap_weak.upgrade() {
                self.counters.hit += 1;
                return Ok(Some(mmap_arc));
            }
        }
        self.cache.remove(full_path);
        self.counters.miss += 1;
        Ok(if let Some(mmap) = open_mmap(full_path)? {
            let mmap_arc: Arc<BoxedData> = Arc::new(Box::new(mmap));
            let mmap_weak = Arc::downgrade(&mmap_arc);
            self.cache.insert(full_path.to_owned(), mmap_weak);
            Some(mmap_arc)
        } else {
            None
        })
    }
}
struct InnerWatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: WatchCallbackList,
}
impl InnerWatcherWrapper {
pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let mut watcher = notify::raw_watcher(tx)?;
watcher.watch(path, RecursiveMode::Recursive)?;
let inner = InnerWatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router: Default::default(),
};
Ok((inner, watcher_recv))
}
}
#[derive(Clone)]
pub(crate) struct WatcherWrapper {
inner: Arc<InnerWatcherWrapper>,
}
impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
_ => {
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_wrapper = WatcherWrapper {
inner: Arc::new(inner),
};
let watcher_wrapper_clone = watcher_wrapper.clone();
thread::Builder::new()
.name("meta-file-watch-thread".to_string())
.spawn(move || {
loop {
match watcher_recv.recv().map(|evt| evt.path) {
Ok(Some(changed_path)) => {
// ... Actually subject to false positive.
// We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH {
watcher_wrapper_clone.inner.watcher_router.broadcast();
}
}
}
Ok(None) => {
// not an event we are interested in.
}
Err(_e) => {
// the watch send channel was dropped
break;
}
}
}
})
.expect("Failed to spawn thread to watch meta.json");
Ok(watcher_wrapper)
}
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.inner.watcher_router.subscribe(watch_callback)
}
}
/// Directory storing data in files, read via mmap. /// Directory storing data in files, read via mmap.
/// ///
/// The Mmap object are cached to limit the /// The Mmap object are cached to limit the
@@ -131,31 +222,62 @@ impl MmapCache {
/// On Windows the semantics are again different.
#[derive(Clone)]
pub struct MmapDirectory {
    inner: Arc<MmapDirectoryInner>,
}
struct MmapDirectoryInner {
    root_path: PathBuf,
-    mmap_cache: Arc<RwLock<MmapCache>>,
-    _temp_directory: Arc<Option<TempDir>>,
    mmap_cache: RwLock<MmapCache>,
    _temp_directory: Option<TempDir>,
watcher: RwLock<WatcherWrapper>,
}
impl MmapDirectoryInner {
fn new(
root_path: PathBuf,
temp_directory: Option<TempDir>,
) -> Result<MmapDirectoryInner, OpenDirectoryError> {
let watch_wrapper = WatcherWrapper::new(&root_path)?;
let mmap_directory_inner = MmapDirectoryInner {
root_path,
mmap_cache: Default::default(),
_temp_directory: temp_directory,
watcher: RwLock::new(watch_wrapper),
};
Ok(mmap_directory_inner)
}
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
let mut wlock = self.watcher.write().unwrap();
wlock.watch(watch_callback)
}
}
impl fmt::Debug for MmapDirectory {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "MmapDirectory({:?})", self.root_path)
        write!(f, "MmapDirectory({:?})", self.inner.root_path)
    }
}
impl MmapDirectory {
fn new(
root_path: PathBuf,
temp_directory: Option<TempDir>,
) -> Result<MmapDirectory, OpenDirectoryError> {
let inner = MmapDirectoryInner::new(root_path, temp_directory)?;
Ok(MmapDirectory {
inner: Arc::new(inner),
})
}
    /// Creates a new MmapDirectory in a temporary directory.
    ///
    /// This is mostly useful to test the MmapDirectory itself.
    /// For your unit tests, prefer the RAMDirectory.
-    pub fn create_from_tempdir() -> io::Result<MmapDirectory> {
-        let tempdir = TempDir::new("index")?;
-        let directory = MmapDirectory {
-            root_path: tempdir_path,
-            mmap_cache: Arc::new(RwLock::new(MmapCache::default())),
-            _temp_directory: Arc::new(Some(tempdir)),
-        };
-        Ok(directory)
    pub fn create_from_tempdir() -> Result<MmapDirectory, OpenDirectoryError> {
        let tempdir = TempDir::new("index").map_err(OpenDirectoryError::IoError)?;
        let tempdir_path = PathBuf::from(tempdir.path());
        MmapDirectory::new(tempdir_path, Some(tempdir))
    }
/// Opens a MmapDirectory in a directory. /// Opens a MmapDirectory in a directory.
@@ -173,18 +295,14 @@ impl MmapDirectory {
                directory_path,
            )))
        } else {
-            Ok(MmapDirectory {
-                root_path: PathBuf::from(directory_path),
-                mmap_cache: Arc::new(RwLock::new(MmapCache::default())),
-                _temp_directory: Arc::new(None),
-            })
            Ok(MmapDirectory::new(PathBuf::from(directory_path), None)?)
        }
    }
    /// Joins a relative_path to the directory `root_path`
    /// to create a proper complete `filepath`.
    fn resolve_path(&self, relative_path: &Path) -> PathBuf {
-        self.root_path.join(relative_path)
        self.inner.root_path.join(relative_path)
    }
/// Sync the root directory. /// Sync the root directory.
@@ -209,7 +327,7 @@ impl MmapDirectory {
                .custom_flags(winbase::FILE_FLAG_BACKUP_SEMANTICS);
        }
-        let fd = open_opts.open(&self.root_path)?;
        let fd = open_opts.open(&self.inner.root_path)?;
        fd.sync_all()?;
        Ok(())
    }
@@ -219,9 +337,15 @@ impl MmapDirectory {
    ///
    /// The `MmapDirectory` embeds a `MmapDirectory`
    /// to avoid multiplying the `mmap` system calls.
-    pub fn get_cache_info(&mut self) -> CacheInfo {
-        self.mmap_cache
-            .write()
-            .expect("Mmap cache lock is poisoned.")
-            .get_info()
    pub fn get_cache_info(&self) -> CacheInfo {
        self.inner
            .mmap_cache
            .write()
            .expect("mmap cache lock is poisoned")
            .remove_weak_ref();
        self.inner
            .mmap_cache
            .read()
            .expect("Mmap cache lock is poisoned.")
            .get_info()
    }
@@ -244,7 +368,7 @@ impl Drop for ReleaseLockFile {
/// This Write wraps a File, but has the specificity of
/// call `sync_all` on flush.
-struct SafeFileWriter(File);
pub struct SafeFileWriter(File);
impl SafeFileWriter {
    fn new(file: File) -> SafeFileWriter {
@@ -274,7 +398,7 @@ impl Directory for MmapDirectory {
debug!("Open Read {:?}", path); debug!("Open Read {:?}", path);
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
let mut mmap_cache = self.mmap_cache.write().map_err(|_| { let mut mmap_cache = self.inner.mmap_cache.write().map_err(|_| {
let msg = format!( let msg = format!(
"Failed to acquired write lock \ "Failed to acquired write lock \
on mmap cache while reading {:?}", on mmap cache while reading {:?}",
@@ -282,11 +406,34 @@ impl Directory for MmapDirectory {
            );
            IOError::with_path(path.to_owned(), make_io_err(msg))
        })?;
        Ok(mmap_cache
            .get_mmap(&full_path)?
-            .map(ReadOnlySource::Mmap)
-            .unwrap_or_else(|| ReadOnlySource::Anonymous(SharedVecSlice::empty())))
            .map(ReadOnlySource::from)
            .unwrap_or_else(ReadOnlySource::empty))
}
/// Any entry associated to the path in the mmap will be
/// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
debug!("Deleting file {:?}", path);
let full_path = self.resolve_path(path);
match fs::remove_file(&full_path) {
Ok(_) => self
.sync_directory()
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(DeleteError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
} }
fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
@@ -319,44 +466,6 @@ impl Directory for MmapDirectory {
Ok(BufWriter::new(Box::new(writer))) Ok(BufWriter::new(Box::new(writer)))
} }
/// Any entry associated to the path in the mmap will be
/// removed before the file is deleted.
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
debug!("Deleting file {:?}", path);
let full_path = self.resolve_path(path);
let mut mmap_cache = self.mmap_cache.write().map_err(|_| {
let msg = format!(
"Failed to acquired write lock \
on mmap cache while deleting {:?}",
path
);
IOError::with_path(path.to_owned(), make_io_err(msg))
})?;
mmap_cache.discard_from_cache(path);
// Removing the entry in the MMap cache.
// The munmap will appear on Drop,
// when the last reference is gone.
mmap_cache.cache.remove(&full_path);
match fs::remove_file(&full_path) {
Ok(_) => self
.sync_directory()
.map_err(|e| IOError::with_path(path.to_owned(), e).into()),
Err(e) => {
if e.kind() == io::ErrorKind::NotFound {
Err(DeleteError::FileDoesNotExist(path.to_owned()))
} else {
Err(IOError::with_path(path.to_owned(), e).into())
}
}
}
}
fn exists(&self, path: &Path) -> bool {
let full_path = self.resolve_path(path);
full_path.exists()
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> { fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
let full_path = self.resolve_path(path); let full_path = self.resolve_path(path);
let mut buffer = Vec::new(); let mut buffer = Vec::new();
@@ -403,6 +512,10 @@ impl Directory for MmapDirectory {
_file: file, _file: file,
}))) })))
} }
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.inner.watch(watch_callback)
}
} }
#[cfg(test)] #[cfg(test)]
@@ -412,6 +525,13 @@ mod tests {
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;
use schema::{Schema, SchemaBuilder, TEXT};
use std::fs;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
use Index;
use ReloadPolicy;
#[test] #[test]
fn test_open_non_existant_path() { fn test_open_non_existant_path() {
@@ -436,7 +556,7 @@ mod tests {
    #[test]
    fn test_cache() {
-        let content = "abc".as_bytes();
        let content = b"abc";
// here we test if the cache releases // here we test if the cache releases
// mmaps correctly. // mmaps correctly.
@@ -452,26 +572,104 @@ mod tests {
w.flush().unwrap(); w.flush().unwrap();
} }
} }
{
for (i, path) in paths.iter().enumerate() { let mut keep = vec![];
let _r = mmap_directory.open_read(path).unwrap(); for (i, path) in paths.iter().enumerate() {
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), i + 1); keep.push(mmap_directory.open_read(path).unwrap());
} assert_eq!(mmap_directory.get_cache_info().mmapped.len(), i + 1);
for path in paths.iter() { }
let _r = mmap_directory.open_read(path).unwrap(); assert_eq!(mmap_directory.get_cache_info().counters.hit, 0);
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), num_paths); assert_eq!(mmap_directory.get_cache_info().counters.miss, 10);
} assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
for (i, path) in paths.iter().enumerate() { for path in paths.iter() {
mmap_directory.delete(path).unwrap(); let _r = mmap_directory.open_read(path).unwrap();
assert_eq!( assert_eq!(mmap_directory.get_cache_info().mmapped.len(), num_paths);
mmap_directory.get_cache_info().mmapped.len(),
num_paths - i - 1
);
}
} }
assert_eq!(mmap_directory.get_cache_info().counters.hit, 10); assert_eq!(mmap_directory.get_cache_info().counters.hit, 10);
assert_eq!(mmap_directory.get_cache_info().counters.miss, 10); assert_eq!(mmap_directory.get_cache_info().counters.miss, 10);
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
for path in paths.iter() {
let _r = mmap_directory.open_read(path).unwrap();
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
}
assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
assert_eq!(mmap_directory.get_cache_info().counters.miss, 10);
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 10);
drop(keep);
for path in paths.iter() {
let _r = mmap_directory.open_read(path).unwrap();
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 1);
}
assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
assert_eq!(mmap_directory.get_cache_info().counters.miss, 20);
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
for path in &paths {
mmap_directory.delete(path).unwrap();
}
assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
assert_eq!(mmap_directory.get_cache_info().counters.miss, 20);
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
for path in paths.iter() {
assert!(mmap_directory.open_read(path).is_err());
}
assert_eq!(mmap_directory.get_cache_info().counters.hit, 20);
assert_eq!(mmap_directory.get_cache_info().counters.miss, 30);
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0); assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
} }
#[test]
fn test_watch_wrapper() {
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let tmp_dir: TempDir = tempdir::TempDir::new("test_watch_wrapper").unwrap();
let tmp_dirpath = tmp_dir.path().to_owned();
let mut watch_wrapper = WatcherWrapper::new(&tmp_dirpath).unwrap();
let tmp_file = tmp_dirpath.join("coucou");
let _handle = watch_wrapper.watch(Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
}));
assert_eq!(counter.load(Ordering::SeqCst), 0);
fs::write(&tmp_file, b"whateverwilldo").unwrap();
thread::sleep(Duration::new(0, 1_000u32));
}
#[test]
fn test_mmap_released() {
let mmap_directory = MmapDirectory::create_from_tempdir().unwrap();
let mut schema_builder: SchemaBuilder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
{
let index = Index::create(mmap_directory.clone(), schema).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for _num_commits in 0..16 {
for _ in 0..10 {
index_writer.add_document(doc!(text_field=>"abc"));
}
index_writer.commit().unwrap();
}
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
for _ in 0..30 {
index_writer.add_document(doc!(text_field=>"abc"));
index_writer.commit().unwrap();
reader.reload().unwrap();
}
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
let num_segments = reader.searcher().segment_readers().len();
assert_eq!(num_segments, 4);
assert_eq!(
num_segments * 7,
mmap_directory.get_cache_info().mmapped.len()
);
}
assert_eq!(mmap_directory.get_cache_info().mmapped.len(), 0);
}
}
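The `MmapCache` in this file now stores only `Weak` references, so a mapping is released as soon as the last `ReadOnlySource` pointing at it is dropped, and `remove_weak_ref` prunes dead entries before `get_cache_info` reports statistics. A minimal standalone sketch of that weak-value-cache pattern (illustrative only; none of these names exist in tantivy):

    use std::collections::HashMap;
    use std::path::PathBuf;
    use std::sync::{Arc, Weak};

    #[derive(Default)]
    struct WeakCache {
        cache: HashMap<PathBuf, Weak<Vec<u8>>>,
    }

    impl WeakCache {
        // Return the cached payload if some reader still holds it, otherwise reload it
        // and keep only a Weak handle so the cache never prolongs the payload's lifetime.
        fn get_or_load(&mut self, path: &PathBuf, load: impl FnOnce() -> Vec<u8>) -> Arc<Vec<u8>> {
            if let Some(weak) = self.cache.get(path) {
                if let Some(strong) = weak.upgrade() {
                    return strong;
                }
            }
            let strong = Arc::new(load());
            self.cache.insert(path.clone(), Arc::downgrade(&strong));
            strong
        }

        // Drop entries whose payload has already been released by every reader.
        fn prune(&mut self) {
            self.cache.retain(|_, weak| weak.upgrade().is_some());
        }
    }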

View File

@@ -12,7 +12,8 @@ mod directory_lock;
mod managed_directory;
mod ram_directory;
mod read_only_source;
-mod shared_vec_slice;
mod watch_event_router;
mod nrt_directory;
/// Errors specific to the directory module. /// Errors specific to the directory module.
pub mod error; pub mod error;
@@ -22,6 +23,8 @@ pub use self::directory::{Directory, DirectoryClone};
pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK}; pub use self::directory_lock::{Lock, INDEX_WRITER_LOCK, META_LOCK};
pub use self::ram_directory::RAMDirectory; pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{BufWriter, Seek, Write}; use std::io::{BufWriter, Seek, Write};
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]

View File

@@ -0,0 +1,195 @@
use directory::Directory;
use std::path::{PathBuf, Path};
use directory::ReadOnlySource;
use directory::error::OpenReadError;
use directory::error::DeleteError;
use std::io::{BufWriter, Cursor};
use directory::SeekableWrite;
use directory::error::OpenWriteError;
use directory::WatchHandle;
use directory::ram_directory::InnerRamDirectory;
use std::sync::RwLock;
use std::sync::Arc;
use directory::WatchCallback;
use std::fmt;
use std::io;
use std::io::{Seek, Write};
use directory::DirectoryClone;
const BUFFER_LEN: usize = 1_000_000;
pub enum NRTWriter {
InRam {
buffer: Cursor<Vec<u8>>,
path: PathBuf,
nrt_directory: NRTDirectory
},
UnderlyingFile(BufWriter<Box<SeekableWrite>>)
}
impl NRTWriter {
pub fn new(path: PathBuf, nrt_directory: NRTDirectory) -> NRTWriter {
NRTWriter::InRam {
buffer: Cursor::new(Vec::with_capacity(BUFFER_LEN)),
path,
nrt_directory,
}
}
}
impl io::Seek for NRTWriter {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
match self {
NRTWriter::InRam { buffer, path, nrt_directory } => {
buffer.seek(pos)
}
NRTWriter::UnderlyingFile(file) => {
file.seek(pos)
}
}
}
}
impl io::Write for NRTWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.write_all(buf)?;
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
match self {
NRTWriter::InRam { buffer, path, nrt_directory } => {
let mut cache_wlock = nrt_directory.cache.write().unwrap();
cache_wlock.write(path.clone(), buffer.get_ref());
Ok(())
}
NRTWriter::UnderlyingFile(file) => {
file.flush()
}
}
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
// Working around the borrow checker.
let mut underlying_write_opt: Option<BufWriter<Box<SeekableWrite>>> = None;
if let NRTWriter::InRam { buffer, path, nrt_directory } = self {
if buffer.get_ref().len() + buf.len() > BUFFER_LEN {
// We can't keep this in RAM. Let's move it to the underlying directory.
underlying_write_opt = Some(nrt_directory.open_write(path)
.map_err(|open_err| {
io::Error::new(io::ErrorKind::Other, open_err)
})?);
}
}
if let Some(underlying_write) = underlying_write_opt {
*self = NRTWriter::UnderlyingFile(underlying_write);
}
match self {
NRTWriter::InRam { buffer, path, nrt_directory } => {
assert!(buffer.get_ref().len() + buf.len() <= BUFFER_LEN);
buffer.write_all(buf)
}
NRTWriter::UnderlyingFile(file) => {
file.write_all(buf)
}
}
}
}
pub struct NRTDirectory {
underlying: Box<Directory>,
cache: Arc<RwLock<InnerRamDirectory>>,
}
impl Clone for NRTDirectory {
fn clone(&self) -> Self {
NRTDirectory {
underlying: self.underlying.box_clone(),
cache: self.cache.clone()
}
}
}
impl NRTDirectory {
fn wrap(underlying: Box<Directory>) -> NRTDirectory {
NRTDirectory {
underlying,
cache: Default::default()
}
}
}
impl fmt::Debug for NRTDirectory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NRTDirectory({:?})", self.underlying)
}
}
impl Directory for NRTDirectory {
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
unimplemented!()
}
fn delete(&self, path: &Path) -> Result<(), DeleteError> {
// We explicitly release the lock, to prevent a panic on the underlying directory
// to poison the lock.
//
// File can only go from cache to underlying so the result does not lead to
// any inconsistency.
{
let mut cache_wlock = self.cache.write().unwrap();
if cache_wlock.exists(path) {
return cache_wlock.delete(path);
}
}
self.underlying.delete(path)
}
fn exists(&self, path: &Path) -> bool {
// We explicitly release the lock, to prevent a panic on the underlying directory
// to poison the lock.
//
// File can only go from cache to underlying so the result does not lead to
// any inconsistency.
{
let rlock_cache = self.cache.read().unwrap();
if rlock_cache.exists(path) {
return true;
}
}
self.underlying.exists(path)
}
fn open_write(&mut self, path: &Path) -> Result<BufWriter<Box<SeekableWrite>>, OpenWriteError> {
let mut cache_wlock = self.cache.write().unwrap();
// TODO might poison our lock. I don't know have a sound solution yet.
let path_buf = path.to_owned();
if self.underlying.exists(path) {
return Err(OpenWriteError::FileAlreadyExists(path_buf));
}
let exists = cache_wlock.write(path_buf.clone(), &[]);
// force the creation of the file to mimic the MMap directory.
if exists {
Err(OpenWriteError::FileAlreadyExists(path_buf))
} else {
let vec_writer = NRTWriter::new(path_buf.clone(), self.clone());
Ok(BufWriter::new(Box::new(vec_writer)))
}
}
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
self.underlying.atomic_read(path)
}
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
self.underlying.atomic_write(path, data)
}
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.underlying.watch(watch_callback)
}
}

View File

@@ -1,8 +1,8 @@
-use super::shared_vec_slice::SharedVecSlice;
-use common::make_io_err;
-use directory::error::{DeleteError, IOError, OpenReadError, OpenWriteError};
use core::META_FILEPATH;
use directory::error::{DeleteError, OpenReadError, OpenWriteError};
use directory::WatchCallbackList;
use directory::WritePtr;
-use directory::{Directory, ReadOnlySource};
use directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use std::collections::HashMap;
use std::fmt;
use std::io::{self, BufWriter, Cursor, Seek, SeekFrom, Write};
@@ -22,13 +22,13 @@ use std::sync::{Arc, RwLock};
///
struct VecWriter {
    path: PathBuf,
-    shared_directory: InnerDirectory,
    shared_directory: RAMDirectory,
    data: Cursor<Vec<u8>>,
    is_flushed: bool,
}
impl VecWriter {
-    fn new(path_buf: PathBuf, shared_directory: InnerDirectory) -> VecWriter {
    fn new(path_buf: PathBuf, shared_directory: RAMDirectory) -> VecWriter {
        VecWriter {
            path: path_buf,
            data: Cursor::new(Vec::new()),
@@ -64,75 +64,44 @@ impl Write for VecWriter {
    fn flush(&mut self) -> io::Result<()> {
        self.is_flushed = true;
-        self.shared_directory
-            .write(self.path.clone(), self.data.get_ref())?;
        let mut fs = self.shared_directory.fs.write().unwrap();
        fs.write(self.path.clone(), self.data.get_ref());
        Ok(())
    }
}
#[derive(Clone)] #[derive(Default)]
struct InnerDirectory(Arc<RwLock<HashMap<PathBuf, Arc<Vec<u8>>>>>); pub(crate) struct InnerRamDirectory {
fs: HashMap<PathBuf, ReadOnlySource>,
watch_router: WatchCallbackList,
}
impl InnerDirectory { impl InnerRamDirectory {
fn new() -> InnerDirectory { pub fn write(&mut self, path: PathBuf, data: &[u8]) -> bool {
InnerDirectory(Arc::new(RwLock::new(HashMap::new()))) let data = ReadOnlySource::new(Vec::from(data));
self.fs.insert(path, data).is_some()
} }
fn write(&self, path: PathBuf, data: &[u8]) -> io::Result<bool> { pub fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> {
let mut map = self.0.write().map_err(|_| { self.fs
make_io_err(format!( .get(path)
"Failed to lock the directory, when trying to write {:?}", .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
path .map(|el| el.clone())
))
})?;
let prev_value = map.insert(path, Arc::new(Vec::from(data)));
Ok(prev_value.is_some())
} }
fn open_read(&self, path: &Path) -> Result<ReadOnlySource, OpenReadError> { pub fn delete(&mut self, path: &Path) -> result::Result<(), DeleteError> {
self.0 match self.fs.remove(path) {
.read() Some(_) => Ok(()),
.map_err(|_| { None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
let msg = format!( }
"Failed to acquire read lock for the \
directory when trying to read {:?}",
path
);
let io_err = make_io_err(msg);
OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err))
})
.and_then(|readable_map| {
readable_map
.get(path)
.ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))
.map(Arc::clone)
.map(|data| ReadOnlySource::Anonymous(SharedVecSlice::new(data)))
})
} }
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> { pub fn exists(&self, path: &Path) -> bool {
self.0 self.fs.contains_key(path)
.write()
.map_err(|_| {
let msg = format!(
"Failed to acquire write lock for the \
directory when trying to delete {:?}",
path
);
let io_err = make_io_err(msg);
DeleteError::IOError(IOError::with_path(path.to_owned(), io_err))
})
.and_then(|mut writable_map| match writable_map.remove(path) {
Some(_) => Ok(()),
None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
})
} }
fn exists(&self, path: &Path) -> bool { pub fn watch(&mut self, watch_handle: WatchCallback) -> WatchHandle {
self.0 self.watch_router.subscribe(watch_handle)
.read()
.expect("Failed to get read lock directory.")
.contains_key(path)
} }
} }
@@ -147,33 +116,36 @@ impl fmt::Debug for RAMDirectory {
/// It is mainly meant for unit testing.
/// Writes are only made visible upon flushing.
///
-#[derive(Clone)]
#[derive(Clone, Default)]
pub struct RAMDirectory {
-    fs: InnerDirectory,
    fs: Arc<RwLock<InnerRamDirectory>>,
}
impl RAMDirectory {
    /// Constructor
    pub fn create() -> RAMDirectory {
-        RAMDirectory {
-            fs: InnerDirectory::new(),
-        }
        Self::default()
    }
}
impl Directory for RAMDirectory {
    fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
-        self.fs.open_read(path)
        self.fs.read().unwrap().open_read(path)
}
fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
self.fs.write().unwrap().delete(path)
}
fn exists(&self, path: &Path) -> bool {
self.fs.read().unwrap().exists(path)
} }
    fn open_write(&mut self, path: &Path) -> Result<WritePtr, OpenWriteError> {
        let mut fs = self.fs.write().unwrap();
        let path_buf = PathBuf::from(path);
-        let vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());
-        let exists = self
-            .fs
-            .write(path_buf.clone(), &Vec::new())
-            .map_err(|err| IOError::with_path(path.to_owned(), err))?;
        let vec_writer = VecWriter::new(path_buf.clone(), self.clone());
        let exists = fs.write(path_buf.clone(), &[]);
        // force the creation of the file to mimic the MMap directory.
        if exists {
            Err(OpenWriteError::FileAlreadyExists(path_buf))
@@ -182,17 +154,8 @@ impl Directory for RAMDirectory {
} }
} }
-    fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
-        self.fs.delete(path)
-    }
-    fn exists(&self, path: &Path) -> bool {
-        self.fs.exists(path)
-    }
    fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
-        let read = self.open_read(path)?;
-        Ok(read.as_slice().to_owned())
        Ok(self.open_read(path)?.as_slice().to_owned())
    }
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -201,10 +164,20 @@ impl Directory for RAMDirectory {
            msg.unwrap_or("Undefined".to_string())
        )));
        let path_buf = PathBuf::from(path);
-        let mut vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());
-        self.fs.write(path_buf, &Vec::new())?;
        // Reserve the path to prevent calls to .write() to succeed.
        self.fs.write().unwrap().write(path_buf.clone(), &[]);
        let mut vec_writer = VecWriter::new(path_buf.clone(), self.clone());
        vec_writer.write_all(data)?;
        vec_writer.flush()?;
if path == Path::new(&*META_FILEPATH) {
self.fs.write().unwrap().watch_router.broadcast();
}
Ok(()) Ok(())
} }
fn watch(&self, watch_callback: WatchCallback) -> WatchHandle {
self.fs.write().unwrap().watch(watch_callback)
}
} }

View File

@@ -1,9 +1,9 @@
-use super::shared_vec_slice::SharedVecSlice;
use common::HasLen;
-#[cfg(feature = "mmap")]
-use fst::raw::MmapReadOnly;
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::ops::Deref;
use std::sync::Arc;
pub type BoxedData = Box<Deref<Target = [u8]> + Send + Sync + 'static>;
/// Read object that represents files in tantivy. /// Read object that represents files in tantivy.
/// ///
@@ -11,12 +11,10 @@ use std::ops::Deref;
/// the data in the form of a constant read-only `&[u8]`. /// the data in the form of a constant read-only `&[u8]`.
/// Whatever happens to the directory file, the data /// Whatever happens to the directory file, the data
/// hold by this object should never be altered or destroyed. /// hold by this object should never be altered or destroyed.
-pub enum ReadOnlySource {
-    /// Mmap source of data
-    #[cfg(feature = "mmap")]
-    Mmap(MmapReadOnly),
-    /// Wrapping a `Vec<u8>`
-    Anonymous(SharedVecSlice),
-}
pub struct ReadOnlySource {
    data: Arc<BoxedData>,
    start: usize,
    stop: usize,
}
unsafe impl StableDeref for ReadOnlySource {} unsafe impl StableDeref for ReadOnlySource {}
@@ -30,19 +28,38 @@ impl Deref for ReadOnlySource {
} }
} }
impl From<Arc<BoxedData>> for ReadOnlySource {
fn from(data: Arc<BoxedData>) -> Self {
let len = data.len();
ReadOnlySource {
data,
start: 0,
stop: len,
}
}
}
impl ReadOnlySource { impl ReadOnlySource {
pub(crate) fn new<D>(data: D) -> ReadOnlySource
where
D: Deref<Target = [u8]> + Send + Sync + 'static,
{
let len = data.len();
ReadOnlySource {
data: Arc::new(Box::new(data)),
start: 0,
stop: len,
}
}
/// Creates an empty ReadOnlySource /// Creates an empty ReadOnlySource
    pub fn empty() -> ReadOnlySource {
-        ReadOnlySource::Anonymous(SharedVecSlice::empty())
        ReadOnlySource::new(&[][..])
    }
    /// Returns the data underlying the ReadOnlySource object.
    pub fn as_slice(&self) -> &[u8] {
-        match *self {
-            #[cfg(feature = "mmap")]
-            ReadOnlySource::Mmap(ref mmap_read_only) => mmap_read_only.as_slice(),
-            ReadOnlySource::Anonymous(ref shared_vec) => shared_vec.as_slice(),
-        }
        &self.data[self.start..self.stop]
    }
/// Splits into 2 `ReadOnlySource`, at the offset given /// Splits into 2 `ReadOnlySource`, at the offset given
@@ -63,22 +80,18 @@ impl ReadOnlySource {
/// worth of data in anonymous memory, and only a /// worth of data in anonymous memory, and only a
/// 1KB slice is remaining, the whole `500MBs` /// 1KB slice is remaining, the whole `500MBs`
/// are retained in memory. /// are retained in memory.
-    pub fn slice(&self, from_offset: usize, to_offset: usize) -> ReadOnlySource {
    pub fn slice(&self, start: usize, stop: usize) -> ReadOnlySource {
        assert!(
-            from_offset <= to_offset,
            start <= stop,
            "Requested negative slice [{}..{}]",
-            from_offset,
-            to_offset
            start,
            stop
        );
-        match *self {
-            #[cfg(feature = "mmap")]
-            ReadOnlySource::Mmap(ref mmap_read_only) => {
-                let sliced_mmap = mmap_read_only.range(from_offset, to_offset - from_offset);
-                ReadOnlySource::Mmap(sliced_mmap)
-            }
-            ReadOnlySource::Anonymous(ref shared_vec) => {
-                ReadOnlySource::Anonymous(shared_vec.slice(from_offset, to_offset))
-            }
-        }
        assert!(stop <= self.len());
        ReadOnlySource {
            data: self.data.clone(),
            start: self.start + start,
            stop: self.start + stop,
        }
    }
@@ -87,8 +100,7 @@ impl ReadOnlySource {
/// ///
/// Equivalent to `.slice(from_offset, self.len())` /// Equivalent to `.slice(from_offset, self.len())`
    pub fn slice_from(&self, from_offset: usize) -> ReadOnlySource {
-        let len = self.len();
-        self.slice(from_offset, len)
        self.slice(from_offset, self.len())
    }
/// Like `.slice(...)` but enforcing only the `to` /// Like `.slice(...)` but enforcing only the `to`
@@ -102,19 +114,18 @@ impl ReadOnlySource {
impl HasLen for ReadOnlySource {
    fn len(&self) -> usize {
-        self.as_slice().len()
        self.stop - self.start
    }
}
impl Clone for ReadOnlySource {
    fn clone(&self) -> Self {
-        self.slice(0, self.len())
        self.slice_from(0)
    }
}
impl From<Vec<u8>> for ReadOnlySource {
    fn from(data: Vec<u8>) -> ReadOnlySource {
-        let shared_data = SharedVecSlice::from(data);
-        ReadOnlySource::Anonymous(shared_data)
        ReadOnlySource::new(data)
    }
}
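Since a sliced `ReadOnlySource` now shares the same `Arc<BoxedData>` and only narrows `start`/`stop`, slicing is cheap but, as the comment above warns, it keeps the whole underlying buffer alive. A small illustration, assuming the `ReadOnlySource` from this file is in scope (the byte values are arbitrary):

    fn slices_share_one_allocation() {
        // The Vec is boxed once behind an Arc; slices only adjust offsets.
        let source = ReadOnlySource::from(vec![1u8, 2, 3, 4, 5, 6, 7, 8]);
        let middle = source.slice(2, 6);
        assert_eq!(middle.as_slice(), &[3u8, 4, 5, 6]);
        // Dropping `source` does not free the 8-byte buffer: `middle` still holds the Arc.
        drop(source);
        assert_eq!(middle.as_slice().len(), 4);
    }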

View File

@@ -1,41 +0,0 @@
use std::sync::Arc;
#[derive(Clone)]
pub struct SharedVecSlice {
pub data: Arc<Vec<u8>>,
pub start: usize,
pub len: usize,
}
impl SharedVecSlice {
pub fn empty() -> SharedVecSlice {
SharedVecSlice::new(Arc::new(Vec::new()))
}
pub fn new(data: Arc<Vec<u8>>) -> SharedVecSlice {
let data_len = data.len();
SharedVecSlice {
data,
start: 0,
len: data_len,
}
}
pub fn as_slice(&self) -> &[u8] {
&self.data[self.start..self.start + self.len]
}
pub fn slice(&self, from_offset: usize, to_offset: usize) -> SharedVecSlice {
SharedVecSlice {
data: Arc::clone(&self.data),
start: self.start + from_offset,
len: to_offset - from_offset,
}
}
}
impl From<Vec<u8>> for SharedVecSlice {
fn from(data: Vec<u8>) -> SharedVecSlice {
SharedVecSlice::new(Arc::new(data))
}
}

View File

@@ -1,7 +1,13 @@
use super::*; use super::*;
use std::io::{Seek, SeekFrom, Write}; use std::io::{Seek, SeekFrom, Write};
use std::mem;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::time; use std::time;
use std::time::Duration;
lazy_static! { lazy_static! {
static ref TEST_PATH: &'static Path = Path::new("some_path_for_test"); static ref TEST_PATH: &'static Path = Path::new("some_path_for_test");
@@ -30,19 +36,18 @@ fn ram_directory_panics_if_flush_forgotten() {
fn test_simple(directory: &mut Directory) { fn test_simple(directory: &mut Directory) {
{ {
{ let mut write_file = directory.open_write(*TEST_PATH).unwrap();
let mut write_file = directory.open_write(*TEST_PATH).unwrap(); assert!(directory.exists(*TEST_PATH));
assert!(directory.exists(*TEST_PATH)); write_file.write_all(&[4]).unwrap();
write_file.write_all(&[4]).unwrap(); write_file.write_all(&[3]).unwrap();
write_file.write_all(&[3]).unwrap(); write_file.write_all(&[7, 3, 5]).unwrap();
write_file.write_all(&[7, 3, 5]).unwrap(); write_file.flush().unwrap();
write_file.flush().unwrap(); }
} {
let read_file = directory.open_read(*TEST_PATH).unwrap(); let read_file = directory.open_read(*TEST_PATH).unwrap();
let data: &[u8] = &*read_file; let data: &[u8] = &*read_file;
assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]); assert_eq!(data, &[4u8, 3u8, 7u8, 3u8, 5u8]);
} }
assert!(directory.delete(*TEST_PATH).is_ok()); assert!(directory.delete(*TEST_PATH).is_ok());
assert!(!directory.exists(*TEST_PATH)); assert!(!directory.exists(*TEST_PATH));
} }
@@ -121,6 +126,41 @@ fn test_directory(directory: &mut Directory) {
test_directory_delete(directory); test_directory_delete(directory);
test_lock_non_blocking(directory); test_lock_non_blocking(directory);
test_lock_blocking(directory); test_lock_blocking(directory);
test_watch(directory);
}
fn test_watch(directory: &mut Directory) {
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let watch_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok());
thread::sleep(Duration::new(0, 10_000));
assert_eq!(0, counter.load(Ordering::SeqCst));
let watch_handle = directory.watch(watch_callback);
for i in 0..10 {
assert_eq!(i, counter.load(Ordering::SeqCst));
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok());
for _ in 0..100 {
if counter.load(Ordering::SeqCst) > i {
break;
}
thread::sleep(Duration::from_millis(10));
}
assert_eq!(i + 1, counter.load(Ordering::SeqCst));
}
mem::drop(watch_handle);
assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data")
.is_ok());
thread::sleep(Duration::from_millis(200));
assert_eq!(10, counter.load(Ordering::SeqCst));
} }
fn test_lock_non_blocking(directory: &mut Directory) { fn test_lock_non_blocking(directory: &mut Directory) {

View File

@@ -0,0 +1,156 @@
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;
/// Type alias for callbacks registered when watching files of a `Directory`.
pub type WatchCallback = Box<Fn() -> () + Sync + Send>;
/// Helper struct to implement the watch method in `Directory` implementations.
///
/// It registers callbacks (See `.subscribe(...)`) and
/// calls them upon calls to `.broadcast(...)`.
#[derive(Default)]
pub struct WatchCallbackList {
router: RwLock<Vec<Weak<WatchCallback>>>,
}
/// Controls how long a directory should watch for a file change.
///
/// After all the clones of `WatchHandle` are dropped, the associated callback will not be called when a
/// file change is detected.
#[must_use = "This `WatchHandle` controls the lifetime of the watch and should therefore be used."]
#[derive(Clone)]
pub struct WatchHandle(Arc<WatchCallback>);
impl WatchCallbackList {
/// Subscribes a new callback and returns a handle that controls the lifetime of the callback.
pub fn subscribe(&self, watch_callback: WatchCallback) -> WatchHandle {
let watch_callback_arc = Arc::new(watch_callback);
let watch_callback_weak = Arc::downgrade(&watch_callback_arc);
self.router.write().unwrap().push(watch_callback_weak);
WatchHandle(watch_callback_arc)
}
fn list_callback(&self) -> Vec<Arc<WatchCallback>> {
let mut callbacks = vec![];
let mut router_wlock = self.router.write().unwrap();
let mut i = 0;
while i < router_wlock.len() {
if let Some(watch) = router_wlock[i].upgrade() {
callbacks.push(watch);
i += 1;
} else {
router_wlock.swap_remove(i);
}
}
callbacks
}
/// Triggers all callbacks
pub fn broadcast(&self) {
let callbacks = self.list_callback();
let spawn_res = std::thread::Builder::new()
.name("watch-callbacks".to_string())
.spawn(move || {
for callback in callbacks {
callback();
}
});
if let Err(err) = spawn_res {
error!(
"Failed to spawn thread to call watch callbacks. Cause: {:?}",
err
);
}
}
}
#[cfg(test)]
mod tests {
use directory::WatchCallbackList;
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
const WAIT_TIME: u64 = 20;
#[test]
fn test_watch_event_router_simple() {
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
watch_event_router.broadcast();
assert_eq!(0, counter.load(Ordering::SeqCst));
let handle_a = watch_event_router.subscribe(inc_callback);
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst));
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(1, counter.load(Ordering::SeqCst));
watch_event_router.broadcast();
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(4, counter.load(Ordering::SeqCst));
}
#[test]
fn test_watch_event_router_multiple_callback_same_key() {
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let inc_callback = |inc: usize| {
let counter_clone = counter.clone();
Box::new(move || {
counter_clone.fetch_add(inc, Ordering::SeqCst);
})
};
let handle_a = watch_event_router.subscribe(inc_callback(1));
let handle_a2 = watch_event_router.subscribe(inc_callback(10));
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(0, counter.load(Ordering::SeqCst));
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(22, counter.load(Ordering::SeqCst));
mem::drop(handle_a);
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst));
mem::drop(handle_a2);
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(32, counter.load(Ordering::SeqCst));
}
#[test]
fn test_watch_event_router_multiple_callback_different_key() {
let watch_event_router = WatchCallbackList::default();
let counter: Arc<AtomicUsize> = Default::default();
let counter_clone = counter.clone();
let inc_callback = Box::new(move || {
counter_clone.fetch_add(1, Ordering::SeqCst);
});
let handle_a = watch_event_router.subscribe(inc_callback);
assert_eq!(0, counter.load(Ordering::SeqCst));
watch_event_router.broadcast();
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
thread::sleep(Duration::from_millis(WAIT_TIME));
mem::drop(handle_a);
watch_event_router.broadcast();
thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst));
}
}
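
For reference, a minimal usage sketch of the watch API shown above (only names visible in this file are used; this is an illustrative aside, not part of the changeset):

// `subscribe` returns a handle that keeps the callback registered.
let watch_list = WatchCallbackList::default();
let handle = watch_list.subscribe(Box::new(|| println!("directory content changed")));
// `broadcast` runs every live callback on the spawned "watch-callbacks" thread.
watch_list.broadcast();
// Dropping the handle releases the callback; later broadcasts skip it.
drop(handle);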

View File

@@ -162,6 +162,7 @@ impl From<OpenDirectoryError> for TantivyError {
  OpenDirectoryError::NotADirectory(directory_path) => {
  TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
  }
+ OpenDirectoryError::IoError(err) => TantivyError::IOError(IOError::from(err)),
  }
  }
  }

View File

@@ -22,9 +22,7 @@ mod tests {
  index_writer.add_document(doc!(field=>vec![1u8, 3, 5, 7, 9]));
  index_writer.add_document(doc!(field=>vec![0u8; 1000]));
  assert!(index_writer.commit().is_ok());
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ let searcher = index.reader().unwrap().searcher();
  let reader = searcher.segment_reader(0);
  let bytes_reader = reader.bytes_fast_field_reader(field).unwrap();

View File

@@ -7,7 +7,13 @@ pub use self::writer::MultiValueIntFastFieldWriter;
  #[cfg(test)]
  mod tests {
+ extern crate time;
+ use self::time::Duration;
+ use collector::TopDocs;
+ use query::QueryParser;
  use schema::Cardinality;
+ use schema::Facet;
  use schema::IntOptions;
  use schema::Schema;
  use Index;
@@ -28,11 +34,12 @@ mod tests {
  index_writer.add_document(doc!(field=>5u64, field=>20u64,field=>1u64));
  assert!(index_writer.commit().is_ok());
- index.load_searchers().unwrap();
- let searcher = index.searcher();
- let reader = searcher.segment_reader(0);
+ let searcher = index.reader().unwrap().searcher();
+ let segment_reader = searcher.segment_reader(0);
  let mut vals = Vec::new();
- let multi_value_reader = reader.multi_fast_field_reader::<u64>(field).unwrap();
+ let multi_value_reader = segment_reader
+ .multi_fast_field_reader::<u64>(field)
+ .unwrap();
  {
  multi_value_reader.get_vals(2, &mut vals);
  assert_eq!(&vals, &[4u64]);
@@ -47,6 +54,133 @@ mod tests {
  }
  }
#[test]
fn test_multivalued_date() {
let mut schema_builder = Schema::builder();
let date_field = schema_builder.add_date_field(
"multi_date_field",
IntOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed()
.set_stored(),
);
let time_i =
schema_builder.add_i64_field("time_stamp_i", IntOptions::default().set_stored());
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
let first_time_stamp = chrono::Utc::now();
index_writer.add_document(
doc!(date_field=>first_time_stamp, date_field=>first_time_stamp, time_i=>1i64),
);
index_writer.add_document(doc!(time_i=>0i64));
// add one second
index_writer
.add_document(doc!(date_field=>first_time_stamp + Duration::seconds(1), time_i=>2i64));
// add another second
let two_secs_ahead = first_time_stamp + Duration::seconds(2);
index_writer.add_document(doc!(date_field=>two_secs_ahead, date_field=>two_secs_ahead,date_field=>two_secs_ahead, time_i=>3i64));
assert!(index_writer.commit().is_ok());
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let reader = searcher.segment_reader(0);
assert_eq!(reader.num_docs(), 4);
{
let parser = QueryParser::for_index(&index, vec![date_field]);
let query = parser
.parse_query(&format!("\"{}\"", first_time_stamp.to_rfc3339()).to_string())
.expect("could not parse query");
let results = searcher
.search(&query, &TopDocs::with_limit(5))
.expect("could not query index");
assert_eq!(results.len(), 1);
for (_score, doc_address) in results {
let retrieved_doc = searcher.doc(doc_address).expect("cannot fetch doc");
assert_eq!(
retrieved_doc
.get_first(date_field)
.expect("cannot find value")
.date_value()
.timestamp(),
first_time_stamp.timestamp()
);
assert_eq!(
retrieved_doc
.get_first(time_i)
.expect("cannot find value")
.i64_value(),
1i64
);
}
}
{
let parser = QueryParser::for_index(&index, vec![date_field]);
let query = parser
.parse_query(&format!("\"{}\"", two_secs_ahead.to_rfc3339()).to_string())
.expect("could not parse query");
let results = searcher
.search(&query, &TopDocs::with_limit(5))
.expect("could not query index");
assert_eq!(results.len(), 1);
for (_score, doc_address) in results {
let retrieved_doc = searcher.doc(doc_address).expect("cannot fetch doc");
assert_eq!(
retrieved_doc
.get_first(date_field)
.expect("cannot find value")
.date_value()
.timestamp(),
two_secs_ahead.timestamp()
);
assert_eq!(
retrieved_doc
.get_first(time_i)
.expect("cannot find value")
.i64_value(),
3i64
);
}
}
// TODO: support Date range queries
// {
// let parser = QueryParser::for_index(&index, vec![date_field]);
// let range_q = format!("\"{}\"..\"{}\"",
// (first_time_stamp + Duration::seconds(1)).to_rfc3339(),
// (first_time_stamp + Duration::seconds(3)).to_rfc3339()
// );
// let query = parser.parse_query(&range_q)
// .expect("could not parse query");
// let results = searcher.search(&query, &TopDocs::with_limit(5))
// .expect("could not query index");
//
//
// assert_eq!(results.len(), 2);
// for (i, doc_pair) in results.iter().enumerate() {
// let retrieved_doc = searcher.doc(doc_pair.1).expect("cannot fetch doc");
// let offset_sec = match i {
// 0 => 1,
// 1 => 3,
// _ => panic!("should not have more than 2 docs")
// };
// let time_i_val = match i {
// 0 => 2,
// 1 => 3,
// _ => panic!("should not have more than 2 docs")
// };
// assert_eq!(retrieved_doc.get_first(date_field).expect("cannot find value").date_value().timestamp(),
// (first_time_stamp + Duration::seconds(offset_sec)).timestamp());
// assert_eq!(retrieved_doc.get_first(time_i).expect("cannot find value").i64_value(), time_i_val);
// }
// }
}
  #[test]
  fn test_multivalued_i64() {
  let mut schema_builder = Schema::builder();
@@ -63,8 +197,7 @@ mod tests {
  index_writer.add_document(doc!(field=> -5i64, field => -20i64, field=>1i64));
  assert!(index_writer.commit().is_ok());
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ let searcher = index.reader().unwrap().searcher();
  let reader = searcher.segment_reader(0);
  let mut vals = Vec::new();
  let multi_value_reader = reader.multi_fast_field_reader::<i64>(field).unwrap();
@@ -85,4 +218,17 @@ mod tests {
  assert_eq!(&vals, &[-5i64, -20i64, 1i64]);
  }
  }
#[test]
#[ignore]
fn test_many_facets() {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_facet_field("facetfield");
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for i in 0..100_000 {
index_writer.add_document(doc!(field=> Facet::from(format!("/lang/{}", i).as_str())));
}
assert!(index_writer.commit().is_ok());
}
  }

View File

@@ -75,8 +75,7 @@ mod tests {
  index_writer.add_document(doc);
  }
  index_writer.commit().expect("Commit failed");
- index.load_searchers().expect("Reloading searchers");
- let searcher = index.searcher();
+ let searcher = index.reader().unwrap().searcher();
  let segment_reader = searcher.segment_reader(0);
  let mut facet_reader = segment_reader.facet_reader(facet_field).unwrap();

View File

@@ -32,7 +32,7 @@ use DocId;
  /// term ids when the segment is getting serialized.
  pub struct MultiValueIntFastFieldWriter {
  field: Field,
- vals: Vec<u64>,
+ vals: Vec<UnorderedTermId>,
  doc_index: Vec<u64>,
  is_facet: bool,
  }

View File

@@ -59,7 +59,7 @@ impl<Item: FastValue> FastFieldReader<Item> {
  /// May panic if `doc` is greater than the segment
  // `maxdoc`.
  pub fn get(&self, doc: DocId) -> Item {
- self.get_u64(doc as u64)
+ self.get_u64(u64::from(doc))
  }
  pub(crate) fn get_u64(&self, doc: u64) -> Item {
@@ -98,7 +98,7 @@ impl<Item: FastValue> FastFieldReader<Item> {
  /// May panic if `start + output.len()` is greater than
  /// the segment's `maxdoc`.
  pub fn get_range(&self, start: DocId, output: &mut [Item]) {
- self.get_range_u64(start as u64, output);
+ self.get_range_u64(u64::from(start), output);
  }
  /// Returns the minimum value for this fast field.

View File

@@ -13,15 +13,15 @@ fn check_index_content(searcher: &Searcher, vals: &HashSet<u64>) {
  #[test]
  #[ignore]
+ #[cfg(feature = "mmap")]
  fn test_indexing() {
  let mut schema_builder = Schema::builder();
- let id_field = schema_builder.add_u64_field("id", INT_INDEXED);
- let multiples_field = schema_builder.add_u64_field("multiples", INT_INDEXED);
+ let id_field = schema_builder.add_u64_field("id", INDEXED);
+ let multiples_field = schema_builder.add_u64_field("multiples", INDEXED);
  let schema = schema_builder.build();
  let index = Index::create_from_tempdir(schema).unwrap();
+ let reader = index.reader().unwrap();
  let mut rng = thread_rng();
@@ -36,8 +36,8 @@ fn test_indexing() {
  index_writer.commit().expect("Commit failed");
  committed_docs.extend(&uncommitted_docs);
  uncommitted_docs.clear();
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  // check that everything is correct.
  check_index_content(&searcher, &committed_docs);
  } else {

View File

@@ -179,6 +179,11 @@ pub struct DeleteCursor {
  }
  impl DeleteCursor {
+ pub fn empty() -> DeleteCursor {
+ DeleteQueue::new().cursor()
+ }
  /// Skips operations and position it so that
  /// - either all of the delete operation currently in the
  /// queue are consume and the next get will return None.

View File

@@ -44,8 +44,8 @@ pub const HEAP_SIZE_MAX: usize = u32::max_value() as usize - MARGIN_IN_BYTES;
  // reaches `PIPELINE_MAX_SIZE_IN_DOCS`
  const PIPELINE_MAX_SIZE_IN_DOCS: usize = 10_000;
- type DocumentSender = channel::Sender<Vec<AddOperation>>;
- type DocumentReceiver = channel::Receiver<Vec<AddOperation>>;
+ type OperationSender = channel::Sender<Vec<AddOperation>>;
+ type OperationReceiver = channel::Receiver<Vec<AddOperation>>;
  /// Split the thread memory budget into
  /// - the heap size
@@ -85,8 +85,8 @@ pub struct IndexWriter {
  workers_join_handle: Vec<JoinHandle<Result<()>>>,
- document_receiver: DocumentReceiver,
- document_sender: DocumentSender,
+ operation_receiver: OperationReceiver,
+ operation_sender: OperationSender,
  segment_updater: SegmentUpdater,
@@ -133,7 +133,7 @@ pub fn open_index_writer(
  let err_msg = format!("The heap size per thread cannot exceed {}", HEAP_SIZE_MAX);
  return Err(TantivyError::InvalidArgument(err_msg));
  }
- let (document_sender, document_receiver): (DocumentSender, DocumentReceiver) =
+ let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
  channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
  let delete_queue = DeleteQueue::new();
@@ -151,8 +151,8 @@ pub fn open_index_writer(
  heap_size_in_bytes_per_thread,
  index: index.clone(),
- document_receiver,
- document_sender,
+ operation_receiver: document_receiver,
+ operation_sender: document_sender,
  segment_updater,
@@ -259,7 +259,7 @@ pub fn advance_deletes(
  write_delete_bitset(&delete_bitset, &mut delete_file)?;
  }
  }
- segment_entry.set_meta(segment.meta().clone());
+ segment_entry.set_meta(target_opstamp, segment.meta().clone());
  Ok(())
  }
@@ -326,7 +326,12 @@ fn index_documents(
  // to even open the segment.
  None
  };
- let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
+ let segment_entry = SegmentEntry::new(
+ segment_meta,
+ delete_cursor,
+ delete_bitset_opt,
+ last_docstamp,
+ );
  Ok(segment_updater.add_segment(generation, segment_entry))
} }
@@ -335,7 +340,7 @@ impl IndexWriter {
  pub fn wait_merging_threads(mut self) -> Result<()> {
  // this will stop the indexing thread,
  // dropping the last reference to the segment_updater.
- drop(self.document_sender);
+ drop(self.operation_sender);
  let former_workers_handles = mem::replace(&mut self.workers_join_handle, vec![]);
  for join_handle in former_workers_handles {
@@ -361,9 +366,9 @@ impl IndexWriter {
  }
  #[doc(hidden)]
- pub fn add_segment(&mut self, segment_meta: SegmentMeta) {
+ pub fn add_segment(&mut self, segment_meta: SegmentMeta, opstamp: u64) {
  let delete_cursor = self.delete_queue.cursor();
- let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None);
+ let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, None, opstamp);
  self.segment_updater
  .add_segment(self.generation, segment_entry);
  }
@@ -384,7 +389,7 @@ impl IndexWriter {
  /// The thread consumes documents from the pipeline.
  ///
  fn add_indexing_worker(&mut self) -> Result<()> {
- let document_receiver_clone = self.document_receiver.clone();
+ let document_receiver_clone = self.operation_receiver.clone();
  let mut segment_updater = self.segment_updater.clone();
  let generation = self.generation;
@@ -479,11 +484,11 @@ impl IndexWriter {
  /// when no documents are remaining.
  ///
  /// Returns the former segment_ready channel.
- fn recreate_document_channel(&mut self) -> DocumentReceiver {
- let (document_sender, document_receiver): (DocumentSender, DocumentReceiver) =
+ fn recreate_document_channel(&mut self) -> OperationReceiver {
+ let (document_sender, document_receiver): (OperationSender, OperationReceiver) =
  channel::bounded(PIPELINE_MAX_SIZE_IN_DOCS);
- mem::replace(&mut self.document_sender, document_sender);
- mem::replace(&mut self.document_receiver, document_receiver)
+ mem::replace(&mut self.operation_sender, document_sender);
+ mem::replace(&mut self.operation_receiver, document_receiver)
  }
/// Rollback to the last commit /// Rollback to the last commit
@@ -501,7 +506,7 @@ impl IndexWriter {
  // segment updates will be ignored.
  self.segment_updater.kill();
- let document_receiver = self.document_receiver.clone();
+ let document_receiver = self.operation_receiver.clone();
  // take the directory lock to create a new index_writer.
  let directory_lock = self
@@ -527,7 +532,7 @@ impl IndexWriter {
  //
  // This will reach an end as the only document_sender
  // was dropped with the index_writer.
- for _ in document_receiver.clone() {}
+ for _ in document_receiver.iter() {}
  Ok(())
  }
@@ -554,6 +559,16 @@ impl IndexWriter {
  /// using this API.
  /// See [`PreparedCommit::set_payload()`](PreparedCommit.html)
  pub fn prepare_commit(&mut self) -> Result<PreparedCommit> {
+ info!("Preparing commit");
+ self.prepare_commit_internal(false)
+ }
+ pub fn prepare_commit_soft(&mut self) -> Result<PreparedCommit> {
+ info!("Preparing soft commit");
+ self.prepare_commit_internal(true)
+ }
+ pub(crate) fn prepare_commit_internal(&mut self, soft: bool) -> Result<PreparedCommit> {
  // Here, because we join all of the worker threads,
  // all of the segment update for this commit have been
  // sent.
@@ -576,13 +591,13 @@ impl IndexWriter {
  let indexing_worker_result = worker_handle
  .join()
  .map_err(|e| TantivyError::ErrorInThread(format!("{:?}", e)))?;
- indexing_worker_result?;
- // add a new worker for the next generation.
+ // add a new worker for the next generation, whether the worker failed or not.
  self.add_indexing_worker()?;
+ indexing_worker_result?;
  }
  let commit_opstamp = self.stamper.stamp();
- let prepared_commit = PreparedCommit::new(self, commit_opstamp);
+ let prepared_commit = PreparedCommit::new(self, commit_opstamp, soft);
  info!("Prepared commit {}", commit_opstamp);
  Ok(prepared_commit)
  }
@@ -605,6 +620,11 @@ impl IndexWriter {
  self.prepare_commit()?.commit()
  }
+ pub fn soft_commit(&mut self) -> Result<u64> {
+ self.prepare_commit_soft()?.commit()
+ }
  pub(crate) fn segment_updater(&self) -> &SegmentUpdater {
  &self.segment_updater
  }
@@ -648,7 +668,7 @@ impl IndexWriter {
  pub fn add_document(&mut self, document: Document) -> u64 {
  let opstamp = self.stamper.stamp();
  let add_operation = AddOperation { opstamp, document };
- let send_result = self.document_sender.send(vec![add_operation]);
+ let send_result = self.operation_sender.send(vec![add_operation]);
  if let Err(e) = send_result {
  panic!("Failed to index document. Sending to indexing channel failed. This probably means all of the indexing threads have panicked. {:?}", e);
  }
@@ -666,7 +686,7 @@ impl IndexWriter {
  let Range { start, end } = self.stamper.stamps(count + 1u64);
  let last_opstamp = end - 1;
  let stamps = Range {
- start: start,
+ start,
  end: last_opstamp,
  };
  (last_opstamp, stamps)
@@ -675,16 +695,16 @@ impl IndexWriter {
  /// Runs a group of document operations ensuring that the operations are
  /// assigned contigous u64 opstamps and that add operations of the same
  /// group are flushed into the same segment.
  ///
  /// If the indexing pipeline is full, this call may block.
  ///
  /// Each operation of the given `user_operations` will receive an in-order,
  /// contiguous u64 opstamp. The entire batch itself is also given an
  /// opstamp that is 1 greater than the last given operation. This
  /// `batch_opstamp` is the return value of `run`. An empty group of
  /// `user_operations`, an empty `Vec<UserOperation>`, still receives
  /// a valid opstamp even though no changes were _actually_ made to the index.
  ///
  /// Like adds and deletes (see `IndexWriter.add_document` and
  /// `IndexWriter.delete_term`), the changes made by calling `run` will be
  /// visible to readers only after calling `commit()`.
@@ -700,22 +720,16 @@ impl IndexWriter {
  for (user_op, opstamp) in user_operations.into_iter().zip(stamps) {
  match user_op {
  UserOperation::Delete(term) => {
- let delete_operation = DeleteOperation {
- opstamp: opstamp,
- term: term,
- };
+ let delete_operation = DeleteOperation { opstamp, term };
  self.delete_queue.push(delete_operation);
  }
- UserOperation::Add(doc) => {
- let add_operation = AddOperation {
- opstamp: opstamp,
- document: doc,
- };
+ UserOperation::Add(document) => {
+ let add_operation = AddOperation { opstamp, document };
  adds.push(add_operation);
  }
  }
  }
- let send_result = self.document_sender.send(adds);
+ let send_result = self.operation_sender.send(adds);
  if let Err(e) = send_result {
  panic!("Failed to index document. Sending to indexing channel failed. This probably means all of the indexing threads have panicked. {:?}", e);
  };
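
As an aside, a hedged sketch of the batch API documented above (the field and terms are illustrative, not from this changeset): each operation in the group gets a contiguous opstamp, and the add operations of the group end up in the same segment.

// Assumes an `index_writer` and a text field `text_field` already exist in scope.
let operations = vec![
    UserOperation::Add(doc!(text_field => "new document")),
    UserOperation::Delete(Term::from_field_text(text_field, "obsolete")),
];
// `run` returns the batch opstamp, one greater than the last operation's opstamp.
let batch_opstamp = index_writer.run(operations);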
@@ -729,14 +743,16 @@ mod tests {
  use super::super::operation::UserOperation;
  use super::initial_table_size;
+ use collector::TopDocs;
  use directory::error::LockError;
  use error::*;
  use indexer::NoMergePolicy;
- use schema::{self, Document, IndexRecordOption};
- use query::{TermQuery};
- use collector::TopDocs;
+ use query::TermQuery;
+ use schema::{self, IndexRecordOption};
  use Index;
+ use ReloadPolicy;
  use Term;
+ use IndexReader;
  #[test]
  fn test_operations_group() {
@@ -763,6 +779,11 @@ mod tests {
  let mut schema_builder = schema::Schema::builder();
  let text_field = schema_builder.add_text_field("text", schema::TEXT);
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index
+ .reader_builder()
+ .reload_policy(ReloadPolicy::Manual)
+ .try_into()
+ .unwrap();
  let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
  let a_term = Term::from_field_text(text_field, "a");
  let b_term = Term::from_field_text(text_field, "b");
@@ -775,15 +796,15 @@ mod tests {
  index_writer.run(operations);
  index_writer.commit().expect("failed to commit");
- index.load_searchers().expect("failed to load searchers");
+ reader.reload().expect("failed to load searchers");
  let a_term = Term::from_field_text(text_field, "a");
  let b_term = Term::from_field_text(text_field, "b");
  let a_query = TermQuery::new(a_term, IndexRecordOption::Basic);
  let b_query = TermQuery::new(b_term, IndexRecordOption::Basic);
- let searcher = index.searcher();
+ let searcher = reader.searcher();
  let a_docs = searcher
  .search(&a_query, &TopDocs::with_limit(1))
@@ -865,24 +886,37 @@ mod tests {
  let _index_writer_two = index.writer(3_000_000).unwrap();
  }
+ fn num_docs_containing_text(reader: &IndexReader, term: &str) -> u64 {
+ let searcher = reader.searcher();
+ let text_field = reader.schema().get_field("text").unwrap();
+ let term = Term::from_field_text(text_field, term);
+ searcher.doc_freq(&term)
+ }
  #[test]
  fn test_commit_and_rollback() {
  let mut schema_builder = schema::Schema::builder();
  let text_field = schema_builder.add_text_field("text", schema::TEXT);
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index
+ .reader_builder()
+ .reload_policy(ReloadPolicy::Manual)
+ .try_into()
+ .unwrap();
  let num_docs_containing = |s: &str| {
- let searcher = index.searcher();
+ let searcher = reader.searcher();
  let term = Term::from_field_text(text_field, s);
  searcher.doc_freq(&term)
  };
+ let mut index_writer = index.writer(3_000_000).unwrap();
+ assert_eq!(index_writer.commit_opstamp(), 0u64);
+ assert_eq!(num_docs_containing_text(&reader, "a"), 0);
  {
  // writing the segment
- let mut index_writer = index.writer(3_000_000).unwrap();
  index_writer.add_document(doc!(text_field=>"a"));
  index_writer.rollback().unwrap();
  assert_eq!(index_writer.commit_opstamp(), 0u64);
  assert_eq!(num_docs_containing("a"), 0);
  {
@@ -890,13 +924,42 @@ mod tests {
index_writer.add_document(doc!(text_field=>"c")); index_writer.add_document(doc!(text_field=>"c"));
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
index.load_searchers().unwrap(); reader.reload().unwrap();
assert_eq!(num_docs_containing("a"), 0); assert_eq!(num_docs_containing("a"), 0);
assert_eq!(num_docs_containing("b"), 1); assert_eq!(num_docs_containing("b"), 1);
assert_eq!(num_docs_containing("c"), 1); assert_eq!(num_docs_containing("c"), 1);
} }
index.load_searchers().unwrap(); reader.reload().unwrap();
index.searcher(); reader.searcher();
}
#[test]
fn test_softcommit_and_rollback() {
let mut schema_builder = schema::Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap();
// writing the segment
let mut index_writer = index.writer(3_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a"));
index_writer.rollback().unwrap();
assert_eq!(index_writer.commit_opstamp(), 0u64);
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
{
index_writer.add_document(doc!(text_field=>"b"));
index_writer.add_document(doc!(text_field=>"c"));
}
assert!(index_writer.soft_commit().is_ok());
reader.reload().unwrap(); // we need to load soft committed stuff.
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "b"), 1u64);
assert_eq!(num_docs_containing_text(&reader, "c"), 1u64);
index_writer.rollback().unwrap();
reader.reload().unwrap();
assert_eq!(num_docs_containing_text(&reader, "a"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "b"), 0u64);
assert_eq!(num_docs_containing_text(&reader, "c"), 0u64);
  }
  #[test]
@@ -904,34 +967,35 @@ mod tests {
  let mut schema_builder = schema::Schema::builder();
  let text_field = schema_builder.add_text_field("text", schema::TEXT);
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index
+ .reader_builder()
+ .reload_policy(ReloadPolicy::Manual)
+ .try_into()
+ .unwrap();
  let num_docs_containing = |s: &str| {
- let searcher = index.searcher();
  let term_a = Term::from_field_text(text_field, s);
- searcher.doc_freq(&term_a)
+ reader.searcher().doc_freq(&term_a)
  };
  {
  // writing the segment
  let mut index_writer = index.writer(12_000_000).unwrap();
  // create 8 segments with 100 tiny docs
  for _doc in 0..100 {
- let mut doc = Document::default();
- doc.add_text(text_field, "a");
- index_writer.add_document(doc);
+ index_writer.add_document(doc!(text_field=>"a"));
  }
  index_writer.commit().expect("commit failed");
  for _doc in 0..100 {
- let mut doc = Document::default();
- doc.add_text(text_field, "a");
- index_writer.add_document(doc);
+ index_writer.add_document(doc!(text_field=>"a"));
  }
  // this should create 8 segments and trigger a merge.
  index_writer.commit().expect("commit failed");
  index_writer
  .wait_merging_threads()
  .expect("waiting merging thread failed");
- index.load_searchers().unwrap();
- assert_eq!(num_docs_containing("a"), 200);
+ reader.reload().unwrap();
+ assert_eq!(num_docs_containing_text(&reader, "a"), 200);
  assert!(index.searchable_segments().unwrap().len() < 8);
  }
  }
@@ -974,7 +1038,7 @@ mod tests {
  let mut schema_builder = schema::Schema::builder();
  let text_field = schema_builder.add_text_field("text", schema::TEXT);
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index.reader();
  {
  // writing the segment
  let mut index_writer = index.writer_with_num_threads(4, 12_000_000).unwrap();
@@ -996,11 +1060,15 @@ mod tests {
  }
  index_writer.commit().unwrap();
  }
- index.load_searchers().unwrap();
  let num_docs_containing = |s: &str| {
- let searcher = index.searcher();
  let term_a = Term::from_field_text(text_field, s);
- searcher.doc_freq(&term_a)
+ index
+ .reader_builder()
+ .reload_policy(ReloadPolicy::Manual)
+ .try_into()
+ .unwrap()
+ .searcher()
+ .doc_freq(&term_a)
  };
  assert_eq!(num_docs_containing("a"), 0);
  assert_eq!(num_docs_containing("b"), 100);
@@ -1008,9 +1076,9 @@ mod tests {
  #[test]
  fn test_hashmap_size() {
- assert_eq!(initial_table_size(100_000), 12);
- assert_eq!(initial_table_size(1_000_000), 15);
- assert_eq!(initial_table_size(10_000_000), 18);
+ assert_eq!(initial_table_size(100_000), 11);
+ assert_eq!(initial_table_size(1_000_000), 14);
+ assert_eq!(initial_table_size(10_000_000), 17);
  assert_eq!(initial_table_size(1_000_000_000), 19);
  }
@@ -1032,11 +1100,9 @@ mod tests {
  index_writer.add_document(doc!(text_field => "b"));
  }
  assert!(index_writer.commit().is_err());
- index.load_searchers().unwrap();
  let num_docs_containing = |s: &str| {
- let searcher = index.searcher();
  let term_a = Term::from_field_text(text_field, s);
- searcher.doc_freq(&term_a)
+ index.reader().unwrap().searcher().doc_freq(&term_a)
  };
  assert_eq!(num_docs_containing("a"), 100);
  assert_eq!(num_docs_containing("b"), 0);

View File

@@ -1,3 +1,4 @@
+ use common::MAX_DOC_LIMIT;
  use core::Segment;
  use core::SegmentReader;
  use core::SerializableSegment;
@@ -23,6 +24,7 @@ use termdict::TermMerger;
  use termdict::TermOrdinal;
  use DocId;
  use Result;
+ use TantivyError;
  fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
  let mut total_tokens = 0u64;
@@ -150,6 +152,14 @@ impl IndexMerger {
  readers.push(reader);
  }
  }
+ if max_doc >= MAX_DOC_LIMIT {
+ let err_msg = format!(
+ "The segment resulting from this merge would have {} docs,\
+  which exceeds the limit {}.",
+ max_doc, MAX_DOC_LIMIT
+ );
+ return Err(TantivyError::InvalidArgument(err_msg));
+ }
  Ok(IndexMerger {
  schema,
  readers,
@@ -194,17 +204,17 @@ impl IndexMerger {
  fast_field_serializer,
  )?;
  }
- FieldType::U64(ref options) | FieldType::I64(ref options) => {
- match options.get_fastfield_cardinality() {
- Some(Cardinality::SingleValue) => {
- self.write_single_fast_field(field, fast_field_serializer)?;
- }
- Some(Cardinality::MultiValues) => {
- self.write_multi_fast_field(field, fast_field_serializer)?;
- }
- None => {}
- }
- }
+ FieldType::U64(ref options)
+ | FieldType::I64(ref options)
+ | FieldType::Date(ref options) => match options.get_fastfield_cardinality() {
+ Some(Cardinality::SingleValue) => {
+ self.write_single_fast_field(field, fast_field_serializer)?;
+ }
+ Some(Cardinality::MultiValues) => {
+ self.write_multi_fast_field(field, fast_field_serializer)?;
+ }
+ None => {}
+ },
  FieldType::Str(_) => {
  // We don't handle str fast field for the moment
  // They can be implemented using what is done
@@ -654,7 +664,7 @@ mod tests {
  use schema::IntOptions;
  use schema::Term;
  use schema::TextFieldIndexing;
- use schema::INT_INDEXED;
+ use schema::INDEXED;
  use std::io::Cursor;
  use DocAddress;
  use IndexWriter;
@@ -671,11 +681,13 @@ mod tests {
  )
  .set_stored();
  let text_field = schema_builder.add_text_field("text", text_fieldtype);
+ let date_field = schema_builder.add_date_field("date", INDEXED);
  let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
  let score_field = schema_builder.add_u64_field("score", score_fieldtype);
  let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index.reader().unwrap();
+ let curr_time = chrono::Utc::now();
  let add_score_bytes = |doc: &mut Document, score: u32| {
  let mut bytes = Vec::new();
  bytes
@@ -692,6 +704,7 @@ mod tests {
  let mut doc = Document::default();
  doc.add_text(text_field, "af b");
  doc.add_u64(score_field, 3);
+ doc.add_date(date_field, &curr_time);
  add_score_bytes(&mut doc, 3);
  index_writer.add_document(doc);
  }
@@ -717,6 +730,7 @@ mod tests {
  {
  let mut doc = Document::default();
  doc.add_text(text_field, "af b");
+ doc.add_date(date_field, &curr_time);
  doc.add_u64(score_field, 11);
  add_score_bytes(&mut doc, 11);
  index_writer.add_document(doc);
@@ -744,8 +758,8 @@ mod tests {
  index_writer.wait_merging_threads().unwrap();
  }
  {
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  let get_doc_ids = |terms: Vec<Term>| {
  let query = BooleanQuery::new_multiterms_query(terms);
  let top_docs = searcher.search(&query, &TestCollector).unwrap();
@@ -774,6 +788,10 @@ mod tests {
  DocAddress(0, 4)
  ]
  );
+ assert_eq!(
+ get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)]),
+ vec![DocAddress(0, 0), DocAddress(0, 3)]
+ );
  }
  {
  let doc = searcher.doc(DocAddress(0, 0)).unwrap();
@@ -837,7 +855,7 @@ mod tests {
  let bytes_score_field = schema_builder.add_bytes_field("score_bytes");
  let index = Index::create_in_ram(schema_builder.build());
  let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
+ let reader = index.reader().unwrap();
  let search_term = |searcher: &Searcher, term: Term| {
  let collector = FastFieldTestCollector::for_field(score_field);
  let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
@@ -874,8 +892,8 @@ mod tests {
  bytes_score_field => vec![0u8, 0, 0, 3],
  ));
  index_writer.commit().expect("committed");
- index.load_searchers().unwrap();
- let ref searcher = *index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  assert_eq!(searcher.num_docs(), 2);
  assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
  assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
@@ -921,8 +939,8 @@ mod tests {
  bytes_score_field => vec![0u8, 0, 27, 88],
  ));
  index_writer.commit().expect("committed");
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  assert_eq!(searcher.segment_readers().len(), 2);
  assert_eq!(searcher.num_docs(), 3);
@@ -983,8 +1001,8 @@ mod tests {
.expect("Failed to initiate merge") .expect("Failed to initiate merge")
.wait() .wait()
.expect("Merging failed"); .expect("Merging failed");
index.load_searchers().unwrap(); reader.reload().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3); assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3); assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
@@ -1029,8 +1047,8 @@ mod tests {
  index_writer.delete_term(Term::from_field_text(text_field, "c"));
  index_writer.commit().unwrap();
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  assert_eq!(searcher.segment_readers().len(), 1);
  assert_eq!(searcher.num_docs(), 2);
  assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
@@ -1080,9 +1098,9 @@ mod tests {
.expect("Failed to initiate merge") .expect("Failed to initiate merge")
.wait() .wait()
.expect("Merging failed"); .expect("Merging failed");
index.load_searchers().unwrap(); reader.reload().unwrap();
let ref searcher = *index.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1); assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2); assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
@@ -1130,9 +1148,9 @@ mod tests {
  let segment_ids = index
  .searchable_segment_ids()
  .expect("Searchable segments failed.");
- index.load_searchers().unwrap();
- let ref searcher = *index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  assert!(segment_ids.is_empty());
  assert!(searcher.segment_readers().is_empty());
  assert_eq!(searcher.num_docs(), 0);
@@ -1144,6 +1162,7 @@ mod tests {
  let mut schema_builder = schema::Schema::builder();
  let facet_field = schema_builder.add_facet_field("facet");
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index.reader().unwrap();
  {
  let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
  let index_doc = |index_writer: &mut IndexWriter, doc_facets: &[&str]| {
@@ -1173,9 +1192,9 @@ mod tests {
  index_writer.commit().expect("committed");
  }
- index.load_searchers().unwrap();
+ reader.reload().unwrap();
  let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
- let searcher = index.searcher();
+ let searcher = reader.searcher();
  let mut facet_collector = FacetCollector::for_field(facet_field);
  facet_collector.add_facet(Facet::from("/top"));
  let (count, facet_counts) = searcher
@@ -1217,7 +1236,7 @@ mod tests {
  .wait()
  .expect("Merging failed");
  index_writer.wait_merging_threads().unwrap();
- index.load_searchers().unwrap();
+ reader.reload().unwrap();
  test_searcher(
  11,
  &[
@@ -1238,7 +1257,7 @@ mod tests {
  let facet_term = Term::from_facet(facet_field, &facet);
  index_writer.delete_term(facet_term);
  index_writer.commit().unwrap();
- index.load_searchers().unwrap();
+ reader.reload().unwrap();
  test_searcher(
  9,
  &[
@@ -1256,15 +1275,15 @@ mod tests {
  #[test]
  fn test_bug_merge() {
  let mut schema_builder = schema::Schema::builder();
- let int_field = schema_builder.add_u64_field("intvals", INT_INDEXED);
+ let int_field = schema_builder.add_u64_field("intvals", INDEXED);
  let index = Index::create_in_ram(schema_builder.build());
  let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
  index_writer.add_document(doc!(int_field => 1u64));
  index_writer.commit().expect("commit failed");
  index_writer.add_document(doc!(int_field => 1u64));
  index_writer.commit().expect("commit failed");
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ let reader = index.reader().unwrap();
+ let searcher = reader.searcher();
  assert_eq!(searcher.num_docs(), 2);
  index_writer.delete_term(Term::from_field_u64(int_field, 1));
  let segment_ids = index
@@ -1275,10 +1294,10 @@ mod tests {
.expect("Failed to initiate merge") .expect("Failed to initiate merge")
.wait() .wait()
.expect("Merging failed"); .expect("Merging failed");
index.load_searchers().unwrap(); reader.reload().unwrap();
// commit has not been called yet. The document should still be // commit has not been called yet. The document should still be
// there. // there.
assert_eq!(index.searcher().num_docs(), 2); assert_eq!(reader.searcher().num_docs(), 2);
} }
#[test] #[test]
@@ -1289,7 +1308,7 @@ mod tests {
  .set_indexed();
  let int_field = schema_builder.add_u64_field("intvals", int_options);
  let index = Index::create_in_ram(schema_builder.build());
+ let reader = index.reader().unwrap();
  {
  let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
  let mut doc = Document::default();
@@ -1310,8 +1329,8 @@ mod tests {
.expect("Merging failed"); .expect("Merging failed");
// assert delete has not been committed // assert delete has not been committed
index.load_searchers().unwrap(); reader.reload().expect("failed to load searcher 1");
let searcher = index.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2); assert_eq!(searcher.num_docs(), 2);
index_writer.commit().unwrap(); index_writer.commit().unwrap();
@@ -1319,13 +1338,13 @@ mod tests {
  index_writer.wait_merging_threads().unwrap();
  }
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ reader.reload().unwrap();
+ let searcher = reader.searcher();
  assert_eq!(searcher.num_docs(), 0);
  }
  #[test]
- fn test_merge_multivalued_int_fields() {
+ fn test_merge_multivalued_int_fields_simple() {
  let mut schema_builder = schema::Schema::builder();
  let int_options = IntOptions::default()
  .set_fast(Cardinality::MultiValues)
@@ -1342,7 +1361,6 @@ mod tests {
  }
  index_writer.add_document(doc);
  };
  index_doc(&mut index_writer, &[1, 2]);
  index_doc(&mut index_writer, &[1, 2, 3]);
  index_doc(&mut index_writer, &[4, 5]);
@@ -1351,19 +1369,14 @@ mod tests {
  index_doc(&mut index_writer, &[3]);
  index_doc(&mut index_writer, &[17]);
  index_writer.commit().expect("committed");
  index_doc(&mut index_writer, &[20]);
  index_writer.commit().expect("committed");
  index_doc(&mut index_writer, &[28, 27]);
  index_doc(&mut index_writer, &[1_000]);
  index_writer.commit().expect("committed");
  }
- index.load_searchers().unwrap();
- let searcher = index.searcher();
+ let reader = index.reader().unwrap();
+ let searcher = reader.searcher();
  let mut vals: Vec<u64> = Vec::new();
  {
@@ -1429,13 +1442,14 @@ mod tests {
.expect("Failed to initiate merge") .expect("Failed to initiate merge")
.wait() .wait()
.expect("Merging failed"); .expect("Merging failed");
index_writer.wait_merging_threads().unwrap(); index_writer
.wait_merging_threads()
.expect("Wait for merging threads");
} }
reader.reload().expect("Load searcher");
index.load_searchers().unwrap();
{ {
let searcher = index.searcher(); let searcher = reader.searcher();
println!( println!(
"{:?}", "{:?}",
searcher searcher

View File

@@ -6,14 +6,20 @@ pub struct PreparedCommit<'a> {
  index_writer: &'a mut IndexWriter,
  payload: Option<String>,
  opstamp: u64,
+ soft: bool,
  }
  impl<'a> PreparedCommit<'a> {
- pub(crate) fn new(index_writer: &'a mut IndexWriter, opstamp: u64) -> PreparedCommit {
+ pub(crate) fn new(
+ index_writer: &'a mut IndexWriter,
+ opstamp: u64,
+ soft: bool,
+ ) -> PreparedCommit {
  PreparedCommit {
  index_writer,
  payload: None,
  opstamp,
+ soft,
  }
  }
@@ -33,7 +39,7 @@ impl<'a> PreparedCommit<'a> {
info!("committing {}", self.opstamp); info!("committing {}", self.opstamp);
self.index_writer self.index_writer
.segment_updater() .segment_updater()
.commit(self.opstamp, self.payload)?; .commit(self.opstamp, self.payload, self.soft)?;
Ok(self.opstamp) Ok(self.opstamp)
} }
} }

View File

@@ -22,6 +22,7 @@ pub struct SegmentEntry {
  meta: SegmentMeta,
  delete_bitset: Option<BitSet>,
  delete_cursor: DeleteCursor,
+ opstamp: u64,
  }
  impl SegmentEntry {
@@ -30,14 +31,20 @@ impl SegmentEntry {
  segment_meta: SegmentMeta,
  delete_cursor: DeleteCursor,
  delete_bitset: Option<BitSet>,
+ opstamp: u64,
  ) -> SegmentEntry {
  SegmentEntry {
  meta: segment_meta,
  delete_bitset,
  delete_cursor,
+ opstamp,
  }
  }
+ pub fn opstamp(&self) -> u64 {
+ self.opstamp
+ }
  /// Return a reference to the segment entry deleted bitset.
  ///
  /// `DocId` in this bitset are flagged as deleted.
@@ -46,7 +53,8 @@ impl SegmentEntry {
  }
  /// Set the `SegmentMeta` for this segment.
- pub fn set_meta(&mut self, segment_meta: SegmentMeta) {
+ pub fn set_meta(&mut self, opstamp: u64, segment_meta: SegmentMeta) {
+ self.opstamp = opstamp;
  self.meta = segment_meta;
  }

View File

@@ -11,11 +11,47 @@ use std::path::PathBuf;
  use std::sync::RwLock;
  use std::sync::{RwLockReadGuard, RwLockWriteGuard};
  use Result as TantivyResult;
+ use std::sync::Arc;
+ use std::collections::HashMap;
/// Provides a read-only view of the available segments.
#[derive(Clone)]
pub struct AvailableSegments {
registers: Arc<RwLock<SegmentRegisters>>,
}
impl AvailableSegments {
pub fn committed(&self) -> Vec<SegmentMeta> {
self.registers
.read()
.unwrap()
.committed
.segment_metas()
}
pub fn soft_committed(&self) -> Vec<SegmentMeta> {
self.registers
.read()
.unwrap()
.soft_committed
.segment_metas()
}
}
#[derive(Default)]
  struct SegmentRegisters {
- uncommitted: SegmentRegister,
+ uncommitted: HashMap<SegmentId, SegmentEntry>,
  committed: SegmentRegister,
/// soft commits can advance committed segment to a future delete
/// opstamp.
///
/// In that case the same `SegmentId` can appear in both `committed`
/// and in `committed_in_the_future`.
///
/// We do not consider these segments for merges.
soft_committed: SegmentRegister,
/// `DeleteCursor`, positionned on the soft commit.
delete_cursor: DeleteCursor,
  }
  /// The segment manager stores the list of segments
@@ -23,9 +59,8 @@ struct SegmentRegisters {
  ///
  /// It guarantees the atomicity of the
  /// changes (merges especially)
- #[derive(Default)]
  pub struct SegmentManager {
- registers: RwLock<SegmentRegisters>,
+ registers: Arc<RwLock<SegmentRegisters>>
  }
  impl Debug for SegmentManager {
@@ -46,11 +81,17 @@ pub fn get_mergeable_segments(
  let registers_lock = segment_manager.read();
  (
  registers_lock
- .committed
+ .soft_committed
  .get_mergeable_segments(in_merge_segment_ids),
  registers_lock
  .uncommitted
- .get_mergeable_segments(in_merge_segment_ids),
+ .values()
+ .map(|segment_entry| segment_entry.meta())
+ .filter(|segment_meta| {
+ !in_merge_segment_ids.contains(&segment_meta.id())
+ })
+ .cloned()
+ .collect::<Vec<_>>()
  )
  }
@@ -58,21 +99,22 @@ impl SegmentManager {
  pub fn from_segments(
  segment_metas: Vec<SegmentMeta>,
  delete_cursor: &DeleteCursor,
+ opstamp: u64,
  ) -> SegmentManager {
  SegmentManager {
- registers: RwLock::new(SegmentRegisters {
- uncommitted: SegmentRegister::default(),
- committed: SegmentRegister::new(segment_metas, delete_cursor),
- }),
+ registers: Arc::new(RwLock::new(SegmentRegisters {
+ uncommitted: HashMap::default(),
+ committed: SegmentRegister::new(segment_metas.clone(), opstamp),
+ soft_committed: SegmentRegister::new(segment_metas, opstamp),
+ delete_cursor: delete_cursor.clone(),
+ }))
  }
  }
- /// Returns all of the segment entries (committed or uncommitted)
- pub fn segment_entries(&self) -> Vec<SegmentEntry> {
- let registers_lock = self.read();
- let mut segment_entries = registers_lock.uncommitted.segment_entries();
- segment_entries.extend(registers_lock.committed.segment_entries());
- segment_entries
- }
+ pub fn available_segments_view(&self) -> AvailableSegments {
+ AvailableSegments {
+ registers: self.registers.clone()
+ }
+ }
/// List the files that are useful to the index. /// List the files that are useful to the index.
@@ -108,44 +150,76 @@ impl SegmentManager {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock registers_lock
.committed .committed
.segment_entries() .segment_metas()
.iter() .iter()
.filter(|segment| segment.meta().num_docs() == 0) .filter(|segment_meta| segment_meta.num_docs() == 0)
.for_each(|segment| { .for_each(|segment_meta| {
registers_lock registers_lock
.committed .committed
.remove_segment(&segment.segment_id()) .remove_segment(&segment_meta.id())
});
registers_lock
.soft_committed
.segment_metas()
.iter()
.filter(|segment_meta| segment_meta.num_docs() == 0)
.for_each(|segment_meta| {
registers_lock
.committed
.remove_segment(&segment_meta.id())
}); });
} }
pub fn commit(&self, segment_entries: Vec<SegmentEntry>) { /// Returns all of the segment entries (soft committed or uncommitted)
let mut registers_lock = self.write(); pub fn segment_entries(&self) -> Vec<SegmentEntry> {
registers_lock.committed.clear(); let registers_lock = self.read();
registers_lock.uncommitted.clear(); let mut segment_entries: Vec<SegmentEntry > = registers_lock.uncommitted.values().cloned().collect();
for segment_entry in segment_entries { segment_entries.extend(registers_lock.soft_committed.segment_entries(&registers_lock.delete_cursor).into_iter());
registers_lock.committed.add_segment_entry(segment_entry); segment_entries
}
} }
/// Marks a list of segments as in merge.
pub fn commit(&self, opstamp: u64, segment_entries: Vec<SegmentEntry>) {
let mut registers_lock = self.write();
registers_lock.uncommitted.clear();
registers_lock
.committed
.set_commit(opstamp, segment_entries.clone());
registers_lock
.soft_committed
.set_commit(opstamp, segment_entries);
registers_lock.delete_cursor.skip_to(opstamp);
}
pub fn soft_commit(&self, opstamp: u64, segment_entries: Vec<SegmentEntry>) {
let mut registers_lock = self.write();
registers_lock.uncommitted.clear();
registers_lock
.soft_committed
.set_commit(opstamp, segment_entries);
registers_lock.delete_cursor.skip_to(opstamp);
}
/// Gets the list of segment_entries associated to a list of `segment_ids`.
/// This method is used when starting a merge operation.
/// ///
/// Returns an error if some segments are missing, or if /// Returns an error if some segments are missing, or if
/// the `segment_ids` are not either all committed or all /// the `segment_ids` are not either all soft_committed or all
/// uncommitted. /// uncommitted.
pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> { pub fn start_merge(&self, segment_ids: &[SegmentId]) -> TantivyResult<Vec<SegmentEntry>> {
let registers_lock = self.read(); let registers_lock = self.read();
let mut segment_entries = vec![]; let mut segment_entries = vec![];
if registers_lock.uncommitted.contains_all(segment_ids) { if segment_ids.iter().all(|segment_id| registers_lock.uncommitted.contains_key(segment_id)) {
for segment_id in segment_ids { for segment_id in segment_ids {
let segment_entry = registers_lock.uncommitted let segment_entry = registers_lock.uncommitted
.get(segment_id) .get(segment_id)
.expect("Segment id not found {}. Should never happen because of the contains all if-block."); .expect("Segment id not found {}. Should never happen because of the contains all if-block.");
segment_entries.push(segment_entry); segment_entries.push(segment_entry.clone());
} }
} else if registers_lock.committed.contains_all(segment_ids) { } else if registers_lock.soft_committed.contains_all(segment_ids) {
for segment_id in segment_ids { for segment_id in segment_ids {
let segment_entry = registers_lock.committed let segment_entry = registers_lock.soft_committed
.get(segment_id) .get(segment_id, &registers_lock.delete_cursor)
.expect("Segment id not found {}. Should never happen because of the contains all if-block."); .expect("Segment id not found {}. Should never happen because of the contains all if-block.");
segment_entries.push(segment_entry); segment_entries.push(segment_entry);
} }
@@ -160,35 +234,32 @@ impl SegmentManager {
pub fn add_segment(&self, segment_entry: SegmentEntry) { pub fn add_segment(&self, segment_entry: SegmentEntry) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
registers_lock.uncommitted.add_segment_entry(segment_entry); registers_lock
.uncommitted
.insert(segment_entry.segment_id(), segment_entry);
} }
pub fn end_merge( pub fn end_merge(
&self, &self,
before_merge_segment_ids: &[SegmentId], before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: SegmentEntry, after_merge_segment_entry: SegmentEntry
) { ) {
let mut registers_lock = self.write(); let mut registers_lock = self.write();
let target_register: &mut SegmentRegister = {
if registers_lock if before_merge_segment_ids.iter().all(|seg_id|
registers_lock
.uncommitted .uncommitted
.contains_all(before_merge_segment_ids) .contains_key(seg_id))
{ {
&mut registers_lock.uncommitted for segment_id in before_merge_segment_ids {
} else if registers_lock registers_lock.uncommitted.remove(&segment_id);
.committed
.contains_all(before_merge_segment_ids)
{
&mut registers_lock.committed
} else {
warn!("couldn't find segment in SegmentManager");
return;
} }
}; registers_lock.uncommitted.insert(after_merge_segment_entry.segment_id(),
for segment_id in before_merge_segment_ids { after_merge_segment_entry);
target_register.remove_segment(segment_id); } else {
registers_lock.committed.receive_merge(&before_merge_segment_ids, &after_merge_segment_entry);
registers_lock.soft_committed.receive_merge(&before_merge_segment_ids, &after_merge_segment_entry)
} }
target_register.add_segment_entry(after_merge_segment_entry);
} }
pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> { pub fn committed_segment_metas(&self) -> Vec<SegmentMeta> {


@@ -16,7 +16,8 @@ use std::fmt::{self, Debug, Formatter};
/// merge candidates. /// merge candidates.
#[derive(Default)] #[derive(Default)]
pub struct SegmentRegister { pub struct SegmentRegister {
segment_states: HashMap<SegmentId, SegmentEntry>, segment_states: HashMap<SegmentId, SegmentMeta>,
opstamp_constraint: u64,
} }
impl Debug for SegmentRegister { impl Debug for SegmentRegister {
@@ -41,23 +42,28 @@ impl SegmentRegister {
) -> Vec<SegmentMeta> { ) -> Vec<SegmentMeta> {
self.segment_states self.segment_states
.values() .values()
.filter(|segment_entry| !in_merge_segment_ids.contains(&segment_entry.segment_id())) .filter(|segment_meta| !in_merge_segment_ids.contains(&segment_meta.id()))
.map(|segment_entry| segment_entry.meta().clone()) .cloned()
.collect() .collect()
} }
pub fn segment_entries(&self) -> Vec<SegmentEntry> {
self.segment_states.values().cloned().collect()
}
pub fn segment_metas(&self) -> Vec<SegmentMeta> { pub fn segment_metas(&self) -> Vec<SegmentMeta> {
let mut segment_ids: Vec<SegmentMeta> = self let mut segment_metas: Vec<SegmentMeta> = self
.segment_states .segment_states
.values() .values()
.map(|segment_entry| segment_entry.meta().clone()) .cloned()
.collect(); .collect();
segment_ids.sort_by_key(|meta| meta.id()); segment_metas.sort_by_key(|meta| meta.id());
segment_ids segment_metas
}
pub fn segment_entries(&self, delete_cursor: &DeleteCursor) -> Vec<SegmentEntry> {
self.segment_states
.values()
.map(|segment_meta| {
SegmentEntry::new(segment_meta.clone(), delete_cursor.clone(), None, self.opstamp_constraint)
})
.collect()
} }
pub fn contains_all(&self, segment_ids: &[SegmentId]) -> bool { pub fn contains_all(&self, segment_ids: &[SegmentId]) -> bool {
@@ -66,27 +72,77 @@ impl SegmentRegister {
.all(|segment_id| self.segment_states.contains_key(segment_id)) .all(|segment_id| self.segment_states.contains_key(segment_id))
} }
pub fn add_segment_entry(&mut self, segment_entry: SegmentEntry) { pub fn receive_merge(&mut self,
before_merge_segment_ids: &[SegmentId],
after_merge_segment_entry: &SegmentEntry) {
if after_merge_segment_entry.opstamp() != self.opstamp_constraint {
return;
}
if !self.contains_all(before_merge_segment_ids) {
return;
}
for segment_id in before_merge_segment_ids {
self.segment_states.remove(segment_id);
}
self.register_segment_entry(after_merge_segment_entry.clone());
}
/// Registers a `SegmentEntry`.
///
/// If a segment entry associated to this `SegmentId` is already there,
/// override it with the new `SegmentEntry`.
pub fn register_segment_entry(&mut self, segment_entry: SegmentEntry) {
if self.opstamp_constraint != segment_entry.opstamp() {
panic!(format!(
"Invalid segment. Expect opstamp {}, got {}.",
self.opstamp_constraint,
segment_entry.opstamp()
));
}
if segment_entry.meta().num_docs() == 0 {
return;
}
let segment_id = segment_entry.segment_id(); let segment_id = segment_entry.segment_id();
self.segment_states.insert(segment_id, segment_entry); // Check that we are ok with deletes.
self.segment_states.insert(segment_id, segment_entry.meta().clone());
}
pub fn set_commit(&mut self, opstamp: u64, segment_entries: Vec<SegmentEntry>) {
self.segment_states.clear();
self.opstamp_constraint = opstamp;
for segment_entry in segment_entries {
self.register_segment_entry(segment_entry);
}
} }
pub fn remove_segment(&mut self, segment_id: &SegmentId) { pub fn remove_segment(&mut self, segment_id: &SegmentId) {
self.segment_states.remove(segment_id); self.segment_states.remove(&segment_id);
} }
pub fn get(&self, segment_id: &SegmentId) -> Option<SegmentEntry> { pub fn get(&self, segment_id: &SegmentId, delete_cursor: &DeleteCursor) -> Option<SegmentEntry> {
self.segment_states.get(segment_id).cloned() self.segment_states
.get(&segment_id)
.map(|segment_meta|
SegmentEntry::new(
segment_meta.clone(),
delete_cursor.clone(),
None,
self.opstamp_constraint
))
} }
pub fn new(segment_metas: Vec<SegmentMeta>, delete_cursor: &DeleteCursor) -> SegmentRegister { pub fn new(
segment_metas: Vec<SegmentMeta>,
opstamp: u64,
) -> SegmentRegister {
let mut segment_states = HashMap::new(); let mut segment_states = HashMap::new();
for segment_meta in segment_metas { for segment_meta in segment_metas {
let segment_id = segment_meta.id(); segment_states.insert(segment_meta.id(), segment_meta);
let segment_entry = SegmentEntry::new(segment_meta, delete_cursor.clone(), None); }
segment_states.insert(segment_id, segment_entry); SegmentRegister {
segment_states,
opstamp_constraint: opstamp,
} }
SegmentRegister { segment_states }
} }
} }
@@ -115,22 +171,22 @@ mod tests {
let segment_id_merged = SegmentId::generate_random(); let segment_id_merged = SegmentId::generate_random();
{ {
let segment_meta = SegmentMeta::new(segment_id_a, 0u32); let segment_meta = SegmentMeta::new(segment_id_a, 1u32);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None); let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None, 0u64);
segment_register.add_segment_entry(segment_entry); segment_register.register_segment_entry(segment_entry);
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_a]); assert_eq!(segment_ids(&segment_register), vec![segment_id_a]);
{ {
let segment_meta = SegmentMeta::new(segment_id_b, 0u32); let segment_meta = SegmentMeta::new(segment_id_b, 2u32);
let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None); let segment_entry = SegmentEntry::new(segment_meta, delete_queue.cursor(), None, 0u64);
segment_register.add_segment_entry(segment_entry); segment_register.register_segment_entry(segment_entry);
} }
segment_register.remove_segment(&segment_id_a);
segment_register.remove_segment(&segment_id_b);
{ {
let segment_meta_merged = SegmentMeta::new(segment_id_merged, 0u32); let segment_meta_merged = SegmentMeta::new(segment_id_merged, 3u32);
let segment_entry = SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None); let segment_entry =
segment_register.add_segment_entry(segment_entry); SegmentEntry::new(segment_meta_merged, delete_queue.cursor(), None, 0u64);
segment_register.receive_merge(&[segment_id_a, segment_id_b], &segment_entry);
segment_register.register_segment_entry(segment_entry);
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]); assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
} }


@@ -62,7 +62,7 @@ pub fn save_new_metas(schema: Schema, directory: &mut Directory) -> Result<()> {
/// Save the index meta file. /// Save the index meta file.
/// This operation is atomic: /// This operation is atomic:
/// Either /// Either
// - it fails, in which case an error is returned, /// - it fails, in which case an error is returned,
/// and the `meta.json` remains untouched, /// and the `meta.json` remains untouched,
/// - it succeeds, and `meta.json` is written /// - it succeeds, and `meta.json` is written
/// and flushed. /// and flushed.
@@ -125,7 +125,7 @@ fn perform_merge(
let segment_meta = SegmentMeta::new(merged_segment.id(), num_docs); let segment_meta = SegmentMeta::new(merged_segment.id(), num_docs);
let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None); let after_merge_segment_entry = SegmentEntry::new(segment_meta.clone(), delete_cursor, None, target_opstamp);
Ok(after_merge_segment_entry) Ok(after_merge_segment_entry)
} }
@@ -155,8 +155,11 @@ impl SegmentUpdater {
stamper: Stamper, stamper: Stamper,
delete_cursor: &DeleteCursor, delete_cursor: &DeleteCursor,
) -> Result<SegmentUpdater> { ) -> Result<SegmentUpdater> {
let index_meta = index.load_metas()?;
let segments = index.searchable_segment_metas()?; let segments = index.searchable_segment_metas()?;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor); let opstamp = index_meta.opstamp;
let segment_manager = SegmentManager::from_segments(segments, delete_cursor, opstamp);
let pool = CpuPoolBuilder::new() let pool = CpuPoolBuilder::new()
.name_prefix("segment_updater") .name_prefix("segment_updater")
.pool_size(1) .pool_size(1)
@@ -280,14 +283,30 @@ impl SegmentUpdater {
.garbage_collect(|| self.0.segment_manager.list_files()); .garbage_collect(|| self.0.segment_manager.list_files());
} }
pub fn commit(&self, opstamp: u64, payload: Option<String>) -> Result<()> { pub fn commit(&self, opstamp: u64, payload: Option<String>, soft: bool) -> Result<()> {
self.run_async(move |segment_updater| { self.run_async(move |segment_updater| {
if segment_updater.is_alive() { if segment_updater.is_alive() {
let segment_entries = segment_updater let segment_entries = segment_updater
.purge_deletes(opstamp) .purge_deletes(opstamp)
.expect("Failed purge deletes"); .expect("Failed purge deletes");
segment_updater.0.segment_manager.commit(segment_entries); if soft {
segment_updater.save_metas(opstamp, payload); // Soft commit.
//
// The list `segment_entries` above is what we might want to use as searchable
// segment. However, we do not want to mark them as committed, and we want
// to keep the current set of committed segment.
segment_updater.0.segment_manager.soft_commit(opstamp, segment_entries);
// ... We do not save the meta file.
} else {
// Hard commit. We register the new segment entries as committed.
segment_updater
.0
.segment_manager
.commit(opstamp, segment_entries);
// TODO error handling.
segment_updater.save_metas(opstamp, payload);
segment_updater.0.index.directory().flush().unwrap();
}
segment_updater.garbage_collect_files_exec(); segment_updater.garbage_collect_files_exec();
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
} }
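The comments above capture the whole behavioural difference between the two branches. A minimal sketch of how the crate-internal `SegmentUpdater::commit` introduced here might be driven; only the `(opstamp, payload, soft)` signature is taken from this diff, the wrapper function is illustrative:

```rust
// Sketch only: `SegmentUpdater` is crate-internal; the wrapper name is hypothetical.
fn publish(segment_updater: &SegmentUpdater, opstamp: u64, durable: bool) -> Result<()> {
    // soft == true : only the in-memory `soft_committed` register is swapped, so the new
    //                documents become visible to fresh searchers; meta.json is untouched.
    // soft == false: `committed` and `soft_committed` are both reset to the new entries,
    //                meta.json is saved and the directory is flushed.
    segment_updater.commit(opstamp, None, !durable)
}
```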
@@ -420,6 +439,7 @@ impl SegmentUpdater {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
merge_candidates.extend(committed_merge_candidates.into_iter()); merge_candidates.extend(committed_merge_candidates.into_iter());
for merge_operation in merge_candidates { for merge_operation in merge_candidates {
match self.start_merge_impl(merge_operation) { match self.start_merge_impl(merge_operation) {
Ok(merge_future) => { Ok(merge_future) => {
@@ -565,9 +585,8 @@ mod tests {
index_writer.delete_term(term); index_writer.delete_term(term);
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
let reader = index.reader().unwrap();
index.load_searchers().unwrap(); assert_eq!(reader.searcher().num_docs(), 302);
assert_eq!(index.searcher().num_docs(), 302);
{ {
index_writer index_writer
@@ -575,9 +594,9 @@ mod tests {
.expect("waiting for merging threads"); .expect("waiting for merging threads");
} }
index.load_searchers().unwrap(); reader.reload().unwrap();
assert_eq!(index.searcher().segment_readers().len(), 1); assert_eq!(reader.searcher().segment_readers().len(), 1);
assert_eq!(index.searcher().num_docs(), 302); assert_eq!(reader.searcher().num_docs(), 302);
} }
#[test] #[test]
@@ -636,18 +655,18 @@ mod tests {
.expect("waiting for merging threads"); .expect("waiting for merging threads");
} }
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
assert_eq!(index.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
let seg_ids = index let seg_ids = index
.searchable_segment_ids() .searchable_segment_ids()
.expect("Searchable segments failed."); .expect("Searchable segments failed.");
assert!(seg_ids.is_empty()); assert!(seg_ids.is_empty());
index.load_searchers().unwrap(); reader.reload().unwrap();
assert_eq!(index.searcher().num_docs(), 0); assert_eq!(reader.searcher().num_docs(), 0);
// empty segments should be erased // empty segments should be erased
assert!(index.searchable_segment_metas().unwrap().is_empty()); assert!(index.searchable_segment_metas().unwrap().is_empty());
assert!(index.searcher().segment_readers().is_empty()); assert!(reader.searcher().segment_readers().is_empty());
} }
} }


@@ -171,6 +171,17 @@ impl SegmentWriter {
} }
} }
} }
FieldType::Date(ref int_option) => {
if int_option.is_indexed() {
for field_value in field_values {
let term = Term::from_field_i64(
field_value.field(),
field_value.value().date_value().timestamp(),
);
self.multifield_postings.subscribe(doc_id, &term);
}
}
}
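As the new match arm shows, an indexed date is reduced to an `i64` term holding its UTC timestamp. A hedged end-to-end sketch of what indexing such a field could look like; `add_date_field`, `doc!` accepting a chrono value, and `doc!` being in scope (e.g. via `#[macro_use] extern crate tantivy`) are assumptions, only the `DateTime` alias and the `INDEXED` flag appear in this diff:

```rust
use tantivy::schema::{Schema, INDEXED};
use tantivy::{DateTime, Index};

fn index_a_date() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    // Hypothetical helper name; the diff only introduces FieldType::Date itself.
    let occurred_at = schema_builder.add_date_field("occurred_at", INDEXED);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    // `DateTime` is the new alias for chrono::DateTime<chrono::Utc>; the query parser
    // accepts the same instant written as an RFC 3339 string.
    let when: DateTime = "2019-03-15T22:10:37Z".parse().expect("valid RFC 3339 date");
    writer.add_document(doc!(occurred_at => when));
    writer.commit()?;
    Ok(())
}
```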
FieldType::I64(ref int_option) => { FieldType::I64(ref int_option) => {
if int_option.is_indexed() { if int_option.is_indexed() {
for field_value in field_values { for field_value in field_values {


@@ -67,7 +67,7 @@ impl Stamper {
pub fn stamps(&self, n: u64) -> Range<u64> { pub fn stamps(&self, n: u64) -> Range<u64> {
let start = self.0.fetch_add(n, Ordering::SeqCst); let start = self.0.fetch_add(n, Ordering::SeqCst);
Range { Range {
start: start, start,
end: start + n, end: start + n,
} }
} }
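The range is half-open, so consecutive calls hand out disjoint, contiguous blocks of opstamps. A small sketch (the zero-starting constructor is an assumption):

```rust
#[test]
fn stamps_hands_out_disjoint_ranges() {
    // Assumes a constructor that starts the stamper at opstamp 0 (the name is an assumption).
    let stamper = Stamper::default();
    assert_eq!(stamper.stamps(3), 0u64..3u64); // reserves opstamps 0, 1 and 2
    assert_eq!(stamper.stamps(2), 3u64..5u64); // the next call starts where the previous one stopped
}
```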


@@ -75,9 +75,9 @@
//! //!
//! // # Searching //! // # Searching
//! //!
//! index.load_searchers()?; //! let reader = index.reader()?;
//! //!
//! let searcher = index.searcher(); //! let searcher = reader.searcher();
//! //!
//! let query_parser = QueryParser::for_index(&index, vec![title, body]); //! let query_parser = QueryParser::for_index(&index, vec![title, body]);
//! //!
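Most of the test hunks further down simply migrate from the removed `index.load_searchers()` / `index.searcher()` pair to this `IndexReader` API. A condensed sketch of the new flow, using only calls that appear in this diff:

```rust
use tantivy::{Index, ReloadPolicy};

fn reader_flow(index: &Index) -> tantivy::Result<u64> {
    // A reader built with the defaults watches the index and picks up new commits on its own.
    let reader = index.reader()?;
    let searcher = reader.searcher();

    // Tests in this diff opt out of the watcher and reload explicitly instead.
    let manual_reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual)
        .try_into()?;
    manual_reader.reload()?;
    assert_eq!(manual_reader.searcher().num_docs(), searcher.num_docs());
    Ok(searcher.num_docs())
}
```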
@@ -132,13 +132,13 @@ extern crate byteorder;
extern crate combine; extern crate combine;
extern crate crossbeam; extern crate crossbeam;
extern crate fnv; extern crate fnv;
extern crate fst;
extern crate fst_regex;
extern crate futures; extern crate futures;
extern crate futures_cpupool; extern crate futures_cpupool;
extern crate htmlescape; extern crate htmlescape;
extern crate itertools; extern crate itertools;
extern crate levenshtein_automata; extern crate levenshtein_automata;
#[cfg(feature = "mmap")]
extern crate memmap;
extern crate num_cpus; extern crate num_cpus;
extern crate owning_ref; extern crate owning_ref;
extern crate regex; extern crate regex;
@@ -146,6 +146,7 @@ extern crate rust_stemmers;
extern crate scoped_pool; extern crate scoped_pool;
extern crate serde; extern crate serde;
extern crate stable_deref_trait; extern crate stable_deref_trait;
extern crate tantivy_fst;
extern crate tempdir; extern crate tempdir;
extern crate tempfile; extern crate tempfile;
extern crate uuid; extern crate uuid;
@@ -173,6 +174,7 @@ extern crate downcast_rs;
#[macro_use] #[macro_use]
extern crate fail; extern crate fail;
#[cfg(feature = "mmap")]
#[cfg(test)] #[cfg(test)]
mod functional_test; mod functional_test;
@@ -185,11 +187,15 @@ pub use error::TantivyError;
pub use error::TantivyError as Error; pub use error::TantivyError as Error;
extern crate census; extern crate census;
pub extern crate chrono;
extern crate owned_read; extern crate owned_read;
/// Tantivy result. /// Tantivy result.
pub type Result<T> = std::result::Result<T, error::TantivyError>; pub type Result<T> = std::result::Result<T, error::TantivyError>;
/// Tantivy DateTime
pub type DateTime = chrono::DateTime<chrono::Utc>;
mod common; mod common;
mod core; mod core;
mod indexer; mod indexer;
@@ -210,6 +216,9 @@ pub mod space_usage;
pub mod store; pub mod store;
pub mod termdict; pub mod termdict;
mod reader;
pub use self::reader::{IndexReader, IndexReaderBuilder, ReloadPolicy};
mod snippet; mod snippet;
pub use self::snippet::{Snippet, SnippetGenerator}; pub use self::snippet::{Snippet, SnippetGenerator};
@@ -298,6 +307,7 @@ mod tests {
use Index; use Index;
use IndexWriter; use IndexWriter;
use Postings; use Postings;
use ReloadPolicy;
pub fn assert_nearly_equals(expected: f32, val: f32) { pub fn assert_nearly_equals(expected: f32, val: f32) {
assert!( assert!(
@@ -386,8 +396,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
assert_eq!(searcher.doc_freq(&term_a), 3); assert_eq!(searcher.doc_freq(&term_a), 3);
let term_b = Term::from_field_text(text_field, "b"); let term_b = Term::from_field_text(text_field, "b");
@@ -414,8 +424,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); let index_reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
{ {
let fieldnorm_reader = reader.get_fieldnorms_reader(text_field); let fieldnorm_reader = reader.get_fieldnorms_reader(text_field);
@@ -450,8 +460,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0); let segment_reader: &SegmentReader = searcher.segment_reader(0);
let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field); let fieldnorms_reader = segment_reader.get_fieldnorms_reader(text_field);
assert_eq!(fieldnorms_reader.fieldnorm(0), 3); assert_eq!(fieldnorms_reader.fieldnorm(0), 3);
@@ -479,6 +489,11 @@ mod tests {
let term_c = Term::from_field_text(text_field, "c"); let term_c = Term::from_field_text(text_field, "c");
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -500,10 +515,10 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); reader.reload().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field); let inverted_index = segment_reader.inverted_index(text_field);
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
@@ -511,19 +526,19 @@ mod tests {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 5); assert_eq!(postings.doc(), 5);
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4); assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
} }
{ {
@@ -536,10 +551,10 @@ mod tests {
index_writer.rollback().unwrap(); index_writer.rollback().unwrap();
} }
{ {
index.load_searchers().unwrap(); reader.reload().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let reader = searcher.segment_reader(0); let seg_reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(term_abcd.field()); let inverted_index = seg_reader.inverted_index(term_abcd.field());
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
@@ -548,19 +563,19 @@ mod tests {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 5); assert_eq!(postings.doc(), 5);
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, seg_reader));
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, seg_reader));
assert_eq!(postings.doc(), 4); assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, seg_reader));
} }
} }
{ {
@@ -573,10 +588,10 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); reader.reload().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(term_abcd.field()); let inverted_index = segment_reader.inverted_index(term_abcd.field());
assert!(inverted_index assert!(inverted_index
.read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_abcd, IndexRecordOption::WithFreqsAndPositions)
.is_none()); .is_none());
@@ -584,25 +599,25 @@ mod tests {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_a, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_b, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 3); assert_eq!(postings.doc(), 3);
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4); assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term_c, IndexRecordOption::WithFreqsAndPositions)
.unwrap(); .unwrap();
assert!(advance_undeleted(&mut postings, reader)); assert!(advance_undeleted(&mut postings, segment_reader));
assert_eq!(postings.doc(), 4); assert_eq!(postings.doc(), 4);
assert!(!advance_undeleted(&mut postings, reader)); assert!(!advance_undeleted(&mut postings, segment_reader));
} }
} }
} }
@@ -610,15 +625,15 @@ mod tests {
#[test] #[test]
fn test_indexed_u64() { fn test_indexed_u64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("value", INT_INDEXED); let field = schema_builder.add_u64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.add_document(doc!(field=>1u64)); index_writer.add_document(doc!(field=>1u64));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let term = Term::from_field_u64(field, 1u64); let term = Term::from_field_u64(field, 1u64);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
@@ -633,7 +648,7 @@ mod tests {
#[test] #[test]
fn test_indexed_i64() { fn test_indexed_i64() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_i64_field("value", INT_INDEXED); let value_field = schema_builder.add_i64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -641,8 +656,8 @@ mod tests {
let negative_val = -1i64; let negative_val = -1i64;
index_writer.add_document(doc!(value_field => negative_val)); index_writer.add_document(doc!(value_field => negative_val));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let term = Term::from_field_i64(value_field, negative_val); let term = Term::from_field_i64(value_field, negative_val);
let mut postings = searcher let mut postings = searcher
.segment_reader(0) .segment_reader(0)
@@ -664,8 +679,8 @@ mod tests {
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a")); index_writer.add_document(doc!(text_field=>"a"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
assert!(index.load_searchers().is_ok()); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
segment_reader.inverted_index(absent_field); //< should not panic segment_reader.inverted_index(absent_field); //< should not panic
} }
@@ -676,6 +691,11 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(2, 6_000_000).unwrap();
@@ -701,8 +721,8 @@ mod tests {
remove_document(&mut index_writer, "38"); remove_document(&mut index_writer, "38");
remove_document(&mut index_writer, "34"); remove_document(&mut index_writer, "34");
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); reader.reload().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 6); assert_eq!(searcher.num_docs(), 6);
} }
@@ -722,8 +742,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); let index_reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = index_reader.searcher();
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);
let inverted_index = reader.inverted_index(text_field); let inverted_index = reader.inverted_index(text_field);
let term_abcd = Term::from_field_text(text_field, "abcd"); let term_abcd = Term::from_field_text(text_field, "abcd");
@@ -747,7 +767,7 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index.reader().unwrap();
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -757,8 +777,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
{ {
index.load_searchers().unwrap(); reader.reload().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| { let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms); let query = BooleanQuery::new_multiterms_query(terms);
let topdocs = searcher.search(&query, &TestCollector).unwrap(); let topdocs = searcher.search(&query, &TestCollector).unwrap();
@@ -800,25 +820,22 @@ mod tests {
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
.try_into()
.unwrap();
assert_eq!(reader.searcher().num_docs(), 0u64);
{ {
// writing the segment // writing the segment
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
{ index_writer.add_document(doc!(text_field=>"af b"));
let doc = doc!(text_field=>"af b"); index_writer.add_document(doc!(text_field=>"a b c"));
index_writer.add_document(doc); index_writer.add_document(doc!(text_field=>"a b c d"));
}
{
let doc = doc!(text_field=>"a b c");
index_writer.add_document(doc);
}
{
let doc = doc!(text_field=>"a b c d");
index_writer.add_document(doc);
}
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.searcher(); reader.reload().unwrap();
assert_eq!(reader.searcher().num_docs(), 3u64);
} }
#[test] #[test]
@@ -845,7 +862,7 @@ mod tests {
let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST); let fast_field_unsigned = schema_builder.add_u64_field("unsigned", FAST);
let fast_field_signed = schema_builder.add_i64_field("signed", FAST); let fast_field_signed = schema_builder.add_i64_field("signed", FAST);
let text_field = schema_builder.add_text_field("text", TEXT); let text_field = schema_builder.add_text_field("text", TEXT);
let stored_int_field = schema_builder.add_u64_field("text", INT_STORED); let stored_int_field = schema_builder.add_u64_field("text", STORED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -855,9 +872,8 @@ mod tests {
index_writer.add_document(document); index_writer.add_document(document);
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
let reader = index.reader().unwrap();
index.load_searchers().unwrap(); let searcher = reader.searcher();
let searcher = index.searcher();
let segment_reader: &SegmentReader = searcher.segment_reader(0); let segment_reader: &SegmentReader = searcher.segment_reader(0);
{ {
let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field); let fast_field_reader_res = segment_reader.fast_field_reader::<u64>(text_field);


@@ -61,7 +61,7 @@ macro_rules! doc(
}; };
// if there is a trailing comma retry with the trailing comma stripped. // if there is a trailing comma retry with the trailing comma stripped.
($($field:expr => $value:expr),+ ,) => { ($($field:expr => $value:expr),+ ,) => {
doc!( $( $field => $value ), *); doc!( $( $field => $value ), *)
}; };
); );
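Dropping the trailing semicolon matters because it lets the trailing-comma form of `doc!` expand to an expression rather than a statement. For instance (field handles and the writer are placeholders):

```rust
// With the semicolon removed, the trailing-comma invocation is a plain expression
// and can be passed directly wherever a Document is expected.
let document = doc!(
    title => "Of Mice and Men",
    body  => "A few miles south of Soledad",  // trailing comma is now fine
);
index_writer.add_document(document);
```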


@@ -151,7 +151,8 @@ impl PositionReader {
if self.ahead != Some(0) { if self.ahead != Some(0) {
// the block currently available is not the block // the block currently available is not the block
// for the current position // for the current position
self.bit_packer.decompress(position_data, self.buffer.as_mut(), num_bits); self.bit_packer
.decompress(position_data, self.buffer.as_mut(), num_bits);
self.ahead = Some(0); self.ahead = Some(0);
} }
let block_len = compressed_block_size(num_bits); let block_len = compressed_block_size(num_bits);


@@ -1,9 +1,9 @@
use bitpacking::BitPacker; use bitpacking::BitPacker;
use bitpacking::BitPacker4x;
use common::BinarySerializable; use common::BinarySerializable;
use common::CountingWriter; use common::CountingWriter;
use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL}; use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
use std::io::{self, Write}; use std::io::{self, Write};
use bitpacking::BitPacker4x;
pub struct PositionSerializer<W: io::Write> { pub struct PositionSerializer<W: io::Write> {
bit_packer: BitPacker4x, bit_packer: BitPacker4x,
@@ -53,7 +53,9 @@ impl<W: io::Write> PositionSerializer<W> {
fn flush_block(&mut self) -> io::Result<()> { fn flush_block(&mut self) -> io::Result<()> {
let num_bits = self.bit_packer.num_bits(&self.block[..]); let num_bits = self.bit_packer.num_bits(&self.block[..]);
self.write_skiplist.write_all(&[num_bits])?; self.write_skiplist.write_all(&[num_bits])?;
let written_len = self.bit_packer.compress(&self.block[..], &mut self.buffer, num_bits); let written_len = self
.bit_packer
.compress(&self.block[..], &mut self.buffer, num_bits);
self.write_stream.write_all(&self.buffer[..written_len])?; self.write_stream.write_all(&self.buffer[..written_len])?;
self.block.clear(); self.block.clear();
if (self.num_ints % LONG_SKIP_INTERVAL) == 0u64 { if (self.num_ints % LONG_SKIP_INTERVAL) == 0u64 {


@@ -0,0 +1,229 @@
/// This module defines the logic used to search for a doc in a given
/// block. (at most 128 docs)
///
/// Searching within a block is a hotspot when running intersections,
/// so it was worth defining it in its own module.
#[cfg(target_arch = "x86_64")]
mod sse2 {
use postings::compression::COMPRESSION_BLOCK_SIZE;
use std::arch::x86_64::__m128i as DataType;
use std::arch::x86_64::_mm_add_epi32 as op_add;
use std::arch::x86_64::_mm_cmplt_epi32 as op_lt;
use std::arch::x86_64::_mm_load_si128 as op_load; // requires 128-bits alignment
use std::arch::x86_64::_mm_set1_epi32 as set1;
use std::arch::x86_64::_mm_setzero_si128 as set0;
use std::arch::x86_64::_mm_sub_epi32 as op_sub;
use std::arch::x86_64::{_mm_cvtsi128_si32, _mm_shuffle_epi32};
const MASK1: i32 = 78;
const MASK2: i32 = 177;
/// Performs an exhaustive linear search over the whole block.
///
/// There is no early exit here. We simply count the
/// number of elements that are `< target`.
pub fn linear_search_sse2_128(arr: &[u32], target: u32) -> usize {
unsafe {
let ptr = arr.as_ptr() as *const DataType;
let vkey = set1(target as i32);
let mut cnt = set0();
// We work over 4 `__m128i` at a time.
// A single `__m128i` actually contains 4 `u32`.
for i in 0..(COMPRESSION_BLOCK_SIZE as isize) / (4 * 4) {
let cmp1 = op_lt(op_load(ptr.offset(i * 4)), vkey);
let cmp2 = op_lt(op_load(ptr.offset(i * 4 + 1)), vkey);
let cmp3 = op_lt(op_load(ptr.offset(i * 4 + 2)), vkey);
let cmp4 = op_lt(op_load(ptr.offset(i * 4 + 3)), vkey);
let sum = op_add(op_add(cmp1, cmp2), op_add(cmp3, cmp4));
cnt = op_sub(cnt, sum);
}
cnt = op_add(cnt, _mm_shuffle_epi32(cnt, MASK1));
cnt = op_add(cnt, _mm_shuffle_epi32(cnt, MASK2));
_mm_cvtsi128_si32(cnt) as usize
}
}
#[cfg(test)]
mod test {
use super::linear_search_sse2_128;
#[test]
fn test_linear_search_sse2_128_u32() {
for i in 0..23 {
dbg!(i);
let arr: Vec<u32> = (0..128).map(|el| el * 2 + 1 << 18).collect();
assert_eq!(linear_search_sse2_128(&arr, arr[64] + 1), 65);
}
}
}
}
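The trick in `linear_search_sse2_128` above is that `_mm_cmplt_epi32` sets a lane to all ones (`-1` as an `i32`) whenever the element is smaller than the target, so subtracting the comparison results from the accumulator adds 1 per matching element, and the two `_mm_shuffle_epi32` steps at the end perform a horizontal sum of the four lanes. A scalar rendition of the same branchless counting idea:

```rust
// Scalar equivalent of the SSE2 routine above: every element strictly smaller than
// `target` contributes exactly 1 to the count, and in a sorted block that count is
// also the index of the first element >= target.
fn linear_search_scalar_128(arr: &[u32; 128], target: u32) -> usize {
    let mut count = 0usize;
    for &el in arr.iter() {
        // `(el < target) as usize` is 0 or 1 -- the scalar analogue of the cmplt mask.
        count += (el < target) as usize;
    }
    count
}
```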
/// This `linear search` browses exhaustively through the array:
/// there is no early exit, as such a branch would be very difficult to predict.
///
/// Coupled with `exponential search`, this function is likely
/// to be called with the same `len`.
fn linear_search(arr: &[u32], target: u32) -> usize {
arr.iter().map(|&el| if el < target { 1 } else { 0 }).sum()
}
fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
let end = arr.len();
let mut begin = 0;
for &pivot in &[1, 3, 7, 15, 31, 63] {
if pivot >= end {
break;
}
if arr[pivot] > target {
return (begin, pivot);
}
begin = pivot;
}
(begin, end)
}
fn galloping(block_docs: &[u32], target: u32) -> usize {
let (start, end) = exponential_search(&block_docs, target);
start + linear_search(&block_docs[start..end], target)
}
/// Tantivy may rely on SIMD instructions to search for a specific document within
/// a given block.
#[derive(Clone, Copy, PartialEq)]
pub enum BlockSearcher {
#[cfg(target_arch = "x86_64")]
SSE2,
Scalar,
}
impl BlockSearcher {
/// Search the first index containing an element greater or equal to
/// the target.
///
/// The results should be equivalent to
/// ```ignore
/// block[..]
///     .iter()
///     .take_while(|&&val| val < target)
///     .count()
/// ```
///
/// The `start` argument is just used to hint that the result is
/// greater than or equal to `start`. The implementation may or may not use
/// it for optimization.
///
/// # Assumptions
///
/// - The array length is greater than `start`.
/// - The block is sorted.
/// - The target is greater than or equal to `arr[start]`.
/// - The target is smaller than or equal to the last element of the block.
///
/// Currently the scalar implementation starts with an exponential search, and
/// then runs a linear search over the resulting subarray.
///
/// If SSE2 instructions are available on the `(platform, running CPU)` pair,
/// then we use a different implementation that does an exhaustive linear search over
/// the full block whenever the block is full (`len == 128`). It is surprisingly faster,
/// most likely because of the absence of branches.
pub fn search_in_block(&self, block_docs: &[u32], start: usize, target: u32) -> usize {
#[cfg(target_arch = "x86_64")]
{
use postings::compression::COMPRESSION_BLOCK_SIZE;
if *self == BlockSearcher::SSE2 {
if block_docs.len() == COMPRESSION_BLOCK_SIZE {
return sse2::linear_search_sse2_128(block_docs, target);
}
}
}
start + galloping(&block_docs[start..], target)
}
}
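A short usage sketch of this crate-internal entry point (the SSE2/scalar selection happens in the `Default` impl just below):

```rust
#[test]
fn block_searcher_sketch() {
    // A full, sorted 128-entry block, as handed out by BlockDecoder::output_array().
    let block: Vec<u32> = (0..128u32).map(|i| i * 3).collect();
    let searcher = BlockSearcher::default();            // picks SSE2 when the CPU supports it
    // First index whose value is >= 10: the elements 0, 3, 6, 9 are all smaller.
    assert_eq!(searcher.search_in_block(&block, 0, 10), 4);
}
```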
impl Default for BlockSearcher {
fn default() -> BlockSearcher {
#[cfg(target_arch = "x86_64")]
{
if is_x86_feature_detected!("sse2") {
return BlockSearcher::SSE2;
}
}
BlockSearcher::Scalar
}
}
#[cfg(test)]
mod tests {
use super::exponential_search;
use super::linear_search;
use super::BlockSearcher;
#[test]
fn test_linear_search() {
let len: usize = 50;
let arr: Vec<u32> = (0..len).map(|el| 1u32 + (el as u32) * 2).collect();
for target in 1..*arr.last().unwrap() {
let res = linear_search(&arr[..], target);
if res > 0 {
assert!(arr[res - 1] < target);
}
if res < len {
assert!(arr[res] >= target);
}
}
}
#[test]
fn test_exponentiel_search() {
assert_eq!(exponential_search(&[1, 2], 0), (0, 1));
assert_eq!(exponential_search(&[1, 2], 1), (0, 1));
assert_eq!(
exponential_search(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 7),
(3, 7)
);
}
fn util_test_search_in_block(block_searcher: BlockSearcher, block: &[u32], target: u32) {
let cursor = search_in_block_trivial_but_slow(block, target);
for i in 0..cursor {
assert_eq!(block_searcher.search_in_block(block, i, target), cursor);
}
}
fn util_test_search_in_block_all(block_searcher: BlockSearcher, block: &[u32]) {
use std::collections::HashSet;
let mut targets = HashSet::new();
for (i, val) in block.iter().cloned().enumerate() {
if i > 0 {
targets.insert(val - 1);
}
targets.insert(val);
}
for target in targets {
util_test_search_in_block(block_searcher, block, target);
}
}
fn search_in_block_trivial_but_slow(block: &[u32], target: u32) -> usize {
block.iter().take_while(|&&val| val < target).count()
}
fn test_search_in_block_util(block_searcher: BlockSearcher) {
for len in 1u32..128u32 {
let v: Vec<u32> = (0..len).map(|i| i * 2).collect();
util_test_search_in_block_all(block_searcher, &v[..]);
}
}
#[test]
fn test_search_in_block_scalar() {
test_search_in_block_util(BlockSearcher::Scalar);
}
#[cfg(target_arch = "x86_64")]
#[test]
fn test_search_in_block_sse2() {
test_search_in_block_util(BlockSearcher::SSE2);
}
}


@@ -43,9 +43,14 @@ impl BlockEncoder {
} }
} }
/// We ensure that the OutputBuffer is aligned on 128 bits
/// in order to run the SSE2 linear search on it.
#[repr(align(128))]
struct OutputBuffer([u32; COMPRESSION_BLOCK_SIZE + 1]);
pub struct BlockDecoder { pub struct BlockDecoder {
bitpacker: BitPacker4x, bitpacker: BitPacker4x,
pub output: [u32; COMPRESSION_BLOCK_SIZE + 1], output: OutputBuffer,
pub output_len: usize, pub output_len: usize,
} }
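`#[repr(align(128))]` requests 128-byte alignment for the wrapper, comfortably covering the 16-byte (128-bit) alignment that the aligned SSE2 loads in the block search require. A self-contained check of that guarantee, assuming `COMPRESSION_BLOCK_SIZE` is 128 (the `BitPacker4x` block length):

```rust
// Stand-alone sketch mirroring the OutputBuffer definition above.
#[repr(align(128))]
struct OutputBuffer([u32; 128 + 1]);

fn main() {
    // The wrapper type is 128-byte aligned...
    assert_eq!(std::mem::align_of::<OutputBuffer>(), 128);
    let buf = OutputBuffer([0u32; 128 + 1]);
    // ...and (with the single field laid out at offset 0, as rustc does in practice)
    // the u32 array satisfies the 16-byte requirement of _mm_load_si128.
    assert_eq!(buf.0.as_ptr() as usize % 16, 0);
}
```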
@@ -59,7 +64,7 @@ impl BlockDecoder {
output[COMPRESSION_BLOCK_SIZE] = 0u32; output[COMPRESSION_BLOCK_SIZE] = 0u32;
BlockDecoder { BlockDecoder {
bitpacker: BitPacker4x::new(), bitpacker: BitPacker4x::new(),
output, output: OutputBuffer(output),
output_len: 0, output_len: 0,
} }
} }
@@ -72,23 +77,23 @@ impl BlockDecoder {
) -> usize { ) -> usize {
self.output_len = COMPRESSION_BLOCK_SIZE; self.output_len = COMPRESSION_BLOCK_SIZE;
self.bitpacker self.bitpacker
.decompress_sorted(offset, &compressed_data, &mut self.output, num_bits) .decompress_sorted(offset, &compressed_data, &mut self.output.0, num_bits)
} }
pub fn uncompress_block_unsorted(&mut self, compressed_data: &[u8], num_bits: u8) -> usize { pub fn uncompress_block_unsorted(&mut self, compressed_data: &[u8], num_bits: u8) -> usize {
self.output_len = COMPRESSION_BLOCK_SIZE; self.output_len = COMPRESSION_BLOCK_SIZE;
self.bitpacker self.bitpacker
.decompress(&compressed_data, &mut self.output, num_bits) .decompress(&compressed_data, &mut self.output.0, num_bits)
} }
#[inline] #[inline]
pub fn output_array(&self) -> &[u32] { pub fn output_array(&self) -> &[u32] {
&self.output[..self.output_len] &self.output.0[..self.output_len]
} }
#[inline] #[inline]
pub fn output(&self, idx: usize) -> u32 { pub fn output(&self, idx: usize) -> u32 {
self.output[idx] self.output.0[idx]
} }
} }
@@ -159,12 +164,12 @@ impl VIntDecoder for BlockDecoder {
num_els: usize, num_els: usize,
) -> usize { ) -> usize {
self.output_len = num_els; self.output_len = num_els;
vint::uncompress_sorted(compressed_data, &mut self.output[..num_els], offset) vint::uncompress_sorted(compressed_data, &mut self.output.0[..num_els], offset)
} }
fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize { fn uncompress_vint_unsorted<'a>(&mut self, compressed_data: &'a [u8], num_els: usize) -> usize {
self.output_len = num_els; self.output_len = num_els;
vint::uncompress_unsorted(compressed_data, &mut self.output[..num_els]) vint::uncompress_unsorted(compressed_data, &mut self.output.0[..num_els])
} }
} }


@@ -2,6 +2,7 @@
Postings module (also called inverted index) Postings module (also called inverted index)
*/ */
mod block_search;
pub(crate) mod compression; pub(crate) mod compression;
/// Postings module /// Postings module
/// ///
@@ -16,6 +17,8 @@ mod skip;
mod stacker; mod stacker;
mod term_info; mod term_info;
pub(crate) use self::block_search::BlockSearcher;
pub(crate) use self::postings_writer::MultiFieldPostingsWriter; pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer}; pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
@@ -31,7 +34,6 @@ pub(crate) use self::stacker::compute_table_size;
pub use common::HasLen; pub use common::HasLen;
pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32; pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32;
pub(crate) type UnorderedTermId = u64; pub(crate) type UnorderedTermId = u64;
#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
@@ -58,7 +60,7 @@ pub mod tests {
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use schema::Field; use schema::Field;
use schema::IndexRecordOption; use schema::IndexRecordOption;
use schema::{Document, Schema, Term, INT_INDEXED, STRING, TEXT}; use schema::{Document, Schema, Term, INDEXED, STRING, TEXT};
use std::iter; use std::iter;
use DocId; use DocId;
use Score; use Score;
@@ -101,14 +103,11 @@ pub mod tests {
} }
index_writer.add_document(doc!(title => r#"abc be be be be abc"#)); index_writer.add_document(doc!(title => r#"abc be be be be abc"#));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap();
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let inverted_index = searcher.segment_reader(0u32).inverted_index(title); let inverted_index = searcher.segment_reader(0u32).inverted_index(title);
let term = Term::from_field_text(title, "abc"); let term = Term::from_field_text(title, "abc");
let mut positions = Vec::new(); let mut positions = Vec::new();
{ {
let mut postings = inverted_index let mut postings = inverted_index
.read_postings(&term, IndexRecordOption::WithFreqsAndPositions) .read_postings(&term, IndexRecordOption::WithFreqsAndPositions)
@@ -293,9 +292,8 @@ pub mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap();
let term_a = Term::from_field_text(text_field, "a"); let term_a = Term::from_field_text(text_field, "a");
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let mut postings = segment_reader let mut postings = segment_reader
.inverted_index(text_field) .inverted_index(text_field)
@@ -317,7 +315,7 @@ pub mod tests {
let index = { let index = {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let value_field = schema_builder.add_u64_field("value", INT_INDEXED); let value_field = schema_builder.add_u64_field("value", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -332,10 +330,9 @@ pub mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap();
index index
}; };
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
// check that the basic usage works // check that the basic usage works
@@ -403,8 +400,7 @@ pub mod tests {
index_writer.delete_term(term_0); index_writer.delete_term(term_0);
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
// make sure seeking still works // make sure seeking still works
@@ -451,12 +447,9 @@ pub mod tests {
{ {
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
index_writer.delete_term(term_1); index_writer.delete_term(term_1);
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
// finally, check that it's empty // finally, check that it's empty
{ {
@@ -512,7 +505,6 @@ pub mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap();
index index
}; };
} }


@@ -33,9 +33,10 @@ fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<PostingsWriter> {
} }
}) })
.unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()), .unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()),
FieldType::U64(_) | FieldType::I64(_) | FieldType::HierarchicalFacet => { FieldType::U64(_)
SpecializedPostingsWriter::<NothingRecorder>::new_boxed() | FieldType::I64(_)
} | FieldType::Date(_)
| FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(),
FieldType::Bytes => { FieldType::Bytes => {
// FieldType::Bytes cannot actually be indexed. // FieldType::Bytes cannot actually be indexed.
// TODO fix during the indexer refactoring described in #276 // TODO fix during the indexer refactoring described in #276
@@ -51,6 +52,31 @@ pub struct MultiFieldPostingsWriter {
per_field_postings_writers: Vec<Box<PostingsWriter>>, per_field_postings_writers: Vec<Box<PostingsWriter>>,
} }
fn make_field_partition(
term_offsets: &[(&[u8], Addr, UnorderedTermId)],
) -> Vec<(Field, usize, usize)> {
let term_offsets_it = term_offsets
.iter()
.map(|(key, _, _)| Term::wrap(key).field())
.enumerate();
let mut prev_field = Field(u32::max_value());
let mut fields = vec![];
let mut offsets = vec![];
for (offset, field) in term_offsets_it {
if field != prev_field {
prev_field = field;
fields.push(field);
offsets.push(offset);
}
}
offsets.push(term_offsets.len());
let mut field_offsets = vec![];
for i in 0..fields.len() {
field_offsets.push((fields[i], offsets[i], offsets[i + 1]));
}
field_offsets
}
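`make_field_partition` relies on `term_offsets` being sorted by serialized term bytes; since the field id is the prefix of every term key, equal fields form contiguous runs, and the function only has to record where each run starts and stops. A simplified, self-contained sketch of the same partitioning over plain field ids:

```rust
// Simplified model of make_field_partition: turn a sorted slice of field ids into
// (field, start, stop) runs with half-open [start, stop) ranges.
fn partition_by_field(field_ids: &[u32]) -> Vec<(u32, usize, usize)> {
    let mut runs = Vec::new();
    let mut run_start = 0usize;
    for i in 1..=field_ids.len() {
        if i == field_ids.len() || field_ids[i] != field_ids[run_start] {
            runs.push((field_ids[run_start], run_start, i));
            run_start = i;
        }
    }
    runs
}

#[test]
fn partition_by_field_sketch() {
    assert_eq!(
        partition_by_field(&[0, 0, 0, 2, 2, 5]),
        vec![(0, 0, 3), (2, 3, 5), (5, 5, 6)]
    );
}
```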
impl MultiFieldPostingsWriter { impl MultiFieldPostingsWriter {
/// Create a new `MultiFieldPostingsWriter` given /// Create a new `MultiFieldPostingsWriter` given
/// a schema and a heap. /// a schema and a heap.
@@ -96,36 +122,16 @@ impl MultiFieldPostingsWriter {
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> { ) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = self let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
.term_index self.term_index.iter().collect();
.iter()
.map(|(term_bytes, addr, bucket_id)| (term_bytes, addr, bucket_id as UnorderedTermId))
.collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k); term_offsets.sort_unstable_by_key(|&(k, _, _)| k);
let mut offsets: Vec<(Field, usize)> = vec![];
let term_offsets_it = term_offsets
.iter()
.cloned()
.map(|(key, _, _)| Term::wrap(key).field())
.enumerate();
let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> = let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new(); HashMap::new();
let mut prev_field = Field(u32::max_value()); let field_offsets = make_field_partition(&term_offsets);
for (offset, field) in term_offsets_it {
if field != prev_field {
offsets.push((field, offset));
prev_field = field;
}
}
offsets.push((Field(0), term_offsets.len()));
for i in 0..(offsets.len() - 1) {
let (field, start) = offsets[i];
let (_, stop) = offsets[i + 1];
for (field, start, stop) in field_offsets {
let field_entry = self.schema.get_field_entry(field); let field_entry = self.schema.get_field_entry(field);
match *field_entry.field_type() { match *field_entry.field_type() {
@@ -143,7 +149,7 @@ impl MultiFieldPostingsWriter {
.collect(); .collect();
unordered_term_mappings.insert(field, mapping); unordered_term_mappings.insert(field, mapping);
} }
FieldType::U64(_) | FieldType::I64(_) => {} FieldType::U64(_) | FieldType::I64(_) | FieldType::Date(_) => {}
FieldType::Bytes => {} FieldType::Bytes => {}
} }

View File

@@ -4,7 +4,6 @@ use postings::FieldSerializer;
use std::io; use std::io;
use DocId; use DocId;
const EMPTY_ARRAY: [u32; 0] = [0u32; 0];
const POSITION_END: u32 = 0; const POSITION_END: u32 = 0;
#[derive(Default)] #[derive(Default)]
@@ -115,7 +114,7 @@ impl Recorder for NothingRecorder {
let buffer = buffer_lender.lend_u8(); let buffer = buffer_lender.lend_u8();
self.stack.read_to_end(heap, buffer); self.stack.read_to_end(heap, buffer);
for doc in VInt32Reader::new(&buffer[..]) { for doc in VInt32Reader::new(&buffer[..]) {
serializer.write_doc(doc as u32, 0u32, &EMPTY_ARRAY)?; serializer.write_doc(doc as u32, 0u32, &[][..])?;
} }
Ok(()) Ok(())
} }
@@ -168,7 +167,7 @@ impl Recorder for TermFrequencyRecorder {
let mut u32_it = VInt32Reader::new(&buffer[..]); let mut u32_it = VInt32Reader::new(&buffer[..]);
while let Some(doc) = u32_it.next() { while let Some(doc) = u32_it.next() {
let term_freq = u32_it.next().unwrap_or(self.current_tf); let term_freq = u32_it.next().unwrap_or(self.current_tf);
serializer.write_doc(doc as u32, term_freq, &EMPTY_ARRAY)?; serializer.write_doc(doc as u32, term_freq, &[][..])?;
} }
Ok(()) Ok(())

View File

@@ -2,22 +2,21 @@ use common::BitSet;
use common::HasLen; use common::HasLen;
use common::{BinarySerializable, VInt}; use common::{BinarySerializable, VInt};
use docset::{DocSet, SkipResult}; use docset::{DocSet, SkipResult};
use fst::Streamer;
use owned_read::OwnedRead; use owned_read::OwnedRead;
use positions::PositionReader; use positions::PositionReader;
use postings::compression::compressed_block_size; use postings::compression::compressed_block_size;
use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE}; use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
use postings::serializer::PostingsSerializer; use postings::serializer::PostingsSerializer;
use postings::BlockSearcher;
use postings::FreqReadingOption; use postings::FreqReadingOption;
use postings::Postings; use postings::Postings;
use postings::SkipReader; use postings::SkipReader;
use postings::USE_SKIP_INFO_LIMIT; use postings::USE_SKIP_INFO_LIMIT;
use schema::IndexRecordOption; use schema::IndexRecordOption;
use std::cmp::Ordering; use std::cmp::Ordering;
use tantivy_fst::Streamer;
use DocId; use DocId;
const EMPTY_ARR: [u8; 0] = [];
struct PositionComputer { struct PositionComputer {
// store the amount of position int // store the amount of position int
// before reading positions. // before reading positions.
@@ -62,6 +61,7 @@ pub struct SegmentPostings {
block_cursor: BlockSegmentPostings, block_cursor: BlockSegmentPostings,
cur: usize, cur: usize,
position_computer: Option<PositionComputer>, position_computer: Option<PositionComputer>,
block_searcher: BlockSearcher,
} }
impl SegmentPostings { impl SegmentPostings {
@@ -72,6 +72,7 @@ impl SegmentPostings {
block_cursor: empty_block_cursor, block_cursor: empty_block_cursor,
cur: COMPRESSION_BLOCK_SIZE, cur: COMPRESSION_BLOCK_SIZE,
position_computer: None, position_computer: None,
block_searcher: BlockSearcher::default(),
} }
} }
@@ -119,42 +120,31 @@ impl SegmentPostings {
block_cursor: segment_block_postings, block_cursor: segment_block_postings,
cur: COMPRESSION_BLOCK_SIZE, // cursor within the block cur: COMPRESSION_BLOCK_SIZE, // cursor within the block
position_computer: positions_stream_opt.map(PositionComputer::new), position_computer: positions_stream_opt.map(PositionComputer::new),
block_searcher: BlockSearcher::default(),
} }
} }
} }
fn linear_search(arr: &[u32], target: u32) -> usize {
arr.iter().map(|&el| if el < target { 1 } else { 0 }).sum()
}
fn exponential_search(arr: &[u32], target: u32) -> (usize, usize) {
let end = arr.len();
let mut begin = 0;
for &pivot in &[1, 3, 7, 15, 31, 63] {
if pivot >= end {
break;
}
if arr[pivot] > target {
return (begin, pivot);
}
begin = pivot;
}
(begin, end)
}
/// Search the first index containing an element greater or equal to the target.
///
/// # Assumption
///
/// The array is assumed non empty.
/// The target is assumed greater or equal to the first element.
/// The target is assumed smaller or equal to the last element.
fn search_within_block(block_docs: &[u32], target: u32) -> usize {
let (start, end) = exponential_search(block_docs, target);
start + linear_search(&block_docs[start..end], target)
}
impl DocSet for SegmentPostings { impl DocSet for SegmentPostings {
// goes to the next element.
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> bool {
if self.position_computer.is_some() {
let term_freq = self.term_freq() as usize;
self.position_computer.as_mut().unwrap().add_skip(term_freq);
}
self.cur += 1;
if self.cur >= self.block_cursor.block_len() {
self.cur = 0;
if !self.block_cursor.advance() {
self.cur = COMPRESSION_BLOCK_SIZE;
return false;
}
}
true
}
fn skip_next(&mut self, target: DocId) -> SkipResult { fn skip_next(&mut self, target: DocId) -> SkipResult {
if !self.advance() { if !self.advance() {
return SkipResult::End; return SkipResult::End;
@@ -213,9 +203,8 @@ impl DocSet for SegmentPostings {
// we're in the right block now, start with an exponential search // we're in the right block now, start with an exponential search
let block_docs = self.block_cursor.docs(); let block_docs = self.block_cursor.docs();
let new_cur = self let new_cur = self
.cur .block_searcher
.wrapping_add(search_within_block(&block_docs[self.cur..], target)); .search_in_block(&block_docs, self.cur, target);
if need_positions { if need_positions {
sum_freqs_skipped += self.block_cursor.freqs()[self.cur..new_cur] sum_freqs_skipped += self.block_cursor.freqs()[self.cur..new_cur]
.iter() .iter()
@@ -237,29 +226,6 @@ impl DocSet for SegmentPostings {
} }
} }
// goes to the next element.
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> bool {
if self.position_computer.is_some() {
let term_freq = self.term_freq() as usize;
self.position_computer.as_mut().unwrap().add_skip(term_freq);
}
self.cur += 1;
if self.cur >= self.block_cursor.block_len() {
self.cur = 0;
if !self.block_cursor.advance() {
self.cur = COMPRESSION_BLOCK_SIZE;
return false;
}
}
true
}
fn size_hint(&self) -> u32 {
self.len() as u32
}
/// Return the current document's `DocId`. /// Return the current document's `DocId`.
#[inline] #[inline]
fn doc(&self) -> DocId { fn doc(&self) -> DocId {
@@ -271,6 +237,10 @@ impl DocSet for SegmentPostings {
docs[self.cur] docs[self.cur]
} }
fn size_hint(&self) -> u32 {
self.len() as u32
}
fn append_to_bitset(&mut self, bitset: &mut BitSet) { fn append_to_bitset(&mut self, bitset: &mut BitSet) {
// finish the current block // finish the current block
if self.advance() { if self.advance() {
@@ -369,7 +339,7 @@ impl BlockSegmentPostings {
let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data); let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
let skip_reader = match skip_data_opt { let skip_reader = match skip_data_opt {
Some(skip_data) => SkipReader::new(skip_data, record_option), Some(skip_data) => SkipReader::new(skip_data, record_option),
None => SkipReader::new(OwnedRead::new(&EMPTY_ARR[..]), record_option), None => SkipReader::new(OwnedRead::new(&[][..]), record_option),
}; };
let doc_freq = doc_freq as usize; let doc_freq = doc_freq as usize;
let num_vint_docs = doc_freq % COMPRESSION_BLOCK_SIZE; let num_vint_docs = doc_freq % COMPRESSION_BLOCK_SIZE;
@@ -403,7 +373,7 @@ impl BlockSegmentPostings {
if let Some(skip_data) = skip_data_opt { if let Some(skip_data) = skip_data_opt {
self.skip_reader.reset(skip_data); self.skip_reader.reset(skip_data);
} else { } else {
self.skip_reader.reset(OwnedRead::new(&EMPTY_ARR[..])) self.skip_reader.reset(OwnedRead::new(&[][..]))
} }
self.doc_offset = 0; self.doc_offset = 0;
self.doc_freq = doc_freq as usize; self.doc_freq = doc_freq as usize;
@@ -616,39 +586,20 @@ impl<'b> Streamer<'b> for BlockSegmentPostings {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::exponential_search;
use super::linear_search;
use super::search_within_block;
use super::BlockSegmentPostings; use super::BlockSegmentPostings;
use super::BlockSegmentPostingsSkipResult; use super::BlockSegmentPostingsSkipResult;
use super::SegmentPostings; use super::SegmentPostings;
use common::HasLen; use common::HasLen;
use core::Index; use core::Index;
use docset::DocSet; use docset::DocSet;
use fst::Streamer;
use schema::IndexRecordOption; use schema::IndexRecordOption;
use schema::Schema; use schema::Schema;
use schema::Term; use schema::Term;
use schema::INT_INDEXED; use schema::INDEXED;
use tantivy_fst::Streamer;
use DocId; use DocId;
use SkipResult; use SkipResult;
#[test]
fn test_linear_search() {
let len: usize = 50;
let arr: Vec<u32> = (0..len).map(|el| 1u32 + (el as u32) * 2).collect();
for target in 1..*arr.last().unwrap() {
let res = linear_search(&arr[..], target);
if res > 0 {
assert!(arr[res - 1] < target);
}
if res < len {
assert!(arr[res] >= target);
}
}
}
#[test] #[test]
fn test_empty_segment_postings() { fn test_empty_segment_postings() {
let mut postings = SegmentPostings::empty(); let mut postings = SegmentPostings::empty();
@@ -664,56 +615,6 @@ mod tests {
assert_eq!(postings.doc_freq(), 0); assert_eq!(postings.doc_freq(), 0);
} }
fn search_within_block_trivial_but_slow(block: &[u32], target: u32) -> usize {
block
.iter()
.cloned()
.enumerate()
.filter(|&(_, ref val)| *val >= target)
.next()
.unwrap()
.0
}
#[test]
fn test_exponentiel_search() {
assert_eq!(exponential_search(&[1, 2], 0), (0, 1));
assert_eq!(exponential_search(&[1, 2], 1), (0, 1));
assert_eq!(
exponential_search(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 7),
(3, 7)
);
}
fn util_test_search_within_block(block: &[u32], target: u32) {
assert_eq!(
search_within_block(block, target),
search_within_block_trivial_but_slow(block, target)
);
}
fn util_test_search_within_block_all(block: &[u32]) {
use std::collections::HashSet;
let mut targets = HashSet::new();
for (i, val) in block.iter().cloned().enumerate() {
if i > 0 {
targets.insert(val - 1);
}
targets.insert(val);
}
for target in targets {
util_test_search_within_block(block, target);
}
}
#[test]
fn test_search_within_block() {
for len in 1u32..128u32 {
let v: Vec<u32> = (0..len).map(|i| i * 2).collect();
util_test_search_within_block_all(&v[..]);
}
}
#[test] #[test]
fn test_block_segment_postings() { fn test_block_segment_postings() {
let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>()); let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
@@ -762,7 +663,7 @@ mod tests {
fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings { fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED); let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -775,8 +676,7 @@ mod tests {
last_doc = doc + 1; last_doc = doc + 1;
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let inverted_index = segment_reader.inverted_index(int_field); let inverted_index = segment_reader.inverted_index(int_field);
let term = Term::from_field_u64(int_field, 0u64); let term = Term::from_field_u64(int_field, 0u64);
@@ -833,7 +733,7 @@ mod tests {
#[test] #[test]
fn test_reset_block_segment_postings() { fn test_reset_block_segment_postings() {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let int_field = schema_builder.add_u64_field("id", INT_INDEXED); let int_field = schema_builder.add_u64_field("id", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -844,8 +744,7 @@ mod tests {
index_writer.add_document(doc); index_writer.add_document(doc);
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let mut block_segments; let mut block_segments;

View File

@@ -5,12 +5,11 @@ use self::murmurhash32::murmurhash2;
use super::{Addr, MemoryArena}; use super::{Addr, MemoryArena};
use byteorder::{ByteOrder, NativeEndian}; use byteorder::{ByteOrder, NativeEndian};
use postings::stacker::memory_arena::store; use postings::stacker::memory_arena::store;
use postings::UnorderedTermId;
use std::iter; use std::iter;
use std::mem; use std::mem;
use std::slice; use std::slice;
pub type BucketId = usize;
/// Returns the actual memory size in bytes /// Returns the actual memory size in bytes
/// required to create a table of size $2^num_bits$. /// required to create a table of size $2^num_bits$.
pub fn compute_table_size(num_bits: usize) -> usize { pub fn compute_table_size(num_bits: usize) -> usize {
@@ -28,6 +27,7 @@ pub fn compute_table_size(num_bits: usize) -> usize {
struct KeyValue { struct KeyValue {
key_value_addr: Addr, key_value_addr: Addr,
hash: u32, hash: u32,
unordered_term_id: UnorderedTermId,
} }
impl Default for KeyValue { impl Default for KeyValue {
@@ -35,6 +35,7 @@ impl Default for KeyValue {
KeyValue { KeyValue {
key_value_addr: Addr::null_pointer(), key_value_addr: Addr::null_pointer(),
hash: 0u32, hash: 0u32,
unordered_term_id: UnorderedTermId::default(),
} }
} }
} }
@@ -59,6 +60,7 @@ pub struct TermHashMap {
pub heap: MemoryArena, pub heap: MemoryArena,
mask: usize, mask: usize,
occupied: Vec<usize>, occupied: Vec<usize>,
len: usize,
} }
struct QuadraticProbing { struct QuadraticProbing {
@@ -85,13 +87,13 @@ pub struct Iter<'a> {
} }
impl<'a> Iterator for Iter<'a> { impl<'a> Iterator for Iter<'a> {
type Item = (&'a [u8], Addr, BucketId); type Item = (&'a [u8], Addr, UnorderedTermId);
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
self.inner.next().cloned().map(move |bucket: usize| { self.inner.next().cloned().map(move |bucket: usize| {
let kv = self.hashmap.table[bucket]; let kv = self.hashmap.table[bucket];
let (key, offset): (&'a [u8], Addr) = self.hashmap.get_key_value(kv.key_value_addr); let (key, offset): (&'a [u8], Addr) = self.hashmap.get_key_value(kv.key_value_addr);
(key, offset, bucket as BucketId) (key, offset, kv.unordered_term_id)
}) })
} }
} }
@@ -106,6 +108,7 @@ impl TermHashMap {
heap, heap,
mask: table_size - 1, mask: table_size - 1,
occupied: Vec::with_capacity(table_size / 2), occupied: Vec::with_capacity(table_size / 2),
len: 0,
} }
} }
@@ -139,12 +142,16 @@ impl TermHashMap {
} }
} }
pub fn set_bucket(&mut self, hash: u32, key_value_addr: Addr, bucket: usize) { fn set_bucket(&mut self, hash: u32, key_value_addr: Addr, bucket: usize) -> UnorderedTermId {
self.occupied.push(bucket); self.occupied.push(bucket);
let unordered_term_id = self.len as UnorderedTermId;
self.len += 1;
self.table[bucket] = KeyValue { self.table[bucket] = KeyValue {
key_value_addr, key_value_addr,
hash, hash,
unordered_term_id,
}; };
unordered_term_id
} }
pub fn iter(&self) -> Iter { pub fn iter(&self) -> Iter {
@@ -184,7 +191,11 @@ impl TermHashMap {
/// will be in charge of returning a default value. /// will be in charge of returning a default value.
/// If the key already has an associated value, then it will be passed /// If the key already has an associated value, then it will be passed
/// `Some(previous_value)`. /// `Some(previous_value)`.
pub fn mutate_or_create<S, V, TMutator>(&mut self, key: S, mut updater: TMutator) -> BucketId pub fn mutate_or_create<S, V, TMutator>(
&mut self,
key: S,
mut updater: TMutator,
) -> UnorderedTermId
where where
S: AsRef<[u8]>, S: AsRef<[u8]>,
V: Copy + 'static, V: Copy + 'static,
@@ -200,6 +211,7 @@ impl TermHashMap {
let bucket = probe.next_probe(); let bucket = probe.next_probe();
let kv: KeyValue = self.table[bucket]; let kv: KeyValue = self.table[bucket];
if kv.is_empty() { if kv.is_empty() {
// The key does not exist yet.
let val = updater(None); let val = updater(None);
let num_bytes = let num_bytes =
std::mem::size_of::<u16>() + key_bytes.len() + std::mem::size_of::<V>(); std::mem::size_of::<u16>() + key_bytes.len() + std::mem::size_of::<V>();
@@ -211,8 +223,7 @@ impl TermHashMap {
data[2..stop].copy_from_slice(key_bytes); data[2..stop].copy_from_slice(key_bytes);
store(&mut data[stop..], val); store(&mut data[stop..], val);
} }
self.set_bucket(hash, key_addr, bucket); return self.set_bucket(hash, key_addr, bucket);
return bucket as BucketId;
} else if kv.hash == hash { } else if kv.hash == hash {
if let Some(val_addr) = if let Some(val_addr) =
self.get_value_addr_if_key_match(key_bytes, kv.key_value_addr) self.get_value_addr_if_key_match(key_bytes, kv.key_value_addr)
@@ -220,7 +231,7 @@ impl TermHashMap {
let v = self.heap.read(val_addr); let v = self.heap.read(val_addr);
let new_v = updater(Some(v)); let new_v = updater(Some(v));
self.heap.write_at(val_addr, new_v); self.heap.write_at(val_addr, new_v);
return bucket as BucketId; return kv.unordered_term_id;
} }
} }
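A hypothetical usage sketch of the new return value: the updater closure receives None on the first insertion of a key and Some(previous_value) afterwards, and the same key always yields the same insertion-order UnorderedTermId (the function name and the u32 counter are illustrative only):

fn demo(map: &mut TermHashMap) {
    // Start the count at 1, or increment the previously stored count.
    let count_up = |opt: Option<u32>| opt.map(|c| c + 1).unwrap_or(1);
    let id_a = map.mutate_or_create("hello", count_up);
    let id_b = map.mutate_or_create("hello", count_up);
    // Both calls hit the same bucket, so the term id is stable.
    assert_eq!(id_a, id_b);
}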
} }

View File

@@ -101,8 +101,9 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index_writer.add_document(doc!(field=>"ccc")); index_writer.add_document(doc!(field=>"ccc"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); reader.reload().unwrap();
let searcher = reader.searcher();
let weight = AllQuery.weight(&searcher, false).unwrap(); let weight = AllQuery.weight(&searcher, false).unwrap();
{ {
let reader = searcher.segment_reader(0); let reader = searcher.segment_reader(0);

View File

@@ -1,10 +1,10 @@
use common::BitSet; use common::BitSet;
use core::SegmentReader; use core::SegmentReader;
use fst::Automaton;
use query::BitSetDocSet; use query::BitSetDocSet;
use query::ConstScorer; use query::ConstScorer;
use query::{Scorer, Weight}; use query::{Scorer, Weight};
use schema::{Field, IndexRecordOption}; use schema::{Field, IndexRecordOption};
use tantivy_fst::Automaton;
use termdict::{TermDictionary, TermStreamer}; use termdict::{TermDictionary, TermStreamer};
use Result; use Result;

View File

@@ -51,7 +51,6 @@ mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap();
(index, text_field) (index, text_field)
} }
@@ -60,7 +59,8 @@ mod tests {
let (index, text_field) = aux_test_helper(); let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]); let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("(+a +b) d").unwrap(); let query = query_parser.parse_query("(+a +b) d").unwrap();
assert_eq!(query.count(&*index.searcher()).unwrap(), 3); let searcher = index.reader().unwrap().searcher();
assert_eq!(query.count(&searcher).unwrap(), 3);
} }
#[test] #[test]
@@ -68,7 +68,7 @@ mod tests {
let (index, text_field) = aux_test_helper(); let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]); let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("+a").unwrap(); let query = query_parser.parse_query("+a").unwrap();
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap(); let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
assert!(scorer.is::<TermScorer>()); assert!(scorer.is::<TermScorer>());
@@ -78,7 +78,7 @@ mod tests {
pub fn test_boolean_termonly_intersection() { pub fn test_boolean_termonly_intersection() {
let (index, text_field) = aux_test_helper(); let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]); let query_parser = QueryParser::for_index(&index, vec![text_field]);
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
{ {
let query = query_parser.parse_query("+a +b +c").unwrap(); let query = query_parser.parse_query("+a +b +c").unwrap();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
@@ -97,7 +97,7 @@ mod tests {
pub fn test_boolean_reqopt() { pub fn test_boolean_reqopt() {
let (index, text_field) = aux_test_helper(); let (index, text_field) = aux_test_helper();
let query_parser = QueryParser::for_index(&index, vec![text_field]); let query_parser = QueryParser::for_index(&index, vec![text_field]);
let searcher = index.searcher(); let searcher = index.reader().unwrap().searcher();
{ {
let query = query_parser.parse_query("+a b").unwrap(); let query = query_parser.parse_query("+a b").unwrap();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
@@ -126,10 +126,13 @@ mod tests {
query query
}; };
let reader = index.reader().unwrap();
let matching_docs = |boolean_query: &Query| { let matching_docs = |boolean_query: &Query| {
let searcher = index.searcher(); reader
let test_docs = searcher.search(boolean_query, &TestCollector).unwrap(); .searcher()
test_docs .search(boolean_query, &TestCollector)
.unwrap()
.docs() .docs()
.iter() .iter()
.cloned() .cloned()
@@ -185,10 +188,12 @@ mod tests {
let query: Box<Query> = Box::new(term_query); let query: Box<Query> = Box::new(term_query);
query query
}; };
let reader = index.reader().unwrap();
let score_docs = |boolean_query: &Query| { let score_docs = |boolean_query: &Query| {
let searcher = index.searcher(); let fruit = reader
let fruit = searcher.search(boolean_query, &TestCollector).unwrap(); .searcher()
.search(boolean_query, &TestCollector)
.unwrap();
fruit.scores().to_vec() fruit.scores().to_vec()
}; };

View File

@@ -52,9 +52,8 @@ lazy_static! {
/// )); /// ));
/// index_writer.commit().unwrap(); /// index_writer.commit().unwrap();
/// } /// }
/// /// let reader = index.reader()?;
/// index.load_searchers()?; /// let searcher = reader.searcher();
/// let searcher = index.searcher();
/// ///
/// { /// {
/// ///
@@ -141,8 +140,8 @@ mod test {
)); ));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
{ {
let term = Term::from_field_text(country_field, "japon"); let term = Term::from_field_text(country_field, "japon");

View File

@@ -14,41 +14,35 @@ use Score;
/// specialized implementation if the two /// specialized implementation if the two
/// shortest scorers are `TermScorer`s. /// shortest scorers are `TermScorer`s.
pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> { pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> {
if scorers.is_empty() {
return Box::new(EmptyScorer);
}
if scorers.len() == 1 {
return scorers.pop().unwrap();
}
// We know that we have at least 2 elements.
let num_docsets = scorers.len(); let num_docsets = scorers.len();
scorers.sort_by(|left, right| right.size_hint().cmp(&left.size_hint())); scorers.sort_by(|left, right| right.size_hint().cmp(&left.size_hint()));
let rarest_opt = scorers.pop(); let left = scorers.pop().unwrap();
let second_rarest_opt = scorers.pop(); let right = scorers.pop().unwrap();
scorers.reverse(); scorers.reverse();
match (rarest_opt, second_rarest_opt) { let all_term_scorers = [&left, &right]
(None, None) => Box::new(EmptyScorer), .iter()
(Some(single_docset), None) => single_docset, .all(|&scorer| scorer.is::<TermScorer>());
(Some(left), Some(right)) => { if all_term_scorers {
{ return Box::new(Intersection {
let all_term_scorers = [&left, &right] left: *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
.iter() right: *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap()),
.all(|&scorer| scorer.is::<TermScorer>()); others: scorers,
if all_term_scorers { num_docsets,
let left = *(left.downcast::<TermScorer>().map_err(|_| ()).unwrap()); });
let right = *(right.downcast::<TermScorer>().map_err(|_| ()).unwrap());
return Box::new(Intersection {
left,
right,
others: scorers,
num_docsets,
});
}
}
Box::new(Intersection {
left,
right,
others: scorers,
num_docsets,
})
}
_ => {
unreachable!();
}
} }
Box::new(Intersection {
left,
right,
others: scorers,
num_docsets,
})
} }
/// Creates a `DocSet` that iterates through the intersection of two `DocSet`s. /// Creates a `DocSet` that iterates through the intersection of two `DocSet`s.
@@ -124,7 +118,6 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
return false; return false;
} }
} }
match left.skip_next(candidate) { match left.skip_next(candidate) {
SkipResult::Reached => { SkipResult::Reached => {
break; break;
@@ -140,35 +133,36 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOt
} }
// test the remaining scorers; // test the remaining scorers;
for (ord, docset) in self.others.iter_mut().enumerate() { for (ord, docset) in self.others.iter_mut().enumerate() {
if ord != other_candidate_ord { if ord == other_candidate_ord {
// `candidate_ord` is already at the continue;
// right position. }
// // `candidate_ord` is already at the
// Calling `skip_next` would advance this docset // right position.
// and miss it. //
match docset.skip_next(candidate) { // Calling `skip_next` would advance this docset
SkipResult::Reached => {} // and miss it.
SkipResult::OverStep => { match docset.skip_next(candidate) {
// this is not in the intersection, SkipResult::Reached => {}
// let's update our candidate. SkipResult::OverStep => {
candidate = docset.doc(); // this is not in the intersection,
match left.skip_next(candidate) { // let's update our candidate.
SkipResult::Reached => { candidate = docset.doc();
other_candidate_ord = ord; match left.skip_next(candidate) {
} SkipResult::Reached => {
SkipResult::OverStep => { other_candidate_ord = ord;
candidate = left.doc(); }
other_candidate_ord = usize::max_value(); SkipResult::OverStep => {
} candidate = left.doc();
SkipResult::End => { other_candidate_ord = usize::max_value();
return false; }
} SkipResult::End => {
return false;
} }
continue 'outer;
}
SkipResult::End => {
return false;
} }
continue 'outer;
}
SkipResult::End => {
return false;
} }
} }
} }

View File

@@ -31,7 +31,6 @@ mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap();
index index
} }
@@ -46,8 +45,7 @@ mod tests {
]); ]);
let schema = index.schema(); let schema = index.schema();
let text_field = schema.get_field("text").unwrap(); let text_field = schema.get_field("text").unwrap();
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let test_query = |texts: Vec<&str>| { let test_query = |texts: Vec<&str>| {
let terms: Vec<Term> = texts let terms: Vec<Term> = texts
.iter() .iter()
@@ -90,8 +88,7 @@ mod tests {
index_writer.add_document(doc!(text_field=>"a b c")); index_writer.add_document(doc!(text_field=>"a b c"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let phrase_query = PhraseQuery::new(vec![ let phrase_query = PhraseQuery::new(vec![
Term::from_field_text(text_field, "a"), Term::from_field_text(text_field, "a"),
Term::from_field_text(text_field, "b"), Term::from_field_text(text_field, "b"),
@@ -115,8 +112,7 @@ mod tests {
let index = create_index(&["a b c", "a b c a b"]); let index = create_index(&["a b c", "a b c a b"]);
let schema = index.schema(); let schema = index.schema();
let text_field = schema.get_field("text").unwrap(); let text_field = schema.get_field("text").unwrap();
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let test_query = |texts: Vec<&str>| { let test_query = |texts: Vec<&str>| {
let terms: Vec<Term> = texts let terms: Vec<Term> = texts
.iter() .iter()
@@ -148,8 +144,7 @@ mod tests {
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let test_query = |texts: Vec<&str>| { let test_query = |texts: Vec<&str>| {
let terms: Vec<Term> = texts let terms: Vec<Term> = texts
.iter() .iter()
@@ -177,8 +172,7 @@ mod tests {
index_writer.add_document(doc!(text_field=>"a b c d e f g h")); index_writer.add_document(doc!(text_field=>"a b c d e f g h"));
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
let test_query = |texts: Vec<(usize, &str)>| { let test_query = |texts: Vec<(usize, &str)>| {
let terms: Vec<(usize, Term)> = texts let terms: Vec<(usize, Term)> = texts
.iter() .iter()

View File

@@ -52,7 +52,7 @@ parser! {
field_name: None, field_name: None,
phrase, phrase,
}); });
try(term_query) attempt(term_query)
.or(term_default_field) .or(term_default_field)
.map(UserInputLeaf::from) .map(UserInputLeaf::from)
} }
@@ -83,12 +83,12 @@ parser! {
let lower_bound = { let lower_bound = {
let excl = (char('{'), term_val()).map(|(_, w)| UserInputBound::Exclusive(w)); let excl = (char('{'), term_val()).map(|(_, w)| UserInputBound::Exclusive(w));
let incl = (char('['), term_val()).map(|(_, w)| UserInputBound::Inclusive(w)); let incl = (char('['), term_val()).map(|(_, w)| UserInputBound::Inclusive(w));
try(excl).or(incl) attempt(excl).or(incl)
}; };
let upper_bound = { let upper_bound = {
let excl = (term_val(), char('}')).map(|(w, _)| UserInputBound::Exclusive(w)); let excl = (term_val(), char('}')).map(|(w, _)| UserInputBound::Exclusive(w));
let incl = (term_val(), char(']')).map(|(w, _)| UserInputBound::Inclusive(w)); let incl = (term_val(), char(']')).map(|(w, _)| UserInputBound::Inclusive(w));
try(excl).or(incl) attempt(excl).or(incl)
}; };
( (
optional((field(), char(':')).map(|x| x.0)), optional((field(), char(':')).map(|x| x.0)),
@@ -112,11 +112,11 @@ parser! {
.or((char('+'), leaf()).map(|(_, expr)| expr.unary(Occur::Must) )) .or((char('+'), leaf()).map(|(_, expr)| expr.unary(Occur::Must) ))
.or((char('('), parse_to_ast(), char(')')).map(|(_, expr, _)| expr)) .or((char('('), parse_to_ast(), char(')')).map(|(_, expr, _)| expr))
.or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All) )) .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All) ))
.or(try( .or(attempt(
(string("NOT"), spaces1(), leaf()).map(|(_, _, expr)| expr.unary(Occur::MustNot)) (string("NOT"), spaces1(), leaf()).map(|(_, _, expr)| expr.unary(Occur::MustNot))
) )
) )
.or(try( .or(attempt(
range().map(UserInputAST::from) range().map(UserInputAST::from)
) )
) )
@@ -160,7 +160,7 @@ parser! {
where [I: Stream<Item = char>] where [I: Stream<Item = char>]
{ {
( (
try( attempt(
chainl1( chainl1(
leaf().map(Element::SingleEl), leaf().map(Element::SingleEl),
binary_operand().map(|op: BinaryOperand| binary_operand().map(|op: BinaryOperand|

View File

@@ -50,6 +50,8 @@ pub enum QueryParserError {
/// The query contains a range query with a phrase as one of the bounds. /// The query contains a range query with a phrase as one of the bounds.
/// Only terms can be used as bounds. /// Only terms can be used as bounds.
RangeMustNotHavePhrase, RangeMustNotHavePhrase,
/// The format for the date field is not RFC 3339 compliant.
DateFormatError(chrono::ParseError),
} }
impl From<ParseIntError> for QueryParserError { impl From<ParseIntError> for QueryParserError {
@@ -58,6 +60,12 @@ impl From<ParseIntError> for QueryParserError {
} }
} }
impl From<chrono::ParseError> for QueryParserError {
fn from(err: chrono::ParseError) -> QueryParserError {
QueryParserError::DateFormatError(err)
}
}
/// Recursively remove empty clause from the AST /// Recursively remove empty clause from the AST
/// ///
/// Returns `None` iff the `logical_ast` ended up being empty. /// Returns `None` iff the `logical_ast` ended up being empty.
@@ -127,6 +135,8 @@ fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
/// a word lexicographically between `a` and `c` (inclusive lower bound, exclusive upper bound). /// a word lexicographically between `a` and `c` (inclusive lower bound, exclusive upper bound).
/// Inclusive bounds are `[]`, exclusive are `{}`. /// Inclusive bounds are `[]`, exclusive are `{}`.
/// ///
/// * date values: The query parser supports RFC 3339 formatted dates, e.g. "2002-10-02T15:00:00.05Z".
///
/// * all docs query: A plain `*` will match all documents in the index. /// * all docs query: A plain `*` will match all documents in the index.
/// ///
#[derive(Clone)] #[derive(Clone)]
@@ -229,6 +239,13 @@ impl QueryParser {
let term = Term::from_field_i64(field, val); let term = Term::from_field_i64(field, val);
Ok(vec![(0, term)]) Ok(vec![(0, term)])
} }
FieldType::Date(_) => match chrono::DateTime::parse_from_rfc3339(phrase) {
Ok(x) => Ok(vec![(
0,
Term::from_field_date(field, &x.with_timezone(&chrono::Utc)),
)]),
Err(e) => Err(QueryParserError::DateFormatError(e)),
},
FieldType::U64(_) => { FieldType::U64(_) => {
let val: u64 = u64::from_str(phrase)?; let val: u64 = u64::from_str(phrase)?;
let term = Term::from_field_u64(field, val); let term = Term::from_field_u64(field, val);
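The chrono conversion used in the FieldType::Date arm above can be illustrated in isolation; the Field/Term plumbing is elided and the helper name is only for the example:

fn rfc3339_to_utc(text: &str) -> Result<chrono::DateTime<chrono::Utc>, chrono::ParseError> {
    // Parse the timestamp with its original offset, then normalize to UTC,
    // exactly as the query parser does before building the Term.
    let with_offset = chrono::DateTime::parse_from_rfc3339(text)?;
    Ok(with_offset.with_timezone(&chrono::Utc))
}

// rfc3339_to_utc("1985-04-12T23:20:50.52Z").unwrap().timestamp() == 482_196_050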
@@ -487,7 +504,7 @@ mod test {
use query::Query; use query::Query;
use schema::Field; use schema::Field;
use schema::{IndexRecordOption, TextFieldIndexing, TextOptions}; use schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
use schema::{Schema, Term, INT_INDEXED, STORED, STRING, TEXT}; use schema::{Schema, Term, INDEXED, STORED, STRING, TEXT};
use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager}; use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager};
use Index; use Index;
@@ -501,13 +518,14 @@ mod test {
.set_stored(); .set_stored();
let title = schema_builder.add_text_field("title", TEXT); let title = schema_builder.add_text_field("title", TEXT);
let text = schema_builder.add_text_field("text", TEXT); let text = schema_builder.add_text_field("text", TEXT);
schema_builder.add_i64_field("signed", INT_INDEXED); schema_builder.add_i64_field("signed", INDEXED);
schema_builder.add_u64_field("unsigned", INT_INDEXED); schema_builder.add_u64_field("unsigned", INDEXED);
schema_builder.add_text_field("notindexed_text", STORED); schema_builder.add_text_field("notindexed_text", STORED);
schema_builder.add_text_field("notindexed_u64", STORED); schema_builder.add_text_field("notindexed_u64", STORED);
schema_builder.add_text_field("notindexed_i64", STORED); schema_builder.add_text_field("notindexed_i64", STORED);
schema_builder.add_text_field("nottokenized", STRING); schema_builder.add_text_field("nottokenized", STRING);
schema_builder.add_text_field("with_stop_words", text_options); schema_builder.add_text_field("with_stop_words", text_options);
schema_builder.add_date_field("date", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let default_fields = vec![title, text]; let default_fields = vec![title, text];
let tokenizer_manager = TokenizerManager::default(); let tokenizer_manager = TokenizerManager::default();
@@ -767,6 +785,18 @@ mod test {
); );
} }
#[test]
pub fn test_query_parser_expected_date() {
let query_parser = make_query_parser();
assert_matches!(
query_parser.parse_query("date:18a"),
Err(QueryParserError::DateFormatError(_))
);
assert!(query_parser
.parse_query("date:\"1985-04-12T23:20:50.52Z\"")
.is_ok());
}
#[test] #[test]
pub fn test_query_parser_not_empty_but_no_tokens() { pub fn test_query_parser_not_empty_but_no_tokens() {
let query_parser = make_query_parser(); let query_parser = make_query_parser();

View File

@@ -40,14 +40,14 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # #[macro_use] /// # #[macro_use]
/// # extern crate tantivy; /// # extern crate tantivy;
/// # use tantivy::Index; /// # use tantivy::Index;
/// # use tantivy::schema::{Schema, INT_INDEXED}; /// # use tantivy::schema::{Schema, INDEXED};
/// # use tantivy::collector::Count; /// # use tantivy::collector::Count;
/// # use tantivy::Result; /// # use tantivy::Result;
/// # use tantivy::query::RangeQuery; /// # use tantivy::query::RangeQuery;
/// # /// #
/// # fn run() -> Result<()> { /// # fn run() -> Result<()> {
/// # let mut schema_builder = Schema::builder(); /// # let mut schema_builder = Schema::builder();
/// # let year_field = schema_builder.add_u64_field("year", INT_INDEXED); /// # let year_field = schema_builder.add_u64_field("year", INDEXED);
/// # let schema = schema_builder.build(); /// # let schema = schema_builder.build();
/// # /// #
/// # let index = Index::create_in_ram(schema); /// # let index = Index::create_in_ram(schema);
@@ -61,8 +61,8 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// # } /// # }
/// # index_writer.commit().unwrap(); /// # index_writer.commit().unwrap();
/// # } /// # }
/// # index.load_searchers()?; /// # let reader = index.reader()?;
/// let searcher = index.searcher(); /// let searcher = reader.searcher();
/// ///
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970); /// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
/// ///
@@ -293,7 +293,7 @@ mod tests {
use super::RangeQuery; use super::RangeQuery;
use collector::Count; use collector::Count;
use schema::{Document, Field, Schema, INT_INDEXED}; use schema::{Document, Field, Schema, INDEXED};
use std::collections::Bound; use std::collections::Bound;
use Index; use Index;
use Result; use Result;
@@ -302,7 +302,7 @@ mod tests {
fn test_range_query_simple() { fn test_range_query_simple() {
fn run() -> Result<()> { fn run() -> Result<()> {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let year_field = schema_builder.add_u64_field("year", INT_INDEXED); let year_field = schema_builder.add_u64_field("year", INDEXED);
let schema = schema_builder.build(); let schema = schema_builder.build();
let index = Index::create_in_ram(schema); let index = Index::create_in_ram(schema);
@@ -316,8 +316,8 @@ mod tests {
} }
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64); let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960u64..1970u64);
@@ -335,7 +335,7 @@ mod tests {
let int_field: Field; let int_field: Field;
let schema = { let schema = {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
int_field = schema_builder.add_i64_field("intfield", INT_INDEXED); int_field = schema_builder.add_i64_field("intfield", INDEXED);
schema_builder.build() schema_builder.build()
}; };
@@ -355,8 +355,8 @@ mod tests {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
let count_multiples = let count_multiples =
|range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap(); |range_query: RangeQuery| searcher.search(&range_query, &Count).unwrap();

View File

@@ -1,8 +1,8 @@
use error::TantivyError; use error::TantivyError;
use fst_regex::Regex;
use query::{AutomatonWeight, Query, Weight}; use query::{AutomatonWeight, Query, Weight};
use schema::Field; use schema::Field;
use std::clone::Clone; use std::clone::Clone;
use tantivy_fst::Regex;
use Result; use Result;
use Searcher; use Searcher;
@@ -44,8 +44,8 @@ use Searcher;
/// index_writer.commit().unwrap(); /// index_writer.commit().unwrap();
/// } /// }
/// ///
/// index.load_searchers()?; /// let reader = index.reader()?;
/// let searcher = index.searcher(); /// let searcher = reader.searcher();
/// ///
/// let term = Term::from_field_text(title, "Diary"); /// let term = Term::from_field_text(title, "Diary");
/// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title); /// let query = RegexQuery::new("d[ai]{2}ry".to_string(), title);
@@ -108,8 +108,8 @@ mod test {
)); ));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.load_searchers().unwrap(); let reader = index.reader().unwrap();
let searcher = index.searcher(); let searcher = reader.searcher();
{ {
let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field); let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
let scored_docs = searcher let scored_docs = searcher

View File

@@ -32,9 +32,7 @@ mod tests {
} }
assert!(index_writer.commit().is_ok()); assert!(index_writer.commit().is_ok());
} }
let searcher = index.reader().unwrap().searcher();
index.load_searchers().unwrap();
let searcher = index.searcher();
let term_query = TermQuery::new( let term_query = TermQuery::new(
Term::from_field_text(text_field, "a"), Term::from_field_text(text_field, "a"),
IndexRecordOption::Basic, IndexRecordOption::Basic,
@@ -65,8 +63,7 @@ mod tests {
index_writer.add_document(doc!(left_field => "left4 left1")); index_writer.add_document(doc!(left_field => "left4 left1"));
index_writer.commit().unwrap(); index_writer.commit().unwrap();
} }
index.load_searchers().unwrap(); let searcher = index.reader().unwrap().searcher();
let searcher = index.searcher();
{ {
let term = Term::from_field_text(left_field, "left2"); let term = Term::from_field_text(left_field, "left2");
let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs); let term_query = TermQuery::new(term, IndexRecordOption::WithFreqs);

View File

@@ -48,9 +48,8 @@ use Term;
/// )); /// ));
/// index_writer.commit()?; /// index_writer.commit()?;
/// } /// }
/// /// let reader = index.reader()?;
/// index.load_searchers()?; /// let searcher = reader.searcher();
/// let searcher = index.searcher();
/// ///
/// let query = TermQuery::new( /// let query = TermQuery::new(
/// Term::from_field_text(title, "diary"), /// Term::from_field_text(title, "diary"),

View File

@@ -5,8 +5,6 @@ use docset::DocSet;
use std::num::Wrapping; use std::num::Wrapping;
use DocId; use DocId;
const EMPTY_ARRAY: [u32; 0] = [];
/// Simulate a `Postings` objects from a `VecPostings`. /// Simulate a `Postings` objects from a `VecPostings`.
/// `VecPostings` only exist for testing purposes. /// `VecPostings` only exist for testing purposes.
/// ///

src/reader/mod.rs (new file, +193 lines)
View File

@@ -0,0 +1,193 @@
mod pool;
use self::pool::{LeasedItem, Pool};
use core::Segment;
use directory::Directory;
use directory::WatchHandle;
use directory::META_LOCK;
use std::sync::Arc;
use Index;
use Result;
use Searcher;
use SegmentReader;
use schema::Schema;
/// Defines when a new version of the index should be reloaded.
///
/// Regardless of whether you search and index in the same process, tantivy does not necessarily
/// reflect the changes that are committed to your index. `ReloadPolicy` helps you define precisely
/// when you want your index to be reloaded.
#[derive(Clone, Copy)]
pub enum ReloadPolicy {
/// The index is entirely reloaded manually.
/// All updates of the index should be manual.
///
/// No change is reflected automatically. You are required to call `IndexReader::reload()` manually.
Manual,
/// The index is reloaded within milliseconds after a new commit is available.
/// This is made possible by watching changes in the `meta.json` file.
OnCommit, // TODO add NEAR_REAL_TIME(target_ms)
}
/// `IndexReader` builder
///
/// It makes it possible to set the following values.
///
/// - `num_searchers` (by default, the number of detected CPU threads):
///
/// When all `num_searchers` searchers are already in use, the next query will block
/// until one of the in-use searchers is released.
/// - `reload_policy` (by default `ReloadPolicy::OnCommit`):
///
/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
#[derive(Clone)]
pub struct IndexReaderBuilder {
num_searchers: usize,
reload_policy: ReloadPolicy,
index: Index,
}
impl IndexReaderBuilder {
pub(crate) fn new(index: Index) -> IndexReaderBuilder {
IndexReaderBuilder {
num_searchers: num_cpus::get(),
reload_policy: ReloadPolicy::OnCommit,
index,
}
}
/// Builds the reader.
///
/// Building the reader is a non-trivial operation that requires
/// opening the different segment readers. It may take hundreds of milliseconds
/// and it may return an error.
/// TODO(pmasurel) Use the `TryInto` trait once it is available in stable.
pub fn try_into(self) -> Result<IndexReader> {
let inner_reader = InnerIndexReader {
index: self.index,
num_searchers: self.num_searchers,
searcher_pool: Pool::new(),
};
inner_reader.reload()?;
let inner_reader_arc = Arc::new(inner_reader);
let watch_handle_opt: Option<WatchHandle>;
match self.reload_policy {
ReloadPolicy::Manual => {
// No need to set anything...
watch_handle_opt = None;
}
ReloadPolicy::OnCommit => {
let inner_reader_arc_clone = inner_reader_arc.clone();
let callback = move || {
if let Err(err) = inner_reader_arc_clone.reload() {
error!(
"Error while loading searcher after commit was detected. {:?}",
err
);
}
};
let watch_handle = inner_reader_arc.index.directory().watch(Box::new(callback));
watch_handle_opt = Some(watch_handle);
}
}
Ok(IndexReader {
inner: inner_reader_arc,
watch_handle_opt,
})
}
/// Sets the reload_policy.
///
/// See [`ReloadPolicy`](./enum.ReloadPolicy.html) for more details.
pub fn reload_policy(mut self, reload_policy: ReloadPolicy) -> IndexReaderBuilder {
self.reload_policy = reload_policy;
self
}
/// Sets the number of `Searcher` in the searcher pool.
pub fn num_searchers(mut self, num_searchers: usize) -> IndexReaderBuilder {
self.num_searchers = num_searchers;
self
}
}
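A hedged sketch of how the builder is meant to be used; it assumes the Index exposes a reader_builder() method handing back this IndexReaderBuilder (the constructor above is pub(crate)), and the function name is illustrative only:

fn open_manual_reader(index: &Index) -> Result<IndexReader> {
    let reader = index
        .reader_builder()
        .reload_policy(ReloadPolicy::Manual) // no meta.json watcher is installed
        .num_searchers(4)                    // pool of four searchers
        .try_into()?;
    // With the Manual policy, new commits are only picked up explicitly.
    reader.reload()?;
    Ok(reader)
}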
struct InnerIndexReader {
num_searchers: usize,
searcher_pool: Pool<Searcher>,
index: Index,
}
impl InnerIndexReader {
fn reload(&self) -> Result<()> {
let segment_readers: Vec<SegmentReader> = {
let _meta_lock = self.index.directory().acquire_lock(&META_LOCK)?;
let searchable_segments = self.searchable_segments()?;
searchable_segments
.iter()
.map(SegmentReader::open)
.collect::<Result<_>>()?
};
let schema = self.index.schema();
let searchers = (0..self.num_searchers)
.map(|_| Searcher::new(schema.clone(), self.index.clone(), segment_readers.clone()))
.collect();
self.searcher_pool.publish_new_generation(searchers);
Ok(())
}
/// Returns the list of segments that are searchable
fn searchable_segments(&self) -> Result<Vec<Segment>> {
self.index.searchable_segments()
}
fn searcher(&self) -> LeasedItem<Searcher> {
self.searcher_pool.acquire()
}
}
/// `IndexReader` is your entry point to read and search the index.
///
/// It controls when a new version of the index should be loaded and lends
/// you instances of `Searcher` for the last loaded version.
///
/// `Clone` does not clone the underlying pool of searchers. `IndexReader`
/// just wraps an `Arc`.
#[derive(Clone)]
pub struct IndexReader {
inner: Arc<InnerIndexReader>,
watch_handle_opt: Option<WatchHandle>,
}
impl IndexReader {
pub fn schema(&self) -> Schema {
self.inner.index.schema()
}
/// Update searchers so that they reflect the state of the last
/// `.commit()`.
///
/// If you set up the `OnCommit` `ReloadPolicy` (which is the default)
/// every commit should be rapidly reflected on your `IndexReader` and you should
/// not need to call `reload()` at all.
///
/// This automatic reload can take tens of milliseconds to kick in, however, and in unit tests
/// it can be nice to deterministically force the reload of searchers.
pub fn reload(&self) -> Result<()> {
self.inner.reload()
}
/// Returns a searcher
///
/// This method should be called every single time a search
/// query is performed.
/// The searchers are taken from a pool of `num_searchers` searchers.
/// If no searcher is available
/// this may block.
///
/// The same searcher must be used for a given query, as it ensures
/// the use of a consistent segment set.
pub fn searcher(&self) -> LeasedItem<Searcher> {
self.inner.searcher()
}
}
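A short sketch of the intended usage pattern: lease one searcher per query so the whole query runs against a consistent segment set. Count comes from the collector module, as in the tests elsewhere in this change; the paths are crate-relative and the function name is illustrative only:

use collector::Count;
use query::Query;

fn count_matches(reader: &IndexReader, query: &Query) -> Result<usize> {
    // The lease is returned to the pool when `searcher` is dropped.
    let searcher = reader.searcher();
    searcher.search(query, &Count)
}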

View File

@@ -1,4 +1,5 @@
use crossbeam::queue::MsQueue; use crossbeam::crossbeam_channel::unbounded;
use crossbeam::{Receiver, RecvError, Sender};
use std::ops::{Deref, DerefMut}; use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
@@ -9,6 +10,37 @@ pub struct GenerationItem<T> {
item: T, item: T,
} }
/// Queue implementation for the Object Pool below
/// Uses the unbounded Linked-List type queue from crossbeam-channel
/// Splits the Queue into sender and receiver
struct Queue<T> {
sender: Sender<T>,
receiver: Receiver<T>,
}
impl<T> Queue<T> {
fn new() -> Self {
let (s, r) = unbounded();
Queue {
sender: s,
receiver: r,
}
}
/// `send` returns a `Result`; a send failure is not expected here,
/// so it is surfaced with `expect` rather than silently ignored.
fn push(&self, elem: T) {
self.sender
.send(elem)
.expect("Sending an item to crossbeam-queue shouldn't fail");
}
/// Relies on the underlying crossbeam-channel Receiver
/// to block on empty queue
fn pop(&self) -> Result<T, RecvError> {
self.receiver.recv()
}
}
/// An object pool /// An object pool
/// ///
@@ -16,14 +48,14 @@ pub struct GenerationItem<T> {
/// Object are wrapped in a `LeasedItem` wrapper and are /// Object are wrapped in a `LeasedItem` wrapper and are
/// released automatically back into the pool on `Drop`. /// released automatically back into the pool on `Drop`.
pub struct Pool<T> { pub struct Pool<T> {
queue: Arc<MsQueue<GenerationItem<T>>>, queue: Arc<Queue<GenerationItem<T>>>,
freshest_generation: AtomicUsize, freshest_generation: AtomicUsize,
next_generation: AtomicUsize, next_generation: AtomicUsize,
} }
impl<T> Pool<T> { impl<T> Pool<T> {
pub fn new() -> Pool<T> { pub fn new() -> Pool<T> {
let queue = Arc::new(MsQueue::new()); let queue = Arc::new(Queue::new());
Pool { Pool {
queue, queue,
freshest_generation: AtomicUsize::default(), freshest_generation: AtomicUsize::default(),
@@ -77,7 +109,7 @@ impl<T> Pool<T> {
pub fn acquire(&self) -> LeasedItem<T> { pub fn acquire(&self) -> LeasedItem<T> {
let generation = self.generation(); let generation = self.generation();
loop { loop {
let gen_item = self.queue.pop(); let gen_item = self.queue.pop().unwrap();
if gen_item.generation >= generation { if gen_item.generation >= generation {
return LeasedItem { return LeasedItem {
gen_item: Some(gen_item), gen_item: Some(gen_item),
@@ -93,7 +125,7 @@ impl<T> Pool<T> {
pub struct LeasedItem<T> { pub struct LeasedItem<T> {
gen_item: Option<GenerationItem<T>>, gen_item: Option<GenerationItem<T>>,
recycle_queue: Arc<MsQueue<GenerationItem<T>>>, recycle_queue: Arc<Queue<GenerationItem<T>>>,
} }
impl<T> Deref for LeasedItem<T> { impl<T> Deref for LeasedItem<T> {
@@ -130,6 +162,7 @@ impl<T> Drop for LeasedItem<T> {
mod tests { mod tests {
use super::Pool; use super::Pool;
use super::Queue;
use std::iter; use std::iter;
#[test] #[test]
@@ -146,4 +179,47 @@ mod tests {
assert_eq!(*pool.acquire(), 11); assert_eq!(*pool.acquire(), 11);
} }
} }
#[test]
fn test_queue() {
let q = Queue::new();
let elem = 5;
q.push(elem);
let res = q.pop();
assert_eq!(res.unwrap(), elem);
}
#[test]
fn test_pool_dont_panic_on_empty_pop() {
// When the object pool is exhausted, it shouldn't panic on pop()
use std::sync::Arc;
use std::{thread, time};
// Wrap the pool in an Arc, same way as its used in `core/index.rs`
let pool = Arc::new(Pool::new());
// clone pools outside the move scope of each new thread
let pool1 = Arc::clone(&pool);
let pool2 = Arc::clone(&pool);
let elements_for_pool = vec![1, 2];
pool.publish_new_generation(elements_for_pool);
let mut threads = vec![];
let sleep_dur = time::Duration::from_millis(10);
// spawn one more thread than there are elements in the pool
threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool.acquire();
thread::sleep(sleep_dur);
}));
threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool1.acquire();
thread::sleep(sleep_dur);
}));
threads.push(thread::spawn(move || {
// leasing to make sure it's not dropped before sleep is called
let _leased_searcher = &pool2.acquire();
thread::sleep(sleep_dur);
}));
}
} }

View File

@@ -3,6 +3,7 @@ use common::BinarySerializable;
use common::VInt; use common::VInt;
use itertools::Itertools; use itertools::Itertools;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use DateTime;
/// Tantivy's Document is the object that can /// Tantivy's Document is the object that can
/// be indexed and then searched for. /// be indexed and then searched for.
@@ -82,11 +83,16 @@ impl Document {
self.add(FieldValue::new(field, Value::U64(value))); self.add(FieldValue::new(field, Value::U64(value)));
} }
/// Add a u64 field /// Add an i64 field
pub fn add_i64(&mut self, field: Field, value: i64) { pub fn add_i64(&mut self, field: Field, value: i64) {
self.add(FieldValue::new(field, Value::I64(value))); self.add(FieldValue::new(field, Value::I64(value)));
} }
/// Add a date field
pub fn add_date(&mut self, field: Field, value: &DateTime) {
self.add(FieldValue::new(field, Value::Date(*value)));
}
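A small illustrative sketch of the new method; `timestamp_field` stands for a field created with schema_builder.add_date_field(...), and the crate's DateTime is assumed to be chrono's UTC datetime, consistent with the query-parser conversion above:

fn make_event_doc(timestamp_field: Field) -> Document {
    let mut doc = Document::default();
    // Store the current UTC time in the date field.
    doc.add_date(timestamp_field, &chrono::Utc::now());
    doc
}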
/// Add a bytes field /// Add a bytes field
pub fn add_bytes(&mut self, field: Field, value: Vec<u8>) { pub fn add_bytes(&mut self, field: Field, value: Vec<u8>) {
self.add(FieldValue::new(field, Value::Bytes(value))) self.add(FieldValue::new(field, Value::Bytes(value)))

View File

@@ -48,6 +48,15 @@ impl FieldEntry {
} }
} }
/// Creates a new date field entry in the schema, given
/// a name, and some options.
pub fn new_date(field_name: String, field_type: IntOptions) -> FieldEntry {
FieldEntry {
name: field_name,
field_type: FieldType::Date(field_type),
}
}
/// Creates a field entry for a facet. /// Creates a field entry for a facet.
pub fn new_facet(field_name: String) -> FieldEntry { pub fn new_facet(field_name: String) -> FieldEntry {
FieldEntry { FieldEntry {
@@ -78,7 +87,9 @@ impl FieldEntry {
pub fn is_indexed(&self) -> bool {
match self.field_type {
FieldType::Str(ref options) => options.get_indexing_options().is_some(),
FieldType::U64(ref options)
| FieldType::I64(ref options)
| FieldType::Date(ref options) => options.is_indexed(),
FieldType::HierarchicalFacet => true,
FieldType::Bytes => false,
}
@@ -95,7 +106,9 @@ impl FieldEntry {
/// Returns true iff the field is stored
pub fn is_stored(&self) -> bool {
match self.field_type {
FieldType::U64(ref options)
| FieldType::I64(ref options)
| FieldType::Date(ref options) => options.is_stored(),
FieldType::Str(ref options) => options.is_stored(),
// TODO make stored hierarchical facet optional
FieldType::HierarchicalFacet => true,
@@ -125,6 +138,10 @@ impl Serialize for FieldEntry {
s.serialize_field("type", "i64")?; s.serialize_field("type", "i64")?;
s.serialize_field("options", options)?; s.serialize_field("options", options)?;
} }
FieldType::Date(ref options) => {
s.serialize_field("type", "date")?;
s.serialize_field("options", options)?;
}
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
s.serialize_field("type", "hierarchical_facet")?; s.serialize_field("type", "hierarchical_facet")?;
} }
@@ -188,7 +205,7 @@ impl<'de> Deserialize<'de> for FieldEntry {
"bytes" => { "bytes" => {
field_type = Some(FieldType::Bytes); field_type = Some(FieldType::Bytes);
} }
"text" | "u64" | "i64" => { "text" | "u64" | "i64" | "date" => {
// These types require additional options to create a field_type // These types require additional options to create a field_type
} }
_ => panic!("unhandled type"), _ => panic!("unhandled type"),
@@ -205,6 +222,7 @@ impl<'de> Deserialize<'de> for FieldEntry {
"text" => field_type = Some(FieldType::Str(map.next_value()?)), "text" => field_type = Some(FieldType::Str(map.next_value()?)),
"u64" => field_type = Some(FieldType::U64(map.next_value()?)), "u64" => field_type = Some(FieldType::U64(map.next_value()?)),
"i64" => field_type = Some(FieldType::I64(map.next_value()?)), "i64" => field_type = Some(FieldType::I64(map.next_value()?)),
"date" => field_type = Some(FieldType::Date(map.next_value()?)),
_ => { _ => {
let msg = format!("Unrecognised type {}", ty); let msg = format!("Unrecognised type {}", ty);
return Err(de::Error::custom(msg)); return Err(de::Error::custom(msg));
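A sketch of what these `Serialize`/`Deserialize` arms enable for a date field entry. It assumes `FieldEntry` and `IntOptions` are re-exported from `tantivy::schema` and that `serde_json` is available; the field name and the exact JSON layout are illustrative:

```
use tantivy::schema::{FieldEntry, IntOptions};

// Hypothetical helper showing that a date entry tags itself as "date".
fn date_entry_json() -> String {
    let entry = FieldEntry::new_date("created_at".to_string(), IntOptions::default());
    // Expected shape: {"name":"created_at","type":"date","options":{...}}
    serde_json::to_string(&entry).expect("FieldEntry serialization should not fail")
}
```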

View File

@@ -34,6 +34,8 @@ pub enum Type {
U64,
/// `i64`
I64,
/// `date`, stored internally as an `i64` timestamp
Date,
/// `tantivy::schema::Facet`. Passed as a string in JSON.
HierarchicalFacet,
/// `Vec<u8>`
@@ -50,6 +52,8 @@ pub enum FieldType {
U64(IntOptions),
/// Signed 64-bit integer field type configuration
I64(IntOptions),
/// Date field type configuration, stored as a signed 64-bit UTC timestamp
Date(IntOptions),
/// Hierarchical Facet
HierarchicalFacet,
/// Bytes (one per document)
@@ -63,6 +67,7 @@ impl FieldType {
FieldType::Str(_) => Type::Str,
FieldType::U64(_) => Type::U64,
FieldType::I64(_) => Type::I64,
FieldType::Date(_) => Type::Date,
FieldType::HierarchicalFacet => Type::HierarchicalFacet,
FieldType::Bytes => Type::Bytes,
}
@@ -75,6 +80,7 @@ impl FieldType {
FieldType::U64(ref int_options) | FieldType::I64(ref int_options) => {
int_options.is_indexed()
}
FieldType::Date(ref date_options) => date_options.is_indexed(),
FieldType::HierarchicalFacet => true,
FieldType::Bytes => false,
}
@@ -89,7 +95,9 @@ impl FieldType {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
.map(|indexing_options| indexing_options.index_option()),
FieldType::U64(ref int_options)
| FieldType::I64(ref int_options)
| FieldType::Date(ref int_options) => {
if int_options.is_indexed() {
Some(IndexRecordOption::Basic)
} else {
@@ -110,9 +118,9 @@ impl FieldType {
match *json {
JsonValue::String(ref field_text) => match *self {
FieldType::Str(_) => Ok(Value::Str(field_text.clone())),
FieldType::U64(_) | FieldType::I64(_) | FieldType::Date(_) => Err(
ValueParsingError::TypeError(format!("Expected an integer, got {:?}", json)),
),
FieldType::HierarchicalFacet => Ok(Value::Facet(Facet::from(field_text))),
FieldType::Bytes => decode(field_text).map(Value::Bytes).map_err(|_| {
ValueParsingError::InvalidBase64(format!(
@@ -122,7 +130,7 @@ impl FieldType {
}),
},
JsonValue::Number(ref field_val_num) => match *self {
FieldType::I64(_) | FieldType::Date(_) => {
if let Some(field_val_i64) = field_val_num.as_i64() {
Ok(Value::I64(field_val_i64))
} else {
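As the `JsonValue::Number` arm above shows, a date field supplied as a plain integer in a JSON document is accepted and lands as an `i64` timestamp. A sketch under the assumption that `Schema::parse_document` routes values through `value_from_json`; the `published` field name is hypothetical:

```
use tantivy::schema::{Schema, INDEXED, STORED};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_date_field("published", INDEXED | STORED);
    let schema = schema_builder.build();

    // 1546300800 is 2019-01-01T00:00:00Z expressed in seconds since the epoch.
    let doc = schema
        .parse_document(r#"{"published": 1546300800}"#)
        .expect("the integer should parse as a timestamp");
    assert_eq!(doc.field_values().len(), 1);
}
```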

src/schema/flags.rs (new file)
View File

@@ -0,0 +1,81 @@
use schema::IntOptions;
use schema::TextOptions;
use std::ops::BitOr;
#[derive(Clone)]
pub struct StoredFlag;
/// Flag to mark the field as stored.
/// This flag can apply to any kind of field.
///
/// A stored field of a document can be retrieved given its `DocId`.
/// Stored fields are stored together and LZ4 compressed.
/// Reading the stored fields of a document is relatively slow.
/// (~ 100 microsecs)
///
/// It should not be used during scoring or collection.
pub const STORED: SchemaFlagList<StoredFlag, ()> = SchemaFlagList {
head: StoredFlag,
tail: (),
};
#[derive(Clone)]
pub struct IndexedFlag;
/// Flag to mark the field as indexed.
///
/// The `INDEXED` flag can only be used when building `IntOptions` (`u64` and `i64` fields)
/// Of course, text fields can also be indexed... But this is expressed by using either the
/// `STRING` (untokenized) or `TEXT` (tokenized with the english tokenizer) flags.
pub const INDEXED: SchemaFlagList<IndexedFlag, ()> = SchemaFlagList {
head: IndexedFlag,
tail: (),
};
#[derive(Clone)]
pub struct FastFlag;
/// Flag to mark the field as a fast field (similar to Lucene's DocValues)
///
/// Fast fields can be random-accessed rapidly. Fields useful for scoring, filtering
/// or collection should be marked as fast fields.
/// The `FAST` flag can only be used when building `IntOptions` (`u64` and `i64` fields)
pub const FAST: SchemaFlagList<FastFlag, ()> = SchemaFlagList {
head: FastFlag,
tail: (),
};
impl<Head, OldHead, OldTail> BitOr<SchemaFlagList<Head, ()>> for SchemaFlagList<OldHead, OldTail>
where
Head: Clone,
OldHead: Clone,
OldTail: Clone,
{
type Output = SchemaFlagList<Head, SchemaFlagList<OldHead, OldTail>>;
fn bitor(self, head: SchemaFlagList<Head, ()>) -> Self::Output {
SchemaFlagList {
head: head.head,
tail: self.clone(),
}
}
}
impl<T: Clone + Into<IntOptions>> BitOr<IntOptions> for SchemaFlagList<T, ()> {
type Output = IntOptions;
fn bitor(self, rhs: IntOptions) -> Self::Output {
self.head.into() | rhs
}
}
impl<T: Clone + Into<TextOptions>> BitOr<TextOptions> for SchemaFlagList<T, ()> {
type Output = TextOptions;
fn bitor(self, rhs: TextOptions) -> Self::Output {
self.head.into() | rhs
}
}
#[derive(Clone)]
pub struct SchemaFlagList<Head: Clone, Tail: Clone> {
pub head: Head,
pub tail: Tail,
}
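The `BitOr` impl above builds a small heterogeneous list, so `STORED | INDEXED | FAST` is itself a `SchemaFlagList` that later converts into `IntOptions` or `TextOptions`. A sketch of how the flags are meant to be consumed through the schema builder (field names are illustrative):

```
use tantivy::schema::{Schema, FAST, INDEXED, STORED, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    // Int-style flags compose into IntOptions...
    schema_builder.add_u64_field("num_stars", INDEXED | STORED | FAST);
    // ...while TEXT | STORED composes into TextOptions.
    schema_builder.add_text_field("title", TEXT | STORED);
    let _schema = schema_builder.build();
}
```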

View File

@@ -1,3 +1,4 @@
use schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
use std::ops::BitOr;
/// Express whether a field is single-value or multi-valued.
@@ -85,41 +86,62 @@ impl Default for IntOptions {
}
}
impl From<()> for IntOptions {
fn from(_: ()) -> IntOptions {
IntOptions::default()
}
}
impl From<FastFlag> for IntOptions {
fn from(_: FastFlag) -> Self {
IntOptions {
indexed: false,
stored: false,
fast: Some(Cardinality::SingleValue),
}
}
}
impl From<StoredFlag> for IntOptions {
fn from(_: StoredFlag) -> Self {
IntOptions {
indexed: false,
stored: true,
fast: None,
}
}
}
impl From<IndexedFlag> for IntOptions {
fn from(_: IndexedFlag) -> Self {
IntOptions {
indexed: true,
stored: false,
fast: None,
}
}
}
impl<T: Into<IntOptions>> BitOr<T> for IntOptions {
type Output = IntOptions;
fn bitor(self, other: T) -> IntOptions {
let mut res = IntOptions::default();
let other = other.into();
res.indexed = self.indexed | other.indexed;
res.stored = self.stored | other.stored;
res.fast = self.fast.or(other.fast);
res
}
}
impl<Head, Tail> From<SchemaFlagList<Head, Tail>> for IntOptions
where
Head: Clone,
Tail: Clone,
Self: BitOr<Output = Self> + From<Head> + From<Tail>,
{
fn from(head_tail: SchemaFlagList<Head, Tail>) -> Self {
Self::from(head_tail.head) | Self::from(head_tail.tail)
}
}
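To make the conversion chain concrete: each flag expands to an `IntOptions`, and `|` merges them field by field. A small sketch, assuming the `is_indexed`/`is_stored` accessors that the `FieldEntry` code above already relies on:

```
use tantivy::schema::{IntOptions, INDEXED, STORED};

fn main() {
    let opts: IntOptions = (INDEXED | STORED).into();
    assert!(opts.is_indexed());
    assert!(opts.is_stored());

    // `()` converts into the default options: neither indexed, stored, nor fast.
    let default_opts: IntOptions = ().into();
    assert!(!default_opts.is_indexed());
}
```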

View File

@@ -33,7 +33,7 @@ let title_options = TextOptions::default()
.set_indexing_options(TextFieldIndexing::default()
.set_tokenizer("default")
.set_index_option(IndexRecordOption::WithFreqsAndPositions));
schema_builder.add_text_field("title", title_options);
let schema = schema_builder.build();
```
@@ -53,23 +53,8 @@ The effect of each possible setting is described more in detail
On the other hand, setting the field as stored or not determines whether the field should be returned
when [`searcher.doc(doc_address)`](../struct.Searcher.html#method.doc) is called.

## Setting a u64 or an i64 field

### Example
@@ -98,6 +83,23 @@ u64 that are indexed as fast will be stored in a special data structure that wil
make it possible to access the u64 value given the doc id rapidly. This is useful if the value of
the field is required during scoring or collection for instance.
### Shortcuts
For convenience, it is possible to define your field indexing options by combining different flags
using the `|` operator.
For instance, a schema containing the two fields defined in the example above could be rewritten:
```
use tantivy::schema::*;
let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("num_stars", INDEXED | STORED);
schema_builder.add_text_field("title", TEXT | STORED);
let schema = schema_builder.build();
```
*/
mod document;
@@ -116,13 +118,15 @@ mod named_field_document;
mod text_options;
mod value;
mod flags;
pub use self::named_field_document::NamedFieldDocument;
pub use self::schema::DocParsingError;
pub use self::schema::{Schema, SchemaBuilder};
pub use self::value::Value;
pub use self::facet::Facet;
pub(crate) use self::facet::FACET_SEP_BYTE;
pub use self::document::Document;
pub use self::field::Field;
@@ -135,15 +139,12 @@ pub use self::field_value::FieldValue;
pub use self::index_record_option::IndexRecordOption;
pub use self::text_options::TextFieldIndexing;
pub use self::text_options::TextOptions;
pub use self::text_options::STRING;
pub use self::text_options::TEXT;
pub use self::flags::{FAST, INDEXED, STORED};
pub use self::int_options::Cardinality;
pub use self::int_options::IntOptions;
use regex::Regex;

View File

@@ -52,9 +52,13 @@ impl SchemaBuilder {
/// by the second one.
/// The first field will get a field id
/// but only the second one will be indexed
pub fn add_u64_field<T: Into<IntOptions>>(
&mut self,
field_name_str: &str,
field_options: T,
) -> Field {
let field_name = String::from(field_name_str);
let field_entry = FieldEntry::new_u64(field_name, field_options.into());
self.add_field(field_entry)
}
@@ -68,9 +72,35 @@ impl SchemaBuilder {
/// by the second one.
/// The first field will get a field id
/// but only the second one will be indexed
pub fn add_i64_field<T: Into<IntOptions>>(
&mut self,
field_name_str: &str,
field_options: T,
) -> Field {
let field_name = String::from(field_name_str);
let field_entry = FieldEntry::new_i64(field_name, field_options.into());
self.add_field(field_entry)
}
/// Adds a new date field.
/// Returns the associated field handle
/// Internally, Tantivy simply stores dates as i64 UTC timestamps,
/// while the user supplies DateTime values for convenience.
///
/// # Caution
///
/// Appending two fields with the same name
/// will result in the shadowing of the first
/// by the second one.
/// The first field will get a field id
/// but only the second one will be indexed
pub fn add_date_field<T: Into<IntOptions>>(
&mut self,
field_name_str: &str,
field_options: T,
) -> Field {
let field_name = String::from(field_name_str);
let field_entry = FieldEntry::new_date(field_name, field_options.into());
self.add_field(field_entry)
}
@@ -84,9 +114,13 @@ impl SchemaBuilder {
/// by the second one.
/// The first field will get a field id
/// but only the second one will be indexed
pub fn add_text_field<T: Into<TextOptions>>(
&mut self,
field_name_str: &str,
field_options: T,
) -> Field {
let field_name = String::from(field_name_str);
let field_entry = FieldEntry::new_text(field_name, field_options.into());
self.add_field(field_entry)
}
@@ -178,15 +212,7 @@ impl Schema {
SchemaBuilder::default()
}
/// Returns the field associated with a given name, if any.
pub fn get_field(&self, field_name: &str) -> Option<Field> {
self.0.fields_map.get(field_name).cloned()
}
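Because the builder methods now take `T: Into<IntOptions>` (resp. `Into<TextOptions>`), both a flag combination and an explicitly constructed options value are accepted. A sketch with hypothetical field names:

```
use tantivy::schema::{IntOptions, Schema, INDEXED, STORED};

fn main() {
    let mut schema_builder = Schema::builder();
    // A flag list...
    schema_builder.add_u64_field("views", INDEXED | STORED);
    // ...or a hand-built IntOptions value both satisfy the `Into` bound.
    schema_builder.add_i64_field("delta", IntOptions::default());
    let _schema = schema_builder.build();
}
```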

View File

@@ -5,6 +5,7 @@ use byteorder::{BigEndian, ByteOrder};
use common;
use schema::Facet;
use std::str;
use DateTime;
/// Size (in bytes) of the buffer of an int field.
const INT_TERM_LEN: usize = 4 + 8;
@@ -30,6 +31,18 @@ impl Term {
Term::from_field_u64(field, val_u64)
}
/// Builds a term given a field and a DateTime value.
///
/// Assuming the term has a field id of 1, and a timestamp value of 3234,
/// the Term will have 12 bytes.
///
/// The first four bytes are dedicated to storing the field id.
/// The following 8 bytes encode the timestamp as an i64.
pub fn from_field_date(field: Field, val: &DateTime) -> Term {
let val_timestamp = val.timestamp();
Term::from_field_i64(field, val_timestamp)
}
/// Creates a `Term` given a facet.
pub fn from_facet(field: Field, facet: &Facet) -> Term {
let bytes = facet.encoded_str().as_bytes();
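Since the constructor above simply reuses `from_field_i64`, a date term is byte-for-byte the i64 term of its timestamp. A sketch with a hypothetical `published` field, assuming `Term` implements `PartialEq`:

```
use chrono::Utc;
use tantivy::schema::{Schema, INDEXED};
use tantivy::Term;

fn main() {
    let mut schema_builder = Schema::builder();
    let published = schema_builder.add_date_field("published", INDEXED);
    let _schema = schema_builder.build();

    let now = Utc::now();
    let date_term = Term::from_field_date(published, &now);
    let i64_term = Term::from_field_i64(published, now.timestamp());
    assert_eq!(date_term, i64_term);
}
```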

View File

@@ -1,3 +1,5 @@
use schema::flags::SchemaFlagList;
use schema::flags::StoredFlag;
use schema::IndexRecordOption;
use std::borrow::Cow;
use std::ops::BitOr;
@@ -109,19 +111,11 @@ pub const TEXT: TextOptions = TextOptions {
stored: false,
};
impl<T: Into<TextOptions>> BitOr<T> for TextOptions {
type Output = TextOptions;
fn bitor(self, other: T) -> TextOptions {
let other = other.into();
let mut res = TextOptions::default();
res.indexing = self.indexing.or(other.indexing);
res.stored = self.stored | other.stored;
@@ -129,6 +123,32 @@ impl BitOr for TextOptions {
}
}
impl From<()> for TextOptions {
fn from(_: ()) -> TextOptions {
TextOptions::default()
}
}
impl From<StoredFlag> for TextOptions {
fn from(_: StoredFlag) -> TextOptions {
TextOptions {
indexing: None,
stored: true,
}
}
}
impl<Head, Tail> From<SchemaFlagList<Head, Tail>> for TextOptions
where
Head: Clone,
Tail: Clone,
Self: BitOr<Output = Self> + From<Head> + From<Tail>,
{
fn from(head_tail: SchemaFlagList<Head, Tail>) -> Self {
Self::from(head_tail.head) | Self::from(head_tail.tail)
}
}
#[cfg(test)]
mod tests {
use schema::*;

View File

@@ -2,6 +2,7 @@ use schema::Facet;
use serde::de::Visitor;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
use DateTime;
/// Value represents the value of any field.
/// It is an enum over all of the possible field types.
@@ -13,6 +14,8 @@ pub enum Value {
U64(u64),
/// Signed 64-bits Integer `i64`
I64(i64),
/// Date, stored as a signed 64-bit UTC timestamp
Date(DateTime),
/// Hierarchical Facet
Facet(Facet),
/// Arbitrarily sized byte array
@@ -28,6 +31,7 @@ impl Serialize for Value {
Value::Str(ref v) => serializer.serialize_str(v),
Value::U64(u) => serializer.serialize_u64(u),
Value::I64(u) => serializer.serialize_i64(u),
Value::Date(ref date) => serializer.serialize_i64(date.timestamp()),
Value::Facet(ref facet) => facet.serialize(serializer),
Value::Bytes(ref bytes) => serializer.serialize_bytes(bytes),
}
@@ -102,6 +106,17 @@ impl Value {
_ => panic!("This is not a text field."),
}
}
/// Returns the Date-value, provided the value is of the `Date` type.
///
/// # Panics
/// If the value is not of type `Date`
pub fn date_value(&self) -> &DateTime {
match *self {
Value::Date(ref value) => value,
_ => panic!("This is not a date field."),
}
}
}
impl From<String> for Value {
@@ -122,6 +137,12 @@ impl From<i64> for Value {
}
}
impl From<DateTime> for Value {
fn from(date_time: DateTime) -> Value {
Value::Date(date_time)
}
}
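With the new `From<DateTime>` impl and the `date_value` accessor, a date round-trips through `Value` as in this short sketch (assuming `tantivy::DateTime` is the chrono UTC alias):

```
use chrono::Utc;
use tantivy::schema::Value;

fn main() {
    let now = Utc::now();
    let value = Value::from(now);
    // `date_value` panics for any other variant, mirroring `text` above.
    assert_eq!(value.date_value().timestamp(), now.timestamp());
}
```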
impl<'a> From<&'a str> for Value {
fn from(s: &'a str) -> Value {
Value::Str(s.to_string())
@@ -142,6 +163,7 @@ impl From<Vec<u8>> for Value {
mod binary_serialize {
use super::Value;
use chrono::{TimeZone, Utc};
use common::BinarySerializable;
use schema::Facet;
use std::io::{self, Read, Write};
@@ -151,6 +173,7 @@ mod binary_serialize {
const I64_CODE: u8 = 2;
const HIERARCHICAL_FACET_CODE: u8 = 3;
const BYTES_CODE: u8 = 4;
const DATE_CODE: u8 = 5;
impl BinarySerializable for Value {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
@@ -167,6 +190,10 @@ mod binary_serialize {
I64_CODE.serialize(writer)?;
val.serialize(writer)
}
Value::Date(ref val) => {
DATE_CODE.serialize(writer)?;
val.timestamp().serialize(writer)
}
Value::Facet(ref facet) => {
HIERARCHICAL_FACET_CODE.serialize(writer)?;
facet.serialize(writer)
@@ -192,6 +219,10 @@ mod binary_serialize {
let value = i64::deserialize(reader)?;
Ok(Value::I64(value))
}
DATE_CODE => {
let timestamp = i64::deserialize(reader)?;
Ok(Value::Date(Utc.timestamp(timestamp, 0)))
}
HIERARCHICAL_FACET_CODE => Ok(Value::Facet(Facet::deserialize(reader)?)),
BYTES_CODE => Ok(Value::Bytes(Vec::<u8>::deserialize(reader)?)),
_ => Err(io::Error::new(
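Note that the `DATE_CODE` arms above persist only `timestamp()` and rebuild the value with `Utc.timestamp(ts, 0)`, so sub-second precision does not survive a store round trip. A quick illustration with chrono alone:

```
use chrono::{TimeZone, Utc};

fn main() {
    let original = Utc.ymd(2019, 4, 5).and_hms_nano(10, 7, 29, 123_456_789);
    // This mirrors what serialize/deserialize do with the value.
    let restored = Utc.timestamp(original.timestamp(), 0);
    assert_eq!(restored.timestamp(), original.timestamp());
    assert_ne!(restored, original); // nanoseconds were dropped
}
```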

View File

@@ -241,8 +241,8 @@ fn select_best_fragment_combination(fragments: &[FragmentCandidate], text: &str)
/// # let query_parser = QueryParser::for_index(&index, vec![text_field]);
/// // ...
/// let query = query_parser.parse_query("haleurs flamands").unwrap();
/// # let reader = index.reader()?;
/// # let searcher = reader.searcher();
/// let mut snippet_generator = SnippetGenerator::create(&searcher, &*query, text_field)?;
/// snippet_generator.set_max_num_chars(100);
/// let snippet = snippet_generator.snippet_from_doc(&doc);
@@ -528,9 +528,8 @@ Survey in 2016, 2017, and 2018."#;
index_writer.add_document(doc!(text_field => "a"));
index_writer.add_document(doc!(text_field => "a b"));
index_writer.commit().unwrap();
}
let searcher = index.reader().unwrap().searcher();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
{
let query = query_parser.parse_query("e").unwrap();
@@ -587,8 +586,7 @@ Survey in 2016, 2017, and 2018."#;
}
index_writer.commit().unwrap();
}
let searcher = index.reader().unwrap().searcher();
let query_parser = QueryParser::for_index(&index, vec![text_field]);
let query = query_parser.parse_query("rust design").unwrap();
let mut snippet_generator =

View File

@@ -295,8 +295,7 @@ mod test {
use core::Index;
use schema::Field;
use schema::Schema;
use schema::{FAST, INDEXED, STORED, TEXT};
use space_usage::ByteCount;
use space_usage::PerFieldSpaceUsage;
use Term;
@@ -305,9 +304,8 @@ mod test {
fn test_empty() {
let schema = Schema::builder().build();
let index = Index::create_in_ram(schema.clone());
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert_eq!(0, searcher_space_usage.total());
}
@@ -332,7 +330,7 @@ mod test {
#[test]
fn test_fast_indexed() {
let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", FAST | INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -345,8 +343,8 @@ mod test {
index_writer.commit().unwrap();
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());
@@ -385,8 +383,8 @@ mod test {
index_writer.commit().unwrap();
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());
@@ -424,9 +422,8 @@ mod test {
index_writer.add_document(doc!(name => "hello hi goodbye"));
index_writer.commit().unwrap();
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());
@@ -450,7 +447,7 @@ mod test {
#[test]
fn test_deletes() {
let mut schema_builder = Schema::builder();
let name = schema_builder.add_u64_field("name", INDEXED);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema.clone());
@@ -472,9 +469,8 @@ mod test {
index_writer2.commit().unwrap();
}
let reader = index.reader().unwrap();
let searcher = reader.searcher();
let searcher_space_usage = searcher.space_usage();
assert!(searcher_space_usage.total() > 0);
assert_eq!(1, searcher_space_usage.segments().len());

View File

@@ -159,8 +159,7 @@ mod tests {
index_writer.commit().unwrap();
}
}
let searcher = index.reader().unwrap().searcher();
let field_searcher = searcher.field(text_field);
let mut term_it = field_searcher.terms();

View File

@@ -1,9 +1,9 @@
use super::TermDictionary;
use postings::TermInfo;
use tantivy_fst::automaton::AlwaysMatch;
use tantivy_fst::map::{Stream, StreamBuilder};
use tantivy_fst::Automaton;
use tantivy_fst::{IntoStreamer, Streamer};
use termdict::TermOrdinal;
/// `TermStreamerBuilder` is a helper object used to define

View File

@@ -3,15 +3,15 @@ use super::{TermStreamer, TermStreamerBuilder};
use common::BinarySerializable;
use common::CountingWriter;
use directory::ReadOnlySource;
use postings::TermInfo;
use schema::FieldType;
use std::io::{self, Write};
use tantivy_fst;
use tantivy_fst::raw::Fst;
use tantivy_fst::Automaton;
use termdict::TermOrdinal;
fn convert_fst_error(e: tantivy_fst::Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
}
@@ -19,7 +19,7 @@ fn convert_fst_error(e: fst::Error) -> io::Error {
///
/// Inserting must be done in the order of the `keys`.
pub struct TermDictionaryBuilder<W> {
fst_builder: tantivy_fst::MapBuilder<W>,
term_info_store_writer: TermInfoStoreWriter,
term_ord: u64,
}
@@ -30,7 +30,7 @@ where
{
/// Creates a new `TermDictionaryBuilder`
pub fn create(w: W, _field_type: &FieldType) -> io::Result<Self> {
let fst_builder = tantivy_fst::MapBuilder::new(w).map_err(convert_fst_error)?;
Ok(TermDictionaryBuilder {
fst_builder,
term_info_store_writer: TermInfoStoreWriter::new(),
@@ -87,17 +87,9 @@ where
}
}
fn open_fst_index(source: ReadOnlySource) -> tantivy_fst::Map<ReadOnlySource> {
let fst = Fst::new(source).expect("FST data is corrupted");
tantivy_fst::Map::from(fst)
}
/// The term dictionary contains all of the terms in
/// respective `TermOrdinal`. The `TermInfoStore` then makes it
/// respective `TermOrdinal`. The `TermInfoStore` then makes it /// respective `TermOrdinal`. The `TermInfoStore` then makes it
/// possible to fetch the associated `TermInfo`.
pub struct TermDictionary {
fst_index: tantivy_fst::Map<ReadOnlySource>,
term_info_store: TermInfoStore,
}