Compare commits


2 Commits

Author         SHA1         Message                      Date
Paul Masurel   72fc1c10a6   rebasing, fixing some test   2019-09-03 10:07:03 +09:00
Paul Masurel   b28654c3fb   crate division               2019-09-03 09:53:06 +09:00
119 changed files with 910 additions and 1842 deletions

.github/FUNDING.yml

@@ -1,12 +0,0 @@
# These are supported funding model platforms
github: fulmicoton
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']


@@ -7,25 +7,14 @@ Tantivy 0.11.0
- Better handling of whitespaces. - Better handling of whitespaces.
- Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik) - Closes #498 - add support for Elastic-style unbounded range queries for alphanumeric types eg. "title:>hello", "weight:>=70.5", "height:<200" (@petr-tik)
- API change around `Box<BoxableTokenizer>`. See detail in #629 - API change around `Box<BoxableTokenizer>`. See detail in #629
- Avoid rebuilding Regex automaton whenever a regex query is reused. #639 (@brainlock) - Avoid rebuilding Regex automaton whenever a regex query is reused. #630 (@brainlock)
- Add footer with some metadata to index files. #605 (@fdb-hiroshima)
- TopDocs collector: ensure stable sorting on equal score. #671 (@brainlock)
- Added handling of pre-tokenized text fields (#642), which will enable users to
load tokens created outside tantivy. See usage in examples/pre_tokenized_text. (@kkoziara)
- Fix crash when committing multiple times with deleted documents. #681 (@brainlock)
## How to update? ## How to update?
- `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct. - `Box<dyn BoxableTokenizer>` has been replaced by a `BoxedTokenizer` struct.
- Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return - Regex are now compiled when the `RegexQuery` instance is built. As a result, it can now return
an error and handling the `Result` is required. an error and handling the `Result` is required.
Tantivy 0.10.2
=====================
- Closes #656. Solving memory leak.
Tantivy 0.10.1 Tantivy 0.10.1
===================== =====================
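The "How to update?" notes in this changelog hunk say that the regex is now compiled when the `RegexQuery` is built, so construction returns a `Result`. A minimal sketch of the migration, assuming the constructor is `RegexQuery::from_pattern` (the constructor name is not shown in this diff and is an assumption):

```rust
use tantivy::query::RegexQuery;
use tantivy::schema::{Schema, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let _schema = schema_builder.build();

    // The regex is compiled when the query is built, so construction is
    // fallible and the `Result` has to be handled up front.
    match RegexQuery::from_pattern("tant[ivy]*", title) {
        Ok(_query) => println!("regex compiled at construction time"),
        Err(err) => eprintln!("invalid regex: {:?}", err),
    }
}
```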


@@ -13,11 +13,10 @@ keywords = ["search", "information", "retrieval"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
base64 = "0.11.0" base64 = "0.10.0"
byteorder = "1.0" byteorder = "1.0"
crc32fast = "1.2.0"
once_cell = "1.0" once_cell = "1.0"
regex ={version = "1.3.0", default-features = false, features = ["std"]} regex = "1.0"
tantivy-fst = "0.1" tantivy-fst = "0.1"
memmap = {version = "0.7", optional=true} memmap = {version = "0.7", optional=true}
lz4 = {version="1.20", optional=true} lz4 = {version="1.20", optional=true}
@@ -25,6 +24,7 @@ snap = {version="0.2"}
atomicwrites = {version="0.2.2", optional=true} atomicwrites = {version="0.2.2", optional=true}
tempfile = "3.0" tempfile = "3.0"
log = "0.4" log = "0.4"
combine = ">=3.6.0,<4.0.0"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
@@ -34,7 +34,7 @@ itertools = "0.8"
levenshtein_automata = {version="0.1", features=["fst_automaton"]} levenshtein_automata = {version="0.1", features=["fst_automaton"]}
notify = {version="4", optional=true} notify = {version="4", optional=true}
bit-set = "0.5" bit-set = "0.5"
uuid = { version = "0.8", features = ["v4", "serde"] } uuid = { version = "0.7.2", features = ["v4", "serde"] }
crossbeam = "0.7" crossbeam = "0.7"
futures = "0.1" futures = "0.1"
futures-cpupool = "0.1" futures-cpupool = "0.1"
@@ -42,7 +42,6 @@ owning_ref = "0.4"
stable_deref_trait = "1.0.0" stable_deref_trait = "1.0.0"
rust-stemmers = "1.1" rust-stemmers = "1.1"
downcast-rs = { version="1.0" } downcast-rs = { version="1.0" }
tantivy-query-grammar = { path="./query-grammar" }
bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]} bitpacking = {version="0.8", default-features = false, features=["bitpacker4x"]}
census = "0.2" census = "0.2"
fnv = "1.0.6" fnv = "1.0.6"
@@ -50,10 +49,14 @@ owned-read = "0.4"
failure = "0.1" failure = "0.1"
htmlescape = "0.3.1" htmlescape = "0.3.1"
fail = "0.3" fail = "0.3"
scoped-pool = "1.0"
murmurhash32 = "0.2" murmurhash32 = "0.2"
chrono = "0.4" chrono = "0.4"
smallvec = "1.0" smallvec = "0.6"
rayon = "1"
tantivy-schema = {path= "./tantivy-schema"}
tantivy-tokenizer = {path= "./tantivy-tokenizer"}
tantivy-common = {path="./tantivy-common"}
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]
winapi = "0.3" winapi = "0.3"
@@ -64,10 +67,6 @@ maplit = "1"
matches = "0.1.8" matches = "0.1.8"
time = "0.1.42" time = "0.1.42"
[dev-dependencies.fail]
version = "0.3"
features = ["failpoints"]
[profile.release] [profile.release]
opt-level = 3 opt-level = 3
debug = false debug = false
@@ -85,12 +84,12 @@ failpoints = ["fail/failpoints"]
unstable = [] # useful for benches. unstable = [] # useful for benches.
wasm-bindgen = ["uuid/wasm-bindgen"] wasm-bindgen = ["uuid/wasm-bindgen"]
[workspace]
members = ["query-grammar"]
[badges] [badges]
travis-ci = { repository = "tantivy-search/tantivy" } travis-ci = { repository = "tantivy-search/tantivy" }
[dev-dependencies.fail]
features = ["failpoints"]
# Following the "fail" crate best practises, we isolate # Following the "fail" crate best practises, we isolate
# tests that define specific behavior in fail check points # tests that define specific behavior in fail check points
# in a different binary. # in a different binary.
@@ -102,3 +101,7 @@ travis-ci = { repository = "tantivy-search/tantivy" }
name = "failpoints" name = "failpoints"
path = "tests/failpoints/mod.rs" path = "tests/failpoints/mod.rs"
required-features = ["fail/failpoints"] required-features = ["fail/failpoints"]
[workspace]
members = ["tantivy-schema", "tantivy-common", "tantivy-tokenizer"]


@@ -1,3 +0,0 @@
test:
echo "Run test only... No examples."
cargo test --tests --lib


@@ -21,9 +21,9 @@
[![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton) [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
**Tantivy** is a **full text search engine library** written in Rust. **Tantivy** is a **full text search engine library** written in rust.
It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) or [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
an off-the-shelf search engine server, but rather a crate that can be used an off-the-shelf search engine server, but rather a crate that can be used
to build such a search engine. to build such a search engine.
@@ -31,7 +31,7 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
# Benchmark # Benchmark
Tantivy is typically faster than Lucene, but the results depend on Tantivy is typically faster than Lucene, but the results will depend on
the nature of the queries in your workload. the nature of the queries in your workload.
The following [benchmark](https://tantivy-search.github.io/bench/) break downs The following [benchmark](https://tantivy-search.github.io/bench/) break downs
@@ -40,19 +40,19 @@ performance for different type of queries / collection.
# Features # Features
- Full-text search - Full-text search
- Configurable tokenizer (stemming available for 17 Latin languages with third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)) - Configurable tokenizer. (stemming available for 17 latin languages. Third party support for Chinese ([tantivy-jieba](https://crates.io/crates/tantivy-jieba) and [cang-jie](https://crates.io/crates/cang-jie)) and [Japanese](https://crates.io/crates/tantivy-tokenizer-tiny-segmenter)
- Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:) - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
- Tiny startup time (<10ms), perfect for command line tools - Tiny startup time (<10ms), perfect for command line tools
- BM25 scoring (the same as Lucene) - BM25 scoring (the same as lucene)
- Natural query language (e.g. `(michael AND jackson) OR "king of pop"`) - Natural query language `(michael AND jackson) OR "king of pop"`
- Phrase queries search (e.g. `"michael jackson"`) - Phrase queries search (`"michael jackson"`)
- Incremental indexing - Incremental indexing
- Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop) - Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
- Mmap directory - Mmap directory
- SIMD integer compression when the platform/CPU includes the SSE2 instruction set - SIMD integer compression when the platform/CPU includes the SSE2 instruction set.
- Single valued and multivalued u64, i64, and f64 fast fields (equivalent of doc values in Lucene) - Single valued and multivalued u64, i64 and f64 fast fields (equivalent of doc values in Lucene)
- `&[u8]` fast fields - `&[u8]` fast fields
- Text, i64, u64, f64, dates, and hierarchical facet fields - Text, i64, u64, f64, dates and hierarchical facet fields
- LZ4 compressed document store - LZ4 compressed document store
- Range queries - Range queries
- Faceted search - Faceted search
@@ -61,42 +61,43 @@ performance for different type of queries / collection.
# Non-features # Non-features
- Distributed search is out of the scope of Tantivy. That being said, Tantivy is a - Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
library upon which one could build a distributed search. Serializable/mergeable collector state for instance, library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
are within the scope of Tantivy. are within the scope of tantivy.
# Supported OS and compiler # Supported OS and compiler
Tantivy works on stable Rust (>= 1.27) and supports Linux, MacOS, and Windows. Tantivy works on stable rust (>= 1.27) and supports Linux, MacOS and Windows.
# Getting started # Getting started
- [Tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html) - [tantivy's simple search example](https://tantivy-search.github.io/examples/basic_search.html)
- [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli) - `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine, - [tantivy-cli and its tutorial](https://github.com/tantivy-search/tantivy-cli).
index documents, and search via the CLI or a small server with a REST API. `tantivy-cli` is an actual command line interface that makes it easy for you to create a search engine,
It walks you through getting a wikipedia search engine up and running in a few minutes. index documents and search via the CLI or a small server with a REST API.
- [Reference doc for the last released version](https://docs.rs/tantivy/) It will walk you through getting a wikipedia search engine up and running in a few minutes.
- [reference doc for the last released version](https://docs.rs/tantivy/)
# How can I support this project? # How can I support this project?
There are many ways to support this project. There are many ways to support this project.
- Use Tantivy and tell us about your experience on [Gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com) - Use tantivy and tell us about your experience on [gitter](https://gitter.im/tantivy-search/tantivy) or by email (paul.masurel@gmail.com)
- Report bugs - Report bugs
- Write a blog post - Write a blog post
- Help with documentation by asking questions or submitting PRs - Help with documentation by asking questions or submitting PRs
- Contribute code (you can join [our Gitter](https://gitter.im/tantivy-search/tantivy)) - Contribute code (you can join [our gitter](https://gitter.im/tantivy-search/tantivy) )
- Talk about Tantivy around you - Talk about tantivy around you
- Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton) - Drop a word on on [![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/fulmicoton) or even [![Become a patron](https://c5.patreon.com/external/logo/become_a_patron_button.png)](https://www.patreon.com/fulmicoton)
# Contributing code # Contributing code
We use the GitHub Pull Request workflow: reference a GitHub ticket and/or include a comprehensive commit message when opening a PR. We use the GitHub Pull Request workflow - reference a GitHub ticket and/or include a comprehensive commit message when opening a PR.
## Clone and build locally ## Clone and build locally
Tantivy compiles on stable Rust but requires `Rust >= 1.27`. Tantivy compiles on stable rust but requires `Rust >= 1.27`.
To check out and run tests, you can simply run: To check out and run tests, you can simply run :
```bash ```bash
git clone https://github.com/tantivy-search/tantivy.git git clone https://github.com/tantivy-search/tantivy.git
@@ -107,7 +108,7 @@ To check out and run tests, you can simply run:
## Run tests ## Run tests
Some tests will not run with just `cargo test` because of `fail-rs`. Some tests will not run with just `cargo test` because of `fail-rs`.
To run the tests exhaustively, run `./run-tests.sh`. To run the tests exhaustively, run `./run-tests.sh`
## Debug ## Debug
@@ -115,13 +116,13 @@ You might find it useful to step through the programme with a debugger.
### A failing test ### A failing test
Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that the `target/` directory exists. Use this bash script to find the name of the most recent debug build of Tantivy and run it under `rust-gdb`: Make sure you haven't run `cargo clean` after the most recent `cargo test` or `cargo build` to guarantee that `target/` dir exists. Use this bash script to find the most name of the most recent debug build of tantivy and run it under rust-gdb.
```bash ```bash
find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY find target/debug/ -maxdepth 1 -executable -type f -name "tantivy*" -printf '%TY-%Tm-%Td %TT %p\n' | sort -r | cut -d " " -f 3 | xargs -I RECENT_DBG_TANTIVY rust-gdb RECENT_DBG_TANTIVY
``` ```
Now that you are in `rust-gdb`, you can set breakpoints on lines and methods that match your source code and run the debug executable with flags that you normally pass to `cargo test` like this: Now that you are in rust-gdb, you can set breakpoints on lines and methods that match your source-code and run the debug executable with flags that you normally pass to `cargo test` to like this
```bash ```bash
$gdb run --test-threads 1 --test $NAME_OF_TEST $gdb run --test-threads 1 --test $NAME_OF_TEST
@@ -129,7 +130,7 @@ $gdb run --test-threads 1 --test $NAME_OF_TEST
### An example ### An example
By default, `rustc` compiles everything in the `examples/` directory in debug mode. This makes it easy for you to make examples to reproduce bugs: By default, rustc compiles everything in the `examples/` dir in debug mode. This makes it easy for you to make examples to reproduce bugs.
```bash ```bash
rust-gdb target/debug/examples/$EXAMPLE_NAME rust-gdb target/debug/examples/$EXAMPLE_NAME
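The "Getting started" section of the README above points to the basic_search example. A condensed sketch of that flow against the tantivy API of this era (field names, the writer buffer size, and the use of an in-memory index are illustrative, not taken from this diff):

```rust
use tantivy::collector::TopDocs;
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, STORED, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    // Build a schema with a single stored text field.
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let schema = schema_builder.build();

    // An in-memory index is enough for a smoke test.
    let index = Index::create_in_ram(schema);
    let mut index_writer = index.writer(50_000_000)?;
    index_writer.add_document(doc!(title => "The Old Man and the Sea"));
    index_writer.commit()?;

    // Parse a query and fetch the best hit.
    let reader = index.reader()?;
    let searcher = reader.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title]);
    let query = query_parser.parse_query("old man").expect("valid query");
    let top_docs = searcher.search(&query, &TopDocs::with_limit(1))?;
    for (_score, doc_address) in top_docs {
        println!("{}", index.schema().to_json(&searcher.doc(doc_address)?));
    }
    Ok(())
}
```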


@@ -7,7 +7,7 @@ set -ex
main() { main() {
if [ ! -z $CODECOV ]; then if [ ! -z $CODECOV ]; then
echo "Codecov" echo "Codecov"
cargo build --verbose && cargo coverage --verbose --all && bash <(curl -s https://codecov.io/bash) -s target/kcov cargo build --verbose && cargo coverage --verbose && bash <(curl -s https://codecov.io/bash) -s target/kcov
else else
echo "Build" echo "Build"
cross build --target $TARGET cross build --target $TARGET
@@ -15,8 +15,7 @@ main() {
return return
fi fi
echo "Test" echo "Test"
cross test --target $TARGET --no-default-features --features mmap cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
cross test --target $TARGET --no-default-features --features mmap query-grammar
fi fi
for example in $(ls examples/*.rs) for example in $(ls examples/*.rs)
do do


@@ -46,9 +46,10 @@ fn main() -> tantivy::Result<()> {
thread::spawn(move || { thread::spawn(move || {
// we index 100 times the document... for the sake of the example. // we index 100 times the document... for the sake of the example.
for i in 0..100 { for i in 0..100 {
let opstamp = index_writer_clone_1 let opstamp = {
.read().unwrap() //< A read lock is sufficient here. // A read lock is sufficient here.
.add_document( let index_writer_rlock = index_writer_clone_1.read().unwrap();
index_writer_rlock.add_document(
doc!( doc!(
title => "Of Mice and Men", title => "Of Mice and Men",
body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \ body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
@@ -59,7 +60,8 @@ fn main() -> tantivy::Result<()> {
fresh and green with every spring, carrying in their lower leaf junctures the \ fresh and green with every spring, carrying in their lower leaf junctures the \
debris of the winters flooding; and sycamores with mottled, white, recumbent \ debris of the winters flooding; and sycamores with mottled, white, recumbent \
limbs and branches that arch over the pool" limbs and branches that arch over the pool"
)); ))
};
println!("add doc {} from thread 1 - opstamp {}", i, opstamp); println!("add doc {} from thread 1 - opstamp {}", i, opstamp);
thread::sleep(Duration::from_millis(20)); thread::sleep(Duration::from_millis(20));
} }
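The hunk above restructures how the example takes a read lock before `add_document`. A self-contained sketch of the underlying pattern, assuming the writer is shared as `Arc<RwLock<IndexWriter>>` as in the example: `add_document` only needs `&self`, so concurrent producers can hold a read lock, while `commit` needs the write lock.

```rust
use std::sync::{Arc, RwLock};
use std::thread;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index, IndexWriter};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // `add_document` takes `&self`, so a read lock is sufficient for producers;
    // the write lock is reserved for `commit()`.
    let index_writer: Arc<RwLock<IndexWriter>> =
        Arc::new(RwLock::new(index.writer(50_000_000)?));
    let writer_clone = Arc::clone(&index_writer);
    let handle = thread::spawn(move || {
        writer_clone
            .read()
            .unwrap() //< read lock only, other producers are not blocked
            .add_document(doc!(title => "Of Mice and Men"))
    });
    let _opstamp = handle.join().unwrap();
    index_writer.write().unwrap().commit()?;
    Ok(())
}
```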


@@ -1,140 +0,0 @@
// # Pre-tokenized text example
//
// This example shows how to use pre-tokenized text. Sometimes yout might
// want to index and search through text which is already split into
// tokens by some external tool.
//
// In this example we will:
// - use tantivy tokenizer to create tokens and load them directly into tantivy,
// - import tokenized text straight from json,
// - perform a search on documents with pre-tokenized text
use tantivy::tokenizer::{PreTokenizedString, SimpleTokenizer, Token, TokenStream, Tokenizer};
use tantivy::collector::{Count, TopDocs};
use tantivy::query::TermQuery;
use tantivy::schema::*;
use tantivy::{doc, Index, ReloadPolicy};
use tempfile::TempDir;
fn pre_tokenize_text(text: &str) -> Vec<Token> {
let mut token_stream = SimpleTokenizer.token_stream(text);
let mut tokens = vec![];
while token_stream.advance() {
tokens.push(token_stream.token().clone());
}
tokens
}
fn main() -> tantivy::Result<()> {
let index_path = TempDir::new()?;
let mut schema_builder = Schema::builder();
schema_builder.add_text_field("title", TEXT | STORED);
schema_builder.add_text_field("body", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_dir(&index_path, schema.clone())?;
let mut index_writer = index.writer(50_000_000)?;
// We can create a document manually, by setting the fields
// one by one in a Document object.
let title = schema.get_field("title").unwrap();
let body = schema.get_field("body").unwrap();
let title_text = "The Old Man and the Sea";
let body_text = "He was an old man who fished alone in a skiff in the Gulf Stream";
// Content of our first document
// We create `PreTokenizedString` which contains original text and vector of tokens
let title_tok = PreTokenizedString {
text: String::from(title_text),
tokens: pre_tokenize_text(title_text),
};
println!(
"Original text: \"{}\" and tokens: {:?}",
title_tok.text, title_tok.tokens
);
let body_tok = PreTokenizedString {
text: String::from(body_text),
tokens: pre_tokenize_text(body_text),
};
// Now lets create a document and add our `PreTokenizedString` using
// `add_pre_tokenized_text` method of `Document`
let mut old_man_doc = Document::default();
old_man_doc.add_pre_tokenized_text(title, &title_tok);
old_man_doc.add_pre_tokenized_text(body, &body_tok);
// ... now let's just add it to the IndexWriter
index_writer.add_document(old_man_doc);
// Pretokenized text can also be fed as JSON
let short_man_json = r#"{
"title":[{
"text":"The Old Man",
"tokens":[
{"offset_from":0,"offset_to":3,"position":0,"text":"The","position_length":1},
{"offset_from":4,"offset_to":7,"position":1,"text":"Old","position_length":1},
{"offset_from":8,"offset_to":11,"position":2,"text":"Man","position_length":1}
]
}]
}"#;
let short_man_doc = schema.parse_document(&short_man_json)?;
index_writer.add_document(short_man_doc);
// Let's commit changes
index_writer.commit()?;
// ... and now is the time to query our index
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.try_into()?;
let searcher = reader.searcher();
// We want to get documents with token "Man", we will use TermQuery to do it
// Using PreTokenizedString means the tokens are stored as is avoiding stemming
// and lowercasing, which preserves full words in their original form
let query = TermQuery::new(
Term::from_field_text(title, "Man"),
IndexRecordOption::Basic,
);
let (top_docs, count) = searcher
.search(&query, &(TopDocs::with_limit(2), Count))
.unwrap();
assert_eq!(count, 2);
for (_score, doc_address) in top_docs {
let retrieved_doc = searcher.doc(doc_address)?;
println!("Document: {}", schema.to_json(&retrieved_doc));
}
// In contrary to the previous query, when we search for the "man" term we
// should get no results, as it's not one of the indexed tokens. SimpleTokenizer
// only splits text on whitespace / punctuation.
let query = TermQuery::new(
Term::from_field_text(title, "man"),
IndexRecordOption::Basic,
);
let (_top_docs, count) = searcher
.search(&query, &(TopDocs::with_limit(2), Count))
.unwrap();
assert_eq!(count, 0);
Ok(())
}


@@ -1,16 +0,0 @@
[package]
name = "tantivy-query-grammar"
version = "0.11.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
license = "MIT"
categories = ["database-implementations", "data-structures"]
description = """Search engine library"""
documentation = "https://tantivy-search.github.io/tantivy/tantivy/index.html"
homepage = "https://github.com/tantivy-search/tantivy"
repository = "https://github.com/tantivy-search/tantivy"
readme = "README.md"
keywords = ["search", "information", "retrieval"]
edition = "2018"
[dependencies]
combine = ">=3.6.0,<4.0.0"


@@ -1,17 +0,0 @@
#![recursion_limit = "100"]
mod occur;
mod query_grammar;
mod user_input_ast;
use combine::parser::Parser;
pub use crate::occur::Occur;
use crate::query_grammar::parse_to_ast;
pub use crate::user_input_ast::{UserInputAST, UserInputBound, UserInputLeaf, UserInputLiteral};
pub struct Error;
pub fn parse_query(query: &str) -> Result<UserInputAST, Error> {
let (user_input_ast, _remaining) = parse_to_ast().parse(query).map_err(|_| Error)?;
Ok(user_input_ast)
}
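The `lib.rs` above exposes a single entry point, `parse_query`, returning either the user-input AST or an opaque `Error`. A hedged usage sketch, assuming the crate is consumed under the name `tantivy_query_grammar` from the `tantivy-query-grammar` package declared in the Cargo.toml hunk earlier:

```rust
use tantivy_query_grammar::parse_query;

fn main() {
    // `Error` carries no detail in this version, so callers can only branch
    // on success or failure.
    match parse_query("(michael AND jackson) OR \"king of pop\"") {
        Ok(_ast) => println!("query parsed into a UserInputAST"),
        Err(_) => eprintln!("could not parse query"),
    }
}
```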


@@ -123,4 +123,5 @@ mod tests {
assert_eq!(count_collector.harvest(), 2); assert_eq!(count_collector.harvest(), 2);
} }
} }
} }


@@ -515,7 +515,7 @@ mod tests {
#[should_panic(expected = "Tried to add a facet which is a descendant of \ #[should_panic(expected = "Tried to add a facet which is a descendant of \
an already added facet.")] an already added facet.")]
fn test_misused_facet_collector() { fn test_misused_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/country/europe")); facet_collector.add_facet(Facet::from("/country/europe"));
} }
@@ -546,7 +546,7 @@ mod tests {
#[test] #[test]
fn test_non_used_facet_collector() { fn test_non_used_facet_collector() {
let mut facet_collector = FacetCollector::for_field(Field::from_field_id(0)); let mut facet_collector = FacetCollector::for_field(Field(0));
facet_collector.add_facet(Facet::from("/country")); facet_collector.add_facet(Facet::from("/country"));
facet_collector.add_facet(Facet::from("/countryeurope")); facet_collector.add_facet(Facet::from("/countryeurope"));
} }
@@ -599,18 +599,19 @@ mod tests {
); );
} }
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use crate::collector::FacetCollector; use collector::FacetCollector;
use crate::query::AllQuery; use query::AllQuery;
use crate::schema::{Facet, Schema}; use rand::{thread_rng, Rng};
use crate::Index; use schema::Facet;
use rand::seq::SliceRandom; use schema::Schema;
use rand::thread_rng;
use test::Bencher; use test::Bencher;
use Index;
#[bench] #[bench]
fn bench_facet_collector(b: &mut Bencher) { fn bench_facet_collector(b: &mut Bencher) {
@@ -627,7 +628,7 @@ mod bench {
} }
} }
// 40425 docs // 40425 docs
docs[..].shuffle(&mut thread_rng()); thread_rng().shuffle(&mut docs[..]);
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for doc in docs { for doc in docs {
@@ -636,7 +637,7 @@ mod bench {
index_writer.commit().unwrap(); index_writer.commit().unwrap();
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
b.iter(|| { b.iter(|| {
let searcher = reader.searcher(); let searcher = index.searcher();
let facet_collector = FacetCollector::for_field(facet_field); let facet_collector = FacetCollector::for_field(facet_field);
searcher.search(&AllQuery, &facet_collector).unwrap(); searcher.search(&AllQuery, &facet_collector).unwrap();
}); });
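The hunks above swap the tuple constructor `Field(0)` for `Field::from_field_id(0)` in the facet-collector tests. Outside of tests the field usually comes from the schema rather than a raw id; a hedged sketch of typical `FacetCollector` usage (field name and buffer size are illustrative):

```rust
use tantivy::collector::FacetCollector;
use tantivy::query::AllQuery;
use tantivy::schema::{Facet, Schema};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let facet_field = schema_builder.add_facet_field("facet");
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(50_000_000)?;
    writer.add_document(doc!(facet_field => Facet::from("/country/europe/france")));
    writer.commit()?;

    // Collect counts under the "/country" prefix only.
    let mut facet_collector = FacetCollector::for_field(facet_field);
    facet_collector.add_facet(Facet::from("/country"));
    let searcher = index.reader()?.searcher();
    let counts = searcher.search(&AllQuery, &facet_collector)?;
    for (facet, count) in counts.get("/country") {
        println!("{}: {}", facet, count);
    }
    Ok(())
}
```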


@@ -12,9 +12,6 @@ use std::collections::BinaryHeap;
/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the /// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
/// default Rust heap is a max heap, whereas a min heap is needed. /// default Rust heap is a max heap, whereas a min heap is needed.
/// ///
/// Additionally, it guarantees stable sorting: in case of a tie on the feature, the document
/// address is used.
///
/// WARNING: equality is not what you would expect here. /// WARNING: equality is not what you would expect here.
/// Two elements are equal if their feature is equal, and regardless of whether `doc` /// Two elements are equal if their feature is equal, and regardless of whether `doc`
/// is equal. This should be perfectly fine for this usage, but let's make sure this /// is equal. This should be perfectly fine for this usage, but let's make sure this
@@ -24,37 +21,29 @@ struct ComparableDoc<T, D> {
doc: D, doc: D,
} }
impl<T: PartialOrd, D: PartialOrd> PartialOrd for ComparableDoc<T, D> { impl<T: PartialOrd, D> PartialOrd for ComparableDoc<T, D> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other)) Some(self.cmp(other))
} }
} }
impl<T: PartialOrd, D: PartialOrd> Ord for ComparableDoc<T, D> { impl<T: PartialOrd, D> Ord for ComparableDoc<T, D> {
#[inline] #[inline]
fn cmp(&self, other: &Self) -> Ordering { fn cmp(&self, other: &Self) -> Ordering {
// Reversed to make BinaryHeap work as a min-heap other
let by_feature = other
.feature .feature
.partial_cmp(&self.feature) .partial_cmp(&self.feature)
.unwrap_or(Ordering::Equal); .unwrap_or_else(|| Ordering::Equal)
let lazy_by_doc_address = || self.doc.partial_cmp(&other.doc).unwrap_or(Ordering::Equal);
// In case of a tie on the feature, we sort by ascending
// `DocAddress` in order to ensure a stable sorting of the
// documents.
by_feature.then_with(lazy_by_doc_address)
} }
} }
impl<T: PartialOrd, D: PartialOrd> PartialEq for ComparableDoc<T, D> { impl<T: PartialOrd, D> PartialEq for ComparableDoc<T, D> {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal self.cmp(other) == Ordering::Equal
} }
} }
impl<T: PartialOrd, D: PartialOrd> Eq for ComparableDoc<T, D> {} impl<T: PartialOrd, D> Eq for ComparableDoc<T, D> {}
pub(crate) struct TopCollector<T> { pub(crate) struct TopCollector<T> {
limit: usize, limit: usize,
@@ -225,94 +214,4 @@ mod tests {
] ]
); );
} }
#[test]
fn test_top_segment_collector_stable_ordering_for_equal_feature() {
// given that the documents are collected in ascending doc id order,
// when harvesting we have to guarantee stable sorting in case of a tie
// on the score
let doc_ids_collection = [4, 5, 6];
let score = 3.14;
let mut top_collector_limit_2 = TopSegmentCollector::new(0, 2);
for id in &doc_ids_collection {
top_collector_limit_2.collect(*id, score);
}
let mut top_collector_limit_3 = TopSegmentCollector::new(0, 3);
for id in &doc_ids_collection {
top_collector_limit_3.collect(*id, score);
}
assert_eq!(
top_collector_limit_2.harvest(),
top_collector_limit_3.harvest()[..2].to_vec(),
);
}
}
#[cfg(all(test, feature = "unstable"))]
mod bench {
use super::TopSegmentCollector;
use test::Bencher;
#[bench]
fn bench_top_segment_collector_collect_not_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 400);
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_at_capacity(b: &mut Bencher) {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
b.iter(|| {
for i in 0..100 {
top_collector.collect(i, 0.8);
}
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_many_ties(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
for i in 0..100 {
top_collector.collect(i, 0.8);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
#[bench]
fn bench_top_segment_collector_collect_and_harvest_no_tie(b: &mut Bencher) {
b.iter(|| {
let mut top_collector = TopSegmentCollector::new(0, 100);
let mut score = 1.0;
for i in 0..100 {
score += 1.0;
top_collector.collect(i, score);
}
// it would be nice to be able to do the setup N times but still
// measure only harvest(). We can't since harvest() consumes
// the top_collector.
top_collector.harvest()
});
}
} }
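The doc comments in this hunk describe the comparator of `ComparableDoc`: reversed on the feature so the max-heap behaves as a min-heap, with the document address as a secondary key so ties on the score sort stably. A standalone sketch of that comparator against plain types (not tantivy's internal generics):

```rust
use std::cmp::Ordering;

struct ComparableDoc {
    feature: f32,
    doc: u32,
}

fn compare(a: &ComparableDoc, b: &ComparableDoc) -> Ordering {
    // Reversed on the feature so a `BinaryHeap` acts as a min-heap,
    // then tie-broken by ascending doc id so equal scores keep a stable order.
    b.feature
        .partial_cmp(&a.feature)
        .unwrap_or(Ordering::Equal)
        .then_with(|| a.doc.cmp(&b.doc))
}

fn main() {
    let x = ComparableDoc { feature: 3.14, doc: 4 };
    let y = ComparableDoc { feature: 3.14, doc: 5 };
    // Equal feature: the ordering falls back to the doc id, so it is deterministic.
    assert_eq!(compare(&x, &y), Ordering::Less);
}
```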


@@ -15,16 +15,13 @@ use crate::SegmentLocalId;
use crate::SegmentReader; use crate::SegmentReader;
use std::fmt; use std::fmt;
/// The `TopDocs` collector keeps track of the top `K` documents /// The Top Score Collector keeps track of the K documents
/// sorted by their score. /// sorted by their score.
/// ///
/// The implementation is based on a `BinaryHeap`. /// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents /// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`. /// is `O(n log K)`.
/// ///
/// This collector guarantees a stable sorting in case of a tie on the
/// document score. As such, it is suitable to implement pagination.
///
/// ```rust /// ```rust
/// use tantivy::collector::TopDocs; /// use tantivy::collector::TopDocs;
/// use tantivy::query::QueryParser; /// use tantivy::query::QueryParser;
@@ -431,13 +428,12 @@ impl SegmentCollector for TopScoreSegmentCollector {
mod tests { mod tests {
use super::TopDocs; use super::TopDocs;
use crate::collector::Collector; use crate::collector::Collector;
use crate::query::{AllQuery, Query, QueryParser}; use crate::query::{Query, QueryParser};
use crate::schema::{Field, Schema, FAST, STORED, TEXT}; use crate::schema::{Field, Schema, FAST, STORED, TEXT};
use crate::DocAddress; use crate::DocAddress;
use crate::Index; use crate::Index;
use crate::IndexWriter; use crate::IndexWriter;
use crate::Score; use crate::Score;
use itertools::Itertools;
fn make_index() -> Index { fn make_index() -> Index {
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
@@ -498,29 +494,6 @@ mod tests {
); );
} }
#[test]
fn test_top_collector_stable_sorting() {
let index = make_index();
// using AllQuery to get a constant score
let searcher = index.reader().unwrap().searcher();
let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2)).unwrap();
let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3)).unwrap();
// precondition for the test to be meaningful: we did get documents
// with the same score
assert!(page_1.iter().map(|result| result.0).all_equal());
assert!(page_2.iter().map(|result| result.0).all_equal());
// sanity check since we're relying on make_index()
assert_eq!(page_1.len(), 2);
assert_eq!(page_2.len(), 3);
assert_eq!(page_1, &page_2[..page_1.len()]);
}
#[test] #[test]
#[should_panic] #[should_panic]
fn test_top_0() { fn test_top_0() {
@@ -578,7 +551,7 @@ mod tests {
)); ));
}); });
let searcher = index.reader().unwrap().searcher(); let searcher = index.reader().unwrap().searcher();
let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field::from_field_id(2)); let top_collector = TopDocs::with_limit(4).order_by_u64_field(Field(2));
let segment_reader = searcher.segment_reader(0u32); let segment_reader = searcher.segment_reader(0u32);
top_collector top_collector
.for_segment(0, segment_reader) .for_segment(0, segment_reader)
@@ -619,4 +592,5 @@ mod tests {
let query = query_parser.parse_query(query).unwrap(); let query = query_parser.parse_query(query).unwrap();
(index, query) (index, query)
} }
} }
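The doc comment in this hunk describes `TopDocs` as a `BinaryHeap`-based collector with `O(n log K)` complexity, and one side of the diff adds a stable-ordering guarantee on score ties. A small usage sketch of the collector; `AllQuery` is used because it gives every document the same score, which is the tie case the (added or removed, depending on the side) test exercises:

```rust
use tantivy::collector::TopDocs;
use tantivy::query::AllQuery;
use tantivy::schema::{Schema, TEXT};
use tantivy::{doc, Index};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());
    let mut writer = index.writer(50_000_000)?;
    for text in &["a", "b", "c", "d"] {
        writer.add_document(doc!(title => *text));
    }
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    // Every document scores the same under `AllQuery`; only the top K are kept.
    let page_1 = searcher.search(&AllQuery, &TopDocs::with_limit(2))?;
    let page_2 = searcher.search(&AllQuery, &TopDocs::with_limit(3))?;
    assert!(page_1.len() <= 2 && page_2.len() <= 3);
    Ok(())
}
```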


@@ -2,7 +2,7 @@ use crate::common::BinarySerializable;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::common::VInt; use crate::common::VInt;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::WritePtr;
use crate::schema::Field; use crate::schema::Field;
use crate::space_usage::FieldUsage; use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
@@ -42,7 +42,7 @@ pub struct CompositeWrite<W = WritePtr> {
offsets: HashMap<FileAddr, u64>, offsets: HashMap<FileAddr, u64>,
} }
impl<W: TerminatingWrite + Write> CompositeWrite<W> { impl<W: Write> CompositeWrite<W> {
/// Crate a new API writer that writes a composite file /// Crate a new API writer that writes a composite file
/// in a given write. /// in a given write.
pub fn wrap(w: W) -> CompositeWrite<W> { pub fn wrap(w: W) -> CompositeWrite<W> {
@@ -91,7 +91,8 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
let footer_len = (self.write.written_bytes() - footer_offset) as u32; let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?; footer_len.serialize(&mut self.write)?;
self.write.terminate() self.write.flush()?;
Ok(())
} }
} }
@@ -199,13 +200,13 @@ mod test {
let w = directory.open_write(path).unwrap(); let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w); let mut composite_write = CompositeWrite::wrap(w);
{ {
let mut write_0 = composite_write.for_field(Field::from_field_id(0u32)); let mut write_0 = composite_write.for_field(Field(0u32));
VInt(32431123u64).serialize(&mut write_0).unwrap(); VInt(32431123u64).serialize(&mut write_0).unwrap();
write_0.flush().unwrap(); write_0.flush().unwrap();
} }
{ {
let mut write_4 = composite_write.for_field(Field::from_field_id(4u32)); let mut write_4 = composite_write.for_field(Field(4u32));
VInt(2).serialize(&mut write_4).unwrap(); VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap(); write_4.flush().unwrap();
} }
@@ -215,18 +216,14 @@ mod test {
let r = directory.open_read(path).unwrap(); let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r).unwrap(); let composite_file = CompositeFile::open(&r).unwrap();
{ {
let file0 = composite_file let file0 = composite_file.open_read(Field(0u32)).unwrap();
.open_read(Field::from_field_id(0u32))
.unwrap();
let mut file0_buf = file0.as_slice(); let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0; let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0); assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64); assert_eq!(payload_0, 32431123u64);
} }
{ {
let file4 = composite_file let file4 = composite_file.open_read(Field(4u32)).unwrap();
.open_read(Field::from_field_id(4u32))
.unwrap();
let mut file4_buf = file4.as_slice(); let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0; let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0); assert_eq!(file4_buf.len(), 0);
@@ -234,4 +231,5 @@ mod test {
} }
} }
} }
} }


@@ -1,6 +1,6 @@
use crate::Result; use crate::Result;
use crossbeam::channel; use crossbeam::channel;
use rayon::{ThreadPool, ThreadPoolBuilder}; use scoped_pool::{Pool, ThreadConfig};
/// Search executor whether search request are single thread or multithread. /// Search executor whether search request are single thread or multithread.
/// ///
@@ -11,7 +11,7 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
/// used by the client. Second, we may stop using rayon in the future. /// used by the client. Second, we may stop using rayon in the future.
pub enum Executor { pub enum Executor {
SingleThread, SingleThread,
ThreadPool(ThreadPool), ThreadPool(Pool),
} }
impl Executor { impl Executor {
@@ -21,12 +21,10 @@ impl Executor {
} }
// Creates an Executor that dispatches the tasks in a thread pool. // Creates an Executor that dispatches the tasks in a thread pool.
pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Result<Executor> { pub fn multi_thread(num_threads: usize, prefix: &'static str) -> Executor {
let pool = ThreadPoolBuilder::new() let thread_config = ThreadConfig::new().prefix(prefix);
.num_threads(num_threads) let pool = Pool::with_thread_config(num_threads, thread_config);
.thread_name(move |num| format!("{}{}", prefix, num)) Executor::ThreadPool(pool)
.build()?;
Ok(Executor::ThreadPool(pool))
} }
// Perform a map in the thread pool. // Perform a map in the thread pool.
@@ -50,9 +48,9 @@ impl Executor {
let num_fruits = args_with_indices.len(); let num_fruits = args_with_indices.len();
let fruit_receiver = { let fruit_receiver = {
let (fruit_sender, fruit_receiver) = channel::unbounded(); let (fruit_sender, fruit_receiver) = channel::unbounded();
pool.scope(|scope| { pool.scoped(|scope| {
for arg_with_idx in args_with_indices { for arg_with_idx in args_with_indices {
scope.spawn(|_| { scope.execute(|| {
let (idx, arg) = arg_with_idx; let (idx, arg) = arg_with_idx;
let fruit = f(arg); let fruit = f(arg);
if let Err(err) = fruit_sender.send((idx, fruit)) { if let Err(err) = fruit_sender.send((idx, fruit)) {
@@ -105,7 +103,6 @@ mod tests {
#[should_panic] //< unfortunately the panic message is not propagated #[should_panic] //< unfortunately the panic message is not propagated
fn test_panic_propagates_multi_thread() { fn test_panic_propagates_multi_thread() {
let _result: Vec<usize> = Executor::multi_thread(1, "search-test") let _result: Vec<usize> = Executor::multi_thread(1, "search-test")
.unwrap()
.map( .map(
|_| { |_| {
panic!("panic should propagate"); panic!("panic should propagate");
@@ -129,7 +126,6 @@ mod tests {
#[test] #[test]
fn test_map_multithread() { fn test_map_multithread() {
let result: Vec<usize> = Executor::multi_thread(3, "search-test") let result: Vec<usize> = Executor::multi_thread(3, "search-test")
.unwrap()
.map(|i| Ok(i * 2), 0..10) .map(|i| Ok(i * 2), 0..10)
.unwrap(); .unwrap();
assert_eq!(result.len(), 10); assert_eq!(result.len(), 10);
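This hunk switches the executor between `scoped_pool` and rayon. The rayon side uses the less obvious builder API (named worker threads plus a scope so closures can borrow local data); a standalone sketch of just that part, independent of tantivy's `Executor`:

```rust
use rayon::ThreadPoolBuilder;

fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    // Named worker threads, as on the rayon side of the hunk above.
    let pool = ThreadPoolBuilder::new()
        .num_threads(3)
        .thread_name(|i| format!("thrd-tantivy-search-{}", i))
        .build()?;

    let mut squares = vec![0usize; 8];
    // `scope` lets the spawned closures borrow `squares` without `'static` bounds,
    // and guarantees they finish before the scope returns.
    pool.scope(|scope| {
        for (i, slot) in squares.iter_mut().enumerate() {
            scope.spawn(move |_| *slot = i * i);
        }
    });
    assert_eq!(squares[3], 9);
    Ok(())
}
```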


@@ -26,10 +26,9 @@ use crate::IndexWriter;
use crate::Result; use crate::Result;
use num_cpus; use num_cpus;
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::fmt; use std::fmt;
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
use std::path::{Path, PathBuf}; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> { fn load_metas(directory: &dyn Directory, inventory: &SegmentMetaInventory) -> Result<IndexMeta> {
@@ -73,16 +72,15 @@ impl Index {
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads. /// by a thread pool with a given number of threads.
pub fn set_multithread_executor(&mut self, num_threads: usize) -> Result<()> { pub fn set_multithread_executor(&mut self, num_threads: usize) {
self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-")?); self.executor = Arc::new(Executor::multi_thread(num_threads, "thrd-tantivy-search-"));
Ok(())
} }
/// Replace the default single thread search executor pool /// Replace the default single thread search executor pool
/// by a thread pool with a given number of threads. /// by a thread pool with a given number of threads.
pub fn set_default_multithread_executor(&mut self) -> Result<()> { pub fn set_default_multithread_executor(&mut self) {
let default_num_threads = num_cpus::get(); let default_num_threads = num_cpus::get();
self.set_multithread_executor(default_num_threads) self.set_multithread_executor(default_num_threads);
} }
/// Creates a new index using the `RAMDirectory`. /// Creates a new index using the `RAMDirectory`.
@@ -370,11 +368,6 @@ impl Index {
.map(SegmentMeta::id) .map(SegmentMeta::id)
.collect()) .collect())
} }
/// Returns the set of corrupted files
pub fn validate_checksum(&self) -> Result<HashSet<PathBuf>> {
self.directory.list_damaged().map_err(Into::into)
}
} }
impl fmt::Debug for Index { impl fmt::Debug for Index {
@@ -602,4 +595,5 @@ mod tests {
assert_eq!(searcher.num_docs(), 8_000); assert_eq!(searcher.num_docs(), 8_000);
assert!(mem_right_after_merge_finished < mem_right_after_commit); assert!(mem_right_after_merge_finished < mem_right_after_commit);
} }
} }
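One side of the `Index` hunk makes `set_multithread_executor` fallible, because building the rayon pool can fail. A tiny caller-side sketch, assuming the `Result`-returning variant (on the other side of the diff the call is infallible and the `?` would simply be dropped):

```rust
use tantivy::Index;

// Replace the default single-thread search executor with a 4-thread pool.
fn use_four_search_threads(index: &mut Index) -> tantivy::Result<()> {
    index.set_multithread_executor(4)?;
    Ok(())
}
```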


@@ -150,21 +150,6 @@ impl SegmentMeta {
self.num_deleted_docs() > 0 self.num_deleted_docs() > 0
} }
/// Updates the max_doc value from the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> SegmentMeta {
assert_eq!(self.tracked.max_doc, 0);
assert!(self.tracked.deletes.is_none());
let tracked = self.tracked.map(move |inner_meta| InnerSegmentMeta {
segment_id: inner_meta.segment_id,
max_doc,
deletes: None,
});
SegmentMeta { tracked }
}
#[doc(hidden)] #[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> SegmentMeta {
let delete_meta = DeleteMeta { let delete_meta = DeleteMeta {


@@ -50,17 +50,6 @@ impl Segment {
&self.meta &self.meta
} }
/// Updates the max_doc value from the `SegmentMeta`.
///
/// This method is only used when updating `max_doc` from 0
/// as we finalize a fresh new segment.
pub(crate) fn with_max_doc(self, max_doc: u32) -> Segment {
Segment {
index: self.index,
meta: self.meta.with_max_doc(max_doc),
}
}
#[doc(hidden)] #[doc(hidden)]
pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment { pub fn with_delete_meta(self, num_deleted_docs: u32, opstamp: Opstamp) -> Segment {
Segment { Segment {


@@ -76,7 +76,7 @@ impl SegmentId {
} }
/// Error type used when parsing a `SegmentId` from a string fails. /// Error type used when parsing a `SegmentId` from a string fails.
pub struct SegmentIdParseError(uuid::Error); pub struct SegmentIdParseError(uuid::parser::ParseError);
impl Error for SegmentIdParseError {} impl Error for SegmentIdParseError {}


@@ -1,4 +1,3 @@
use crate::common::CompositeFile;
use crate::common::HasLen; use crate::common::HasLen;
use crate::core::InvertedIndexReader; use crate::core::InvertedIndexReader;
use crate::core::Segment; use crate::core::Segment;
@@ -15,6 +14,7 @@ use crate::schema::Schema;
use crate::space_usage::SegmentSpaceUsage; use crate::space_usage::SegmentSpaceUsage;
use crate::store::StoreReader; use crate::store::StoreReader;
use crate::termdict::TermDictionary; use crate::termdict::TermDictionary;
use crate::CompositeFile;
use crate::DocId; use crate::DocId;
use crate::Result; use crate::Result;
use fail::fail_point; use fail::fail_point;


@@ -118,8 +118,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// ///
/// Specifically, subsequent writes or flushes should /// Specifically, subsequent writes or flushes should
/// have no effect on the returned `ReadOnlySource` object. /// have no effect on the returned `ReadOnlySource` object.
///
/// You should only use this to read files create with [`open_write`]
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>; fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError>;
/// Removes a file /// Removes a file
@@ -159,8 +157,6 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {
/// atomic_write. /// atomic_write.
/// ///
/// This should only be used for small files. /// This should only be used for small files.
///
/// You should only use this to read files create with [`atomic_write`]
fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>; fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError>;
/// Atomically replace the content of a file with data. /// Atomically replace the content of a file with data.


@@ -1,213 +0,0 @@
use crate::directory::read_only_source::ReadOnlySource;
use crate::directory::{AntiCallToken, TerminatingWrite};
use byteorder::{ByteOrder, LittleEndian};
use crc32fast::Hasher;
use std::io;
use std::io::Write;
const COMMON_FOOTER_SIZE: usize = 4 * 5;
#[derive(Debug, Clone, PartialEq)]
pub struct Footer {
pub tantivy_version: (u32, u32, u32),
pub meta: String,
pub versioned_footer: VersionedFooter,
}
impl Footer {
pub fn new(versioned_footer: VersionedFooter) -> Self {
let tantivy_version = (
env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
);
Footer {
tantivy_version,
meta: format!(
"tantivy {}.{}.{}, index v{}",
tantivy_version.0,
tantivy_version.1,
tantivy_version.2,
versioned_footer.version()
),
versioned_footer,
}
}
pub fn to_bytes(&self) -> Vec<u8> {
let mut res = self.versioned_footer.to_bytes();
res.extend_from_slice(self.meta.as_bytes());
let len = res.len();
res.resize(len + COMMON_FOOTER_SIZE, 0);
let mut common_footer = &mut res[len..];
LittleEndian::write_u32(&mut common_footer, self.meta.len() as u32);
LittleEndian::write_u32(&mut common_footer[4..], self.tantivy_version.0);
LittleEndian::write_u32(&mut common_footer[8..], self.tantivy_version.1);
LittleEndian::write_u32(&mut common_footer[12..], self.tantivy_version.2);
LittleEndian::write_u32(&mut common_footer[16..], (len + COMMON_FOOTER_SIZE) as u32);
res
}
pub fn from_bytes(data: &[u8]) -> Result<Self, io::Error> {
let len = data.len();
if len < COMMON_FOOTER_SIZE + 4 {
// 4 bytes for index version, stored in versioned footer
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!("File corrupted. The footer len must be over 24, while the entire file len is {}", len)
)
);
}
let size = LittleEndian::read_u32(&data[len - 4..]) as usize;
if len < size as usize {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The footer len is {}, while the entire file len is {}",
size, len
),
));
}
let footer = &data[len - size as usize..];
let meta_len = LittleEndian::read_u32(&footer[size - 20..]) as usize;
let tantivy_major = LittleEndian::read_u32(&footer[size - 16..]);
let tantivy_minor = LittleEndian::read_u32(&footer[size - 12..]);
let tantivy_patch = LittleEndian::read_u32(&footer[size - 8..]);
Ok(Footer {
tantivy_version: (tantivy_major, tantivy_minor, tantivy_patch),
meta: String::from_utf8_lossy(&footer[size - meta_len - 20..size - 20]).into_owned(),
versioned_footer: VersionedFooter::from_bytes(&footer[..size - meta_len - 20])?,
})
}
pub fn extract_footer(source: ReadOnlySource) -> Result<(Footer, ReadOnlySource), io::Error> {
let footer = Footer::from_bytes(source.as_slice())?;
let reader = source.slice_to(source.as_slice().len() - footer.size());
Ok((footer, reader))
}
pub fn size(&self) -> usize {
self.versioned_footer.size() as usize + self.meta.len() + 20
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum VersionedFooter {
UnknownVersion { version: u32, size: u32 },
V0(u32), // crc
}
impl VersionedFooter {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
VersionedFooter::V0(crc) => {
let mut res = vec![0; 8];
LittleEndian::write_u32(&mut res, 0);
LittleEndian::write_u32(&mut res[4..], *crc);
res
}
VersionedFooter::UnknownVersion { .. } => {
panic!("Unsupported index should never get serialized");
}
}
}
pub fn from_bytes(footer: &[u8]) -> Result<Self, io::Error> {
assert!(footer.len() >= 4);
let version = LittleEndian::read_u32(footer);
match version {
0 => {
if footer.len() == 8 {
Ok(VersionedFooter::V0(LittleEndian::read_u32(&footer[4..])))
} else {
Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
format!(
"File corrupted. The versioned footer len is {}, while it should be 8",
footer.len()
),
))
}
}
version => Ok(VersionedFooter::UnknownVersion {
version,
size: footer.len() as u32,
}),
}
}
pub fn size(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 8,
VersionedFooter::UnknownVersion { size, .. } => *size,
}
}
pub fn version(&self) -> u32 {
match self {
VersionedFooter::V0(_) => 0,
VersionedFooter::UnknownVersion { version, .. } => *version,
}
}
pub fn crc(&self) -> Option<u32> {
match self {
VersionedFooter::V0(crc) => Some(*crc),
VersionedFooter::UnknownVersion { .. } => None,
}
}
}
pub(crate) struct FooterProxy<W: TerminatingWrite> {
/// always Some except after terminate call
hasher: Option<Hasher>,
/// always Some except after terminate call
writer: Option<W>,
}
impl<W: TerminatingWrite> FooterProxy<W> {
pub fn new(writer: W) -> Self {
FooterProxy {
hasher: Some(Hasher::new()),
writer: Some(writer),
}
}
}
impl<W: TerminatingWrite> Write for FooterProxy<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let count = self.writer.as_mut().unwrap().write(buf)?;
self.hasher.as_mut().unwrap().update(&buf[..count]);
Ok(count)
}
fn flush(&mut self) -> io::Result<()> {
self.writer.as_mut().unwrap().flush()
}
}
impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
let crc = self.hasher.take().unwrap().finalize();
let footer = Footer::new(VersionedFooter::V0(crc)).to_bytes();
let mut writer = self.writer.take().unwrap();
writer.write_all(&footer)?;
writer.terminate()
}
}
#[cfg(test)]
mod tests {
use crate::directory::footer::{Footer, VersionedFooter};
#[test]
fn test_serialize_deserialize_footer() {
let crc = 123456;
let footer = Footer::new(VersionedFooter::V0(crc));
let footer_bytes = footer.to_bytes();
assert_eq!(Footer::from_bytes(&footer_bytes).unwrap(), footer);
}
}


@@ -1,6 +1,5 @@
use crate::core::MANAGED_FILEPATH; use crate::core::MANAGED_FILEPATH;
use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, IOError, LockError, OpenReadError, OpenWriteError};
use crate::directory::footer::{Footer, FooterProxy};
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::Lock; use crate::directory::Lock;
use crate::directory::META_LOCK; use crate::directory::META_LOCK;
@@ -9,7 +8,6 @@ use crate::directory::{WatchCallback, WatchHandle};
use crate::error::DataCorruption; use crate::error::DataCorruption;
use crate::Directory; use crate::Directory;
use crate::Result; use crate::Result;
use crc32fast::Hasher;
use serde_json; use serde_json;
use std::collections::HashSet; use std::collections::HashSet;
use std::io; use std::io;
@@ -209,59 +207,17 @@ impl ManagedDirectory {
} }
Ok(()) Ok(())
} }
/// Verify checksum of a managed file
pub fn validate_checksum(&self, path: &Path) -> result::Result<bool, OpenReadError> {
let reader = self.directory.open_read(path)?;
let (footer, data) = Footer::extract_footer(reader)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
let mut hasher = Hasher::new();
hasher.update(data.as_slice());
let crc = hasher.finalize();
Ok(footer
.versioned_footer
.crc()
.map(|v| v == crc)
.unwrap_or(false))
}
/// List files for which checksum does not match content
pub fn list_damaged(&self) -> result::Result<HashSet<PathBuf>, OpenReadError> {
let mut hashset = HashSet::new();
let managed_paths = self
.meta_informations
.read()
.expect("Managed directory rlock poisoned in list damaged.")
.managed_paths
.clone();
for path in managed_paths.into_iter() {
if !self.validate_checksum(&path)? {
hashset.insert(path);
}
}
Ok(hashset)
}
} }
impl Directory for ManagedDirectory { impl Directory for ManagedDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
let read_only_source = self.directory.open_read(path)?; self.directory.open_read(path)
let (_footer, reader) = Footer::extract_footer(read_only_source)
.map_err(|err| IOError::with_path(path.to_path_buf(), err))?;
Ok(reader)
} }
fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> { fn open_write(&mut self, path: &Path) -> result::Result<WritePtr, OpenWriteError> {
self.register_file_as_managed(path) self.register_file_as_managed(path)
.map_err(|e| IOError::with_path(path.to_owned(), e))?; .map_err(|e| IOError::with_path(path.to_owned(), e))?;
Ok(io::BufWriter::new(Box::new(FooterProxy::new( self.directory.open_write(path)
self.directory
.open_write(path)?
.into_inner()
.map_err(|_| ())
.expect("buffer should be empty"),
))))
} }
fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> { fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
@@ -303,9 +259,8 @@ impl Clone for ManagedDirectory {
#[cfg(test)] #[cfg(test)]
mod tests_mmap_specific { mod tests_mmap_specific {
use crate::directory::{Directory, ManagedDirectory, MmapDirectory, TerminatingWrite}; use crate::directory::{Directory, ManagedDirectory, MmapDirectory};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use tempfile::TempDir; use tempfile::TempDir;
@@ -320,14 +275,15 @@ mod tests_mmap_specific {
{ {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let write_file = managed_directory.open_write(test_path1).unwrap(); let mut write_file = managed_directory.open_write(test_path1).unwrap();
write_file.terminate().unwrap(); write_file.flush().unwrap();
managed_directory managed_directory
.atomic_write(test_path2, &[0u8, 1u8]) .atomic_write(test_path2, &[0u8, 1u8])
.unwrap(); .unwrap();
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(managed_directory.exists(test_path2)); assert!(managed_directory.exists(test_path2));
let living_files: HashSet<PathBuf> = [test_path1.to_owned()].iter().cloned().collect(); let living_files: HashSet<PathBuf> =
[test_path1.to_owned()].into_iter().cloned().collect();
managed_directory.garbage_collect(|| living_files); managed_directory.garbage_collect(|| living_files);
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
assert!(!managed_directory.exists(test_path2)); assert!(!managed_directory.exists(test_path2));
@@ -354,9 +310,9 @@ mod tests_mmap_specific {
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap(); let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap(); let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap(); managed_directory
write.write_all(&[0u8, 1u8]).unwrap(); .atomic_write(test_path1, &vec![0u8, 1u8])
write.terminate().unwrap(); .unwrap();
assert!(managed_directory.exists(test_path1)); assert!(managed_directory.exists(test_path1));
let _mmap_read = managed_directory.open_read(test_path1).unwrap(); let _mmap_read = managed_directory.open_read(test_path1).unwrap();
@@ -375,38 +331,4 @@ mod tests_mmap_specific {
} }
} }
#[test]
fn test_checksum() {
let test_path1: &'static Path = Path::new("some_path_for_test");
let test_path2: &'static Path = Path::new("other_test_path");
let tempdir = TempDir::new().unwrap();
let tempdir_path = PathBuf::from(tempdir.path());
let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
let mut managed_directory = ManagedDirectory::wrap(mmap_directory).unwrap();
let mut write = managed_directory.open_write(test_path1).unwrap();
write.write_all(&[0u8, 1u8]).unwrap();
write.terminate().unwrap();
let mut write = managed_directory.open_write(test_path2).unwrap();
write.write_all(&[3u8, 4u8, 5u8]).unwrap();
write.terminate().unwrap();
assert!(managed_directory.list_damaged().unwrap().is_empty());
let mut corrupted_path = tempdir_path.clone();
corrupted_path.push(test_path2);
let mut file = OpenOptions::new()
.write(true)
.open(&corrupted_path)
.unwrap();
file.write_all(&[255u8]).unwrap();
file.flush().unwrap();
drop(file);
let damaged = managed_directory.list_damaged().unwrap();
assert_eq!(damaged.len(), 1);
assert!(damaged.contains(test_path2));
}
} }

View File

@@ -11,7 +11,6 @@ use crate::directory::error::{
DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError, DeleteError, IOError, OpenDirectoryError, OpenReadError, OpenWriteError,
}; };
use crate::directory::read_only_source::BoxedData; use crate::directory::read_only_source::BoxedData;
use crate::directory::AntiCallToken;
use crate::directory::Directory; use crate::directory::Directory;
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::Lock; use crate::directory::Lock;
@@ -19,7 +18,7 @@ use crate::directory::ReadOnlySource;
use crate::directory::WatchCallback; use crate::directory::WatchCallback;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use crate::directory::WatchHandle; use crate::directory::WatchHandle;
use crate::directory::{TerminatingWrite, WritePtr}; use crate::directory::WritePtr;
use atomicwrites; use atomicwrites;
use memmap::Mmap; use memmap::Mmap;
use std::collections::HashMap; use std::collections::HashMap;
@@ -142,28 +141,42 @@ impl MmapCache {
} }
} }
struct WatcherWrapper { struct InnerWatcherWrapper {
_watcher: Mutex<notify::RecommendedWatcher>, _watcher: Mutex<notify::RecommendedWatcher>,
watcher_router: Arc<WatchCallbackList>, watcher_router: WatchCallbackList,
}
impl InnerWatcherWrapper {
pub fn new(path: &Path) -> Result<(Self, Receiver<notify::RawEvent>), notify::Error> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel();
// We need to initialize the
let mut watcher = notify::raw_watcher(tx)?;
watcher.watch(path, RecursiveMode::Recursive)?;
let inner = InnerWatcherWrapper {
_watcher: Mutex::new(watcher),
watcher_router: Default::default(),
};
Ok((inner, watcher_recv))
}
}
#[derive(Clone)]
struct WatcherWrapper {
inner: Arc<InnerWatcherWrapper>,
} }
impl WatcherWrapper { impl WatcherWrapper {
pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> { pub fn new(path: &Path) -> Result<Self, OpenDirectoryError> {
let (tx, watcher_recv): (Sender<RawEvent>, Receiver<RawEvent>) = channel(); let (inner, watcher_recv) = InnerWatcherWrapper::new(path).map_err(|err| match err {
// We need to initialize the notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()),
let watcher = notify::raw_watcher(tx) _ => {
.and_then(|mut watcher| { panic!("Unknown error while starting watching directory {:?}", path);
watcher.watch(path, RecursiveMode::Recursive)?; }
Ok(watcher) })?;
}) let watcher_wrapper = WatcherWrapper {
.map_err(|err| match err { inner: Arc::new(inner),
notify::Error::PathNotFound => OpenDirectoryError::DoesNotExist(path.to_owned()), };
_ => { let watcher_wrapper_clone = watcher_wrapper.clone();
panic!("Unknown error while starting watching directory {:?}", path);
}
})?;
let watcher_router: Arc<WatchCallbackList> = Default::default();
let watcher_router_clone = watcher_router.clone();
thread::Builder::new() thread::Builder::new()
.name("meta-file-watch-thread".to_string()) .name("meta-file-watch-thread".to_string())
.spawn(move || { .spawn(move || {
@@ -174,7 +187,7 @@ impl WatcherWrapper {
// We might want to be more accurate than this at one point. // We might want to be more accurate than this at one point.
if let Some(filename) = changed_path.file_name() { if let Some(filename) = changed_path.file_name() {
if filename == *META_FILEPATH { if filename == *META_FILEPATH {
watcher_router_clone.broadcast(); watcher_wrapper_clone.inner.watcher_router.broadcast();
} }
} }
} }
@@ -187,15 +200,13 @@ impl WatcherWrapper {
} }
} }
} }
})?; })
Ok(WatcherWrapper { .expect("Failed to spawn thread to watch meta.json");
_watcher: Mutex::new(watcher), Ok(watcher_wrapper)
watcher_router,
})
} }
pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle { pub fn watch(&mut self, watch_callback: WatchCallback) -> WatchHandle {
self.watcher_router.subscribe(watch_callback) self.inner.watcher_router.subscribe(watch_callback)
} }
} }
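One side of this hunk splits the watcher into an `InnerWatcherWrapper` held behind an `Arc`, so the background thread and the directory handle share the same callback list. A stripped-down sketch of that ownership pattern using only the standard library; `CallbackList` and the `()` event are illustrative stand-ins for `WatchCallbackList` and `notify::RawEvent`, not tantivy's types:

```rust
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;

/// Illustrative stand-in for tantivy's `WatchCallbackList`.
#[derive(Default)]
struct CallbackList {
    callbacks: Mutex<Vec<Box<dyn Fn() + Send>>>,
}

impl CallbackList {
    fn subscribe(&self, callback: Box<dyn Fn() + Send>) {
        self.callbacks.lock().unwrap().push(callback);
    }
    fn broadcast(&self) {
        for callback in self.callbacks.lock().unwrap().iter() {
            callback();
        }
    }
}

/// The public handle: cheap to clone because it only clones the `Arc`.
#[derive(Clone)]
struct Watcher {
    inner: Arc<CallbackList>,
}

fn main() {
    let (tx, rx): (Sender<()>, _) = channel();
    let watcher = Watcher {
        inner: Arc::new(CallbackList::default()),
    };
    watcher.inner.subscribe(Box::new(|| println!("meta.json changed")));

    // The clone moved into the thread keeps the shared callback list alive,
    // mirroring how WatcherWrapper clones its Arc<InnerWatcherWrapper>.
    let watcher_clone = watcher.clone();
    let handle = thread::spawn(move || {
        while rx.recv().is_ok() {
            watcher_clone.inner.broadcast();
        }
    });

    tx.send(()).unwrap(); // simulate one file-system event on meta.json
    drop(tx); // closing the channel lets the watch thread exit
    handle.join().unwrap();
}
```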
@@ -254,7 +265,7 @@ impl MmapDirectoryInner {
} }
} }
if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() { if let Some(watch_wrapper) = self.watcher.write().unwrap().as_mut() {
Ok(watch_wrapper.watch(watch_callback)) return Ok(watch_wrapper.watch(watch_callback));
} else { } else {
unreachable!("At this point, watch wrapper is supposed to be initialized"); unreachable!("At this point, watch wrapper is supposed to be initialized");
} }
@@ -401,12 +412,6 @@ impl Seek for SafeFileWriter {
} }
} }
impl TerminatingWrite for SafeFileWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
impl Directory for MmapDirectory { impl Directory for MmapDirectory {
fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> { fn open_read(&self, path: &Path) -> result::Result<ReadOnlySource, OpenReadError> {
debug!("Open Read {:?}", path); debug!("Open Read {:?}", path);
@@ -534,7 +539,7 @@ impl Directory for MmapDirectory {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
// There are more tests in directory/mod.rs // There are more tests in directory/lib.rs
// The following tests are specific to the MmapDirectory // The following tests are specific to the MmapDirectory
use super::*; use super::*;

View File

@@ -9,7 +9,6 @@ mod mmap_directory;
mod directory; mod directory;
mod directory_lock; mod directory_lock;
mod footer;
mod managed_directory; mod managed_directory;
mod ram_directory; mod ram_directory;
mod read_only_source; mod read_only_source;
@@ -25,49 +24,18 @@ pub use self::ram_directory::RAMDirectory;
pub use self::read_only_source::ReadOnlySource; pub use self::read_only_source::ReadOnlySource;
pub(crate) use self::watch_event_router::WatchCallbackList; pub(crate) use self::watch_event_router::WatchCallbackList;
pub use self::watch_event_router::{WatchCallback, WatchHandle}; pub use self::watch_event_router::{WatchCallback, WatchHandle};
use std::io::{self, BufWriter, Write}; use std::io::{BufWriter, Write};
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
pub use self::mmap_directory::MmapDirectory; pub use self::mmap_directory::MmapDirectory;
pub use self::managed_directory::ManagedDirectory; pub use self::managed_directory::ManagedDirectory;
/// Struct used to prevent from calling [`terminate_ref`](trait.TerminatingWrite#method.terminate_ref) directly
pub struct AntiCallToken(());
/// Trait used to indicate when no more write need to be done on a writer
pub trait TerminatingWrite: Write {
/// Indicate that the writer will no longer be used. Internally call terminate_ref.
fn terminate(mut self) -> io::Result<()>
where
Self: Sized,
{
self.terminate_ref(AntiCallToken(()))
}
/// You should implement this function to define custom behavior.
/// This function should flush any buffer it may hold.
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}
impl<W: TerminatingWrite + ?Sized> TerminatingWrite for Box<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.as_mut().terminate_ref(token)
}
}
impl<W: TerminatingWrite> TerminatingWrite for BufWriter<W> {
fn terminate_ref(&mut self, a: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.get_mut().terminate_ref(a)
}
}
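The `TerminatingWrite` trait shown on one side of this diff lets a writer be explicitly "finished" (flush buffers, append a footer) rather than merely flushed, while the zero-sized `AntiCallToken` keeps callers from invoking `terminate_ref` directly. A small sketch of implementing and consuming the trait as defined above; the trait and token are copied locally so the example compiles on its own, and `LoggingWriter` is a made-up type for illustration:

```rust
use std::io::{self, Write};

// Local copies of the trait and token shown above, so this sketch is self-contained.
pub struct AntiCallToken(());

pub trait TerminatingWrite: Write {
    /// Consumes the writer; once terminated it cannot be written to again.
    fn terminate(mut self) -> io::Result<()>
    where
        Self: Sized,
    {
        self.terminate_ref(AntiCallToken(()))
    }
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()>;
}

/// Made-up writer used to show the terminate-vs-flush distinction.
struct LoggingWriter {
    buffer: Vec<u8>,
}

impl Write for LoggingWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buffer.extend_from_slice(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

impl TerminatingWrite for LoggingWriter {
    fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
        // A real implementation would flush its buffers and write its footer here.
        println!("terminated after {} bytes", self.buffer.len());
        self.flush()
    }
}

fn main() -> io::Result<()> {
    let mut writer = LoggingWriter { buffer: Vec::new() };
    writer.write_all(b"segment data")?;
    // `terminate` takes the writer by value, so no further writes can happen afterwards.
    writer.terminate()
}
```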
/// Write object for Directory. /// Write object for Directory.
/// ///
/// `WritePtr` are required to implement both Write /// `WritePtr` are required to implement both Write
/// and Seek. /// and Seek.
pub type WritePtr = BufWriter<Box<dyn TerminatingWrite>>; pub type WritePtr = BufWriter<Box<dyn Write>>;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;

View File

@@ -1,9 +1,8 @@
use crate::core::META_FILEPATH; use crate::core::META_FILEPATH;
use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError}; use crate::directory::error::{DeleteError, OpenReadError, OpenWriteError};
use crate::directory::AntiCallToken;
use crate::directory::WatchCallbackList; use crate::directory::WatchCallbackList;
use crate::directory::WritePtr;
use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle}; use crate::directory::{Directory, ReadOnlySource, WatchCallback, WatchHandle};
use crate::directory::{TerminatingWrite, WritePtr};
use fail::fail_point; use fail::fail_point;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
@@ -72,12 +71,6 @@ impl Write for VecWriter {
} }
} }
impl TerminatingWrite for VecWriter {
fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> {
self.flush()
}
}
#[derive(Default)] #[derive(Default)]
struct InnerDirectory { struct InnerDirectory {
fs: HashMap<PathBuf, ReadOnlySource>, fs: HashMap<PathBuf, ReadOnlySource>,

View File

@@ -127,7 +127,7 @@ fn test_watch(directory: &mut dyn Directory) {
assert!(directory assert!(directory
.atomic_write(Path::new("meta.json"), b"random_test_data_2") .atomic_write(Path::new("meta.json"), b"random_test_data_2")
.is_ok()); .is_ok());
for _ in 0..1_000 { for _ in 0..100 {
if counter.load(Ordering::SeqCst) > i { if counter.load(Ordering::SeqCst) > i {
break; break;
} }

View File

@@ -152,4 +152,5 @@ mod tests {
thread::sleep(Duration::from_millis(WAIT_TIME)); thread::sleep(Duration::from_millis(WAIT_TIME));
assert_eq!(2, counter.load(Ordering::SeqCst)); assert_eq!(2, counter.load(Ordering::SeqCst));
} }
} }

View File

@@ -170,9 +170,3 @@ impl From<serde_json::Error> for TantivyError {
TantivyError::IOError(io_err.into()) TantivyError::IOError(io_err.into())
} }
} }
impl From<rayon::ThreadPoolBuildError> for TantivyError {
fn from(error: rayon::ThreadPoolBuildError) -> TantivyError {
TantivyError::SystemError(error.to_string())
}
}

View File

@@ -10,14 +10,11 @@ use std::io::Write;
/// Write a delete `BitSet` /// Write a delete `BitSet`
/// ///
/// where `delete_bitset` is the set of deleted `DocId`. /// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset( pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
delete_bitset: &BitSet, let max_doc = delete_bitset.capacity();
max_doc: u32,
writer: &mut WritePtr,
) -> io::Result<()> {
let mut byte = 0u8; let mut byte = 0u8;
let mut shift = 0u8; let mut shift = 0u8;
for doc in 0..(max_doc as usize) { for doc in 0..max_doc {
if delete_bitset.contains(doc) { if delete_bitset.contains(doc) {
byte |= 1 << shift; byte |= 1 << shift;
} }
@@ -89,17 +86,18 @@ mod tests {
use bit_set::BitSet; use bit_set::BitSet;
use std::path::PathBuf; use std::path::PathBuf;
fn test_delete_bitset_helper(bitset: &BitSet, max_doc: u32) { fn test_delete_bitset_helper(bitset: &BitSet) {
let test_path = PathBuf::from("test"); let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create(); let mut directory = RAMDirectory::create();
{ {
let mut writer = directory.open_write(&*test_path).unwrap(); let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, max_doc, &mut writer).unwrap(); write_delete_bitset(bitset, &mut writer).unwrap();
} }
{ {
let source = directory.open_read(&test_path).unwrap(); let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source); let delete_bitset = DeleteBitSet::open(source);
for doc in 0..max_doc as usize { let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId)); assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
} }
assert_eq!(delete_bitset.len(), bitset.len()); assert_eq!(delete_bitset.len(), bitset.len());
@@ -112,7 +110,7 @@ mod tests {
let mut bitset = BitSet::with_capacity(10); let mut bitset = BitSet::with_capacity(10);
bitset.insert(1); bitset.insert(1);
bitset.insert(9); bitset.insert(9);
test_delete_bitset_helper(&bitset, 10); test_delete_bitset_helper(&bitset);
} }
{ {
let mut bitset = BitSet::with_capacity(8); let mut bitset = BitSet::with_capacity(8);
@@ -121,7 +119,7 @@ mod tests {
bitset.insert(3); bitset.insert(3);
bitset.insert(5); bitset.insert(5);
bitset.insert(7); bitset.insert(7);
test_delete_bitset_helper(&bitset, 8); test_delete_bitset_helper(&bitset);
} }
} }
} }
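One side of the diff passes `max_doc` to `write_delete_bitset` explicitly while the other derives the document count from the bitset's capacity; the packing scheme itself is the same in both: one byte carries the deletion flags of eight consecutive `DocId`s, least-significant bit first. An illustrative, stand-alone version of that packing loop (not the crate's exact code):

```rust
/// Packs one deletion flag per document into bytes,
/// least-significant bit first, eight documents per byte.
fn pack_delete_flags(deleted: &[bool]) -> Vec<u8> {
    let mut bytes = Vec::with_capacity((deleted.len() + 7) / 8);
    let mut byte = 0u8;
    let mut shift = 0u8;
    for &is_deleted in deleted {
        if is_deleted {
            byte |= 1 << shift;
        }
        if shift == 7 {
            bytes.push(byte);
            byte = 0;
            shift = 0;
        } else {
            shift += 1;
        }
    }
    if shift != 0 {
        bytes.push(byte); // flush the last, partially filled byte
    }
    bytes
}

fn main() {
    // Documents 1 and 9 deleted, as in the first test case above.
    let mut flags = vec![false; 10];
    flags[1] = true;
    flags[9] = true;
    assert_eq!(pack_delete_flags(&flags), vec![0b0000_0010, 0b0000_0010]);
}
```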

View File

@@ -148,13 +148,13 @@ fn value_to_u64(value: &Value) -> u64 {
mod tests { mod tests {
use super::*; use super::*;
use crate::common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use crate::fastfield::FastFieldReader;
use crate::schema::Document; use crate::schema::Document;
use crate::schema::Field; use crate::schema::Field;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;
use crate::CompositeFile;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use rand::prelude::SliceRandom; use rand::prelude::SliceRandom;
use rand::rngs::StdRng; use rand::rngs::StdRng;
@@ -429,6 +429,7 @@ mod tests {
} }
} }
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
@@ -436,9 +437,9 @@ mod bench {
use super::tests::FIELD; use super::tests::FIELD;
use super::tests::{generate_permutation, SCHEMA}; use super::tests::{generate_permutation, SCHEMA};
use super::*; use super::*;
use crate::common::CompositeFile; use common::CompositeFile;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::FastFieldReader; use fastfield::FastFieldReader;
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
use test::{self, Bencher}; use test::{self, Bencher};
@@ -536,4 +537,5 @@ mod bench {
}); });
} }
} }
} }

View File

@@ -5,8 +5,8 @@ use crate::postings::UnorderedTermId;
use crate::schema::{Document, Field}; use crate::schema::{Document, Field};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use crate::DocId; use crate::DocId;
use fnv::FnvHashMap;
use itertools::Itertools; use itertools::Itertools;
use std::collections::HashMap;
use std::io; use std::io;
/// Writer for multi-valued (as in, more than one value per document) /// Writer for multi-valued (as in, more than one value per document)
@@ -102,7 +102,7 @@ impl MultiValueIntFastFieldWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut FastFieldSerializer, serializer: &mut FastFieldSerializer,
mapping_opt: Option<&FnvHashMap<UnorderedTermId, TermOrdinal>>, mapping_opt: Option<&HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> { ) -> io::Result<()> {
{ {
// writing the offset index // writing the offset index

View File

@@ -2,12 +2,12 @@ use super::FastValue;
use crate::common::bitpacker::BitUnpacker; use crate::common::bitpacker::BitUnpacker;
use crate::common::compute_num_bits; use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeFile;
use crate::directory::ReadOnlySource; use crate::directory::ReadOnlySource;
use crate::directory::{Directory, RAMDirectory, WritePtr}; use crate::directory::{Directory, RAMDirectory, WritePtr};
use crate::fastfield::{FastFieldSerializer, FastFieldsWriter}; use crate::fastfield::{FastFieldSerializer, FastFieldsWriter};
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::FAST; use crate::schema::FAST;
use crate::CompositeFile;
use crate::DocId; use crate::DocId;
use owning_ref::OwningRef; use owning_ref::OwningRef;
use std::collections::HashMap; use std::collections::HashMap;

View File

@@ -1,9 +1,9 @@
use crate::common::CompositeFile;
use crate::fastfield::BytesFastFieldReader; use crate::fastfield::BytesFastFieldReader;
use crate::fastfield::MultiValueIntFastFieldReader; use crate::fastfield::MultiValueIntFastFieldReader;
use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader}; use crate::fastfield::{FastFieldNotAvailableError, FastFieldReader};
use crate::schema::{Cardinality, Field, FieldType, Schema}; use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::space_usage::PerFieldSpaceUsage; use crate::space_usage::PerFieldSpaceUsage;
use crate::CompositeFile;
use crate::Result; use crate::Result;
use std::collections::HashMap; use std::collections::HashMap;
@@ -59,7 +59,8 @@ impl FastFieldReaders {
fast_bytes: Default::default(), fast_bytes: Default::default(),
fast_fields_composite: fast_fields_composite.clone(), fast_fields_composite: fast_fields_composite.clone(),
}; };
for (field, field_entry) in schema.fields() { for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
if field_type == &FieldType::Bytes { if field_type == &FieldType::Bytes {
let idx_reader = fast_fields_composite let idx_reader = fast_fields_composite

View File

@@ -1,10 +1,10 @@
use crate::common::bitpacker::BitPacker; use crate::common::bitpacker::BitPacker;
use crate::common::compute_num_bits; use crate::common::compute_num_bits;
use crate::common::BinarySerializable; use crate::common::BinarySerializable;
use crate::common::CompositeWrite;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::schema::Field; use crate::schema::Field;
use crate::CompositeWrite;
use std::io::{self, Write}; use std::io::{self, Write};
/// `FastFieldSerializer` is in charge of serializing /// `FastFieldSerializer` is in charge of serializing

View File

@@ -6,7 +6,6 @@ use crate::fastfield::{BytesFastFieldWriter, FastFieldSerializer};
use crate::postings::UnorderedTermId; use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldType, Schema}; use crate::schema::{Cardinality, Document, Field, FieldType, Schema};
use crate::termdict::TermOrdinal; use crate::termdict::TermOrdinal;
use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
@@ -24,7 +23,8 @@ impl FastFieldsWriter {
let mut multi_values_writers = Vec::new(); let mut multi_values_writers = Vec::new();
let mut bytes_value_writers = Vec::new(); let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() { for (field_id, field_entry) in schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let default_value = match *field_entry.field_type() { let default_value = match *field_entry.field_type() {
FieldType::I64(_) => common::i64_to_u64(0i64), FieldType::I64(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64), FieldType::F64(_) => common::f64_to_u64(0.0f64),
@@ -116,7 +116,7 @@ impl FastFieldsWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut FastFieldSerializer, serializer: &mut FastFieldSerializer,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>, mapping: &HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>,
) -> io::Result<()> { ) -> io::Result<()> {
for field_writer in &self.single_value_writers { for field_writer in &self.single_value_writers {
field_writer.serialize(serializer)?; field_writer.serialize(serializer)?;

View File

@@ -1,6 +1,6 @@
use crate::common::CompositeWrite;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::schema::Field; use crate::schema::Field;
use crate::CompositeWrite;
use std::io; use std::io;
use std::io::Write; use std::io::Write;

View File

@@ -22,14 +22,11 @@ impl FieldNormsWriter {
pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> { pub(crate) fn fields_with_fieldnorm(schema: &Schema) -> Vec<Field> {
schema schema
.fields() .fields()
.filter_map(|(field, field_entry)| { .iter()
if field_entry.is_indexed() { .enumerate()
Some(field) .filter(|&(_, field_entry)| field_entry.is_indexed())
} else { .map(|(field, _)| Field(field as u32))
None .collect::<Vec<Field>>()
}
})
.collect::<Vec<_>>()
} }
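This hunk is one of several in the diff where the two sides disagree on the shape of `Schema::fields()`: one side iterates `(Field, &FieldEntry)` pairs directly, the other enumerates a list of entries and rebuilds the `Field` from its index. A neutral sketch of the two equivalent iterations, using toy stand-ins for `Field` and `FieldEntry` rather than tantivy's types:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Field(u32);

struct FieldEntry {
    indexed: bool,
}

/// Variant A: the iterator already yields (Field, &FieldEntry) pairs.
fn indexed_fields_a(fields: &[(Field, FieldEntry)]) -> Vec<Field> {
    fields
        .iter()
        .filter_map(|(field, entry)| if entry.indexed { Some(*field) } else { None })
        .collect()
}

/// Variant B: rebuild each Field from the entry's position in the schema.
fn indexed_fields_b(entries: &[FieldEntry]) -> Vec<Field> {
    entries
        .iter()
        .enumerate()
        .filter(|(_, entry)| entry.indexed)
        .map(|(field_id, _)| Field(field_id as u32))
        .collect()
}

fn main() {
    let entries = vec![
        FieldEntry { indexed: true },
        FieldEntry { indexed: false },
        FieldEntry { indexed: true },
    ];
    let pairs: Vec<(Field, FieldEntry)> = entries
        .iter()
        .enumerate()
        .map(|(id, e)| (Field(id as u32), FieldEntry { indexed: e.indexed }))
        .collect();
    // Both formulations select the same indexed fields.
    assert_eq!(indexed_fields_a(&pairs), indexed_fields_b(&entries));
    assert_eq!(indexed_fields_a(&pairs), vec![Field(0), Field(2)]);
}
```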
/// Initialize with state for tracking the field norm fields /// Initialize with state for tracking the field norm fields
@@ -38,7 +35,7 @@ impl FieldNormsWriter {
let fields = FieldNormsWriter::fields_with_fieldnorm(schema); let fields = FieldNormsWriter::fields_with_fieldnorm(schema);
let max_field = fields let max_field = fields
.iter() .iter()
.map(Field::field_id) .map(|field| field.0)
.max() .max()
.map(|max_field_id| max_field_id as usize + 1) .map(|max_field_id| max_field_id as usize + 1)
.unwrap_or(0); .unwrap_or(0);
@@ -53,8 +50,8 @@ impl FieldNormsWriter {
/// ///
/// Will extend with 0-bytes for documents that have not been seen. /// Will extend with 0-bytes for documents that have not been seen.
pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) { pub fn fill_up_to_max_doc(&mut self, max_doc: DocId) {
for field in self.fields.iter() { for &field in self.fields.iter() {
self.fieldnorms_buffer[field.field_id() as usize].resize(max_doc as usize, 0u8); self.fieldnorms_buffer[field.0 as usize].resize(max_doc as usize, 0u8);
} }
} }
@@ -67,7 +64,7 @@ impl FieldNormsWriter {
/// * field - the field being set /// * field - the field being set
/// * fieldnorm - the number of terms present in document `doc` in field `field` /// * fieldnorm - the number of terms present in document `doc` in field `field`
pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) { pub fn record(&mut self, doc: DocId, field: Field, fieldnorm: u32) {
let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.field_id() as usize]; let fieldnorm_buffer: &mut Vec<u8> = &mut self.fieldnorms_buffer[field.0 as usize];
assert!( assert!(
fieldnorm_buffer.len() <= doc as usize, fieldnorm_buffer.len() <= doc as usize,
"Cannot register a given fieldnorm twice" "Cannot register a given fieldnorm twice"
@@ -80,7 +77,7 @@ impl FieldNormsWriter {
/// Serialize the seen fieldnorm values to the serializer for all fields. /// Serialize the seen fieldnorm values to the serializer for all fields.
pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> { pub fn serialize(&self, fieldnorms_serializer: &mut FieldNormsSerializer) -> io::Result<()> {
for &field in self.fields.iter() { for &field in self.fields.iter() {
let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.field_id() as usize][..]; let fieldnorm_values: &[u8] = &self.fieldnorms_buffer[field.0 as usize][..];
fieldnorms_serializer.serialize_field(field, fieldnorm_values)?; fieldnorms_serializer.serialize_field(field, fieldnorm_values)?;
} }
Ok(()) Ok(())

View File

@@ -258,7 +258,7 @@ mod tests {
let delete_queue = DeleteQueue::new(); let delete_queue = DeleteQueue::new();
let make_op = |i: usize| { let make_op = |i: usize| {
let field = Field::from_field_id(1u32); let field = Field(1u32);
DeleteOperation { DeleteOperation {
opstamp: i as u64, opstamp: i as u64,
term: Term::from_field_u64(field, i as u64), term: Term::from_field_u64(field, i as u64),

View File

@@ -8,7 +8,6 @@ use crate::core::SegmentId;
use crate::core::SegmentMeta; use crate::core::SegmentMeta;
use crate::core::SegmentReader; use crate::core::SegmentReader;
use crate::directory::DirectoryLock; use crate::directory::DirectoryLock;
use crate::directory::TerminatingWrite;
use crate::docset::DocSet; use crate::docset::DocSet;
use crate::error::TantivyError; use crate::error::TantivyError;
use crate::fastfield::write_delete_bitset; use crate::fastfield::write_delete_bitset;
@@ -148,6 +147,7 @@ pub(crate) fn advance_deletes(
}; };
let delete_cursor = segment_entry.delete_cursor(); let delete_cursor = segment_entry.delete_cursor();
compute_deleted_bitset( compute_deleted_bitset(
&mut delete_bitset, &mut delete_bitset,
&segment_reader, &segment_reader,
@@ -167,8 +167,7 @@ pub(crate) fn advance_deletes(
if num_deleted_docs > 0 { if num_deleted_docs > 0 {
segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp); segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
let mut delete_file = segment.open_write(SegmentComponent::DELETE)?; let mut delete_file = segment.open_write(SegmentComponent::DELETE)?;
write_delete_bitset(&delete_bitset, max_doc, &mut delete_file)?; write_delete_bitset(&delete_bitset, &mut delete_file)?;
delete_file.terminate()?;
} }
} }
segment_entry.set_meta(segment.meta().clone()); segment_entry.set_meta(segment.meta().clone());
@@ -177,13 +176,13 @@ pub(crate) fn advance_deletes(
fn index_documents( fn index_documents(
memory_budget: usize, memory_budget: usize,
segment: Segment, segment: &Segment,
grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>, grouped_document_iterator: &mut dyn Iterator<Item = OperationGroup>,
segment_updater: &mut SegmentUpdater, segment_updater: &mut SegmentUpdater,
mut delete_cursor: DeleteCursor, mut delete_cursor: DeleteCursor,
) -> Result<bool> { ) -> Result<bool> {
let schema = segment.schema(); let schema = segment.schema();
let segment_id = segment.id();
let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?; let mut segment_writer = SegmentWriter::for_segment(memory_budget, segment.clone(), &schema)?;
for document_group in grouped_document_iterator { for document_group in grouped_document_iterator {
for doc in document_group { for doc in document_group {
@@ -203,32 +202,22 @@ fn index_documents(
return Ok(false); return Ok(false);
} }
let max_doc = segment_writer.max_doc(); let num_docs = segment_writer.max_doc();
// this is ensured by the call to peek before starting // this is ensured by the call to peek before starting
// the worker thread. // the worker thread.
assert!(max_doc > 0); assert!(num_docs > 0);
let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?; let doc_opstamps: Vec<Opstamp> = segment_writer.finalize()?;
let segment_meta = segment.index().new_segment_meta(segment_id, num_docs);
let segment_with_max_doc = segment.with_max_doc(max_doc);
let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap()); let last_docstamp: Opstamp = *(doc_opstamps.last().unwrap());
let delete_bitset_opt = apply_deletes( let delete_bitset_opt =
&segment_with_max_doc, apply_deletes(&segment, &mut delete_cursor, &doc_opstamps, last_docstamp)?;
&mut delete_cursor,
&doc_opstamps,
last_docstamp,
)?;
let segment_entry = SegmentEntry::new( let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, delete_bitset_opt);
segment_with_max_doc.meta().clone(), Ok(segment_updater.add_segment(segment_entry))
delete_cursor,
delete_bitset_opt,
);
segment_updater.add_segment(segment_entry);
Ok(true)
} }
fn apply_deletes( fn apply_deletes(
@@ -244,9 +233,7 @@ fn apply_deletes(
} }
let segment_reader = SegmentReader::open(segment)?; let segment_reader = SegmentReader::open(segment)?;
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps); let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let mut deleted_bitset = BitSet::with_capacity(segment_reader.max_doc() as usize);
let max_doc = segment.meta().max_doc();
let mut deleted_bitset = BitSet::with_capacity(max_doc as usize);
let may_have_deletes = compute_deleted_bitset( let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset, &mut deleted_bitset,
&segment_reader, &segment_reader,
@@ -418,7 +405,7 @@ impl IndexWriter {
let segment = index.new_segment(); let segment = index.new_segment();
index_documents( index_documents(
mem_budget, mem_budget,
segment, &segment,
&mut document_iterator, &mut document_iterator,
&mut segment_updater, &mut segment_updater,
delete_cursor.clone(), delete_cursor.clone(),
@@ -1190,4 +1177,5 @@ mod tests {
assert!(clear_again.is_ok()); assert!(clear_again.is_ok());
assert!(commit_again.is_ok()); assert!(commit_again.is_ok());
} }
} }

View File

@@ -190,7 +190,8 @@ impl IndexMerger {
fast_field_serializer: &mut FastFieldSerializer, fast_field_serializer: &mut FastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>, mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
) -> Result<()> { ) -> Result<()> {
for (field, field_entry) in self.schema.fields() { for (field_id, field_entry) in self.schema.fields().iter().enumerate() {
let field = Field(field_id as u32);
let field_type = field_entry.field_type(); let field_type = field_entry.field_type();
match *field_type { match *field_type {
FieldType::HierarchicalFacet => { FieldType::HierarchicalFacet => {
@@ -648,12 +649,15 @@ impl IndexMerger {
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, TermOrdinalMapping>> { ) -> Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new(); let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() { for (field_ord, field_entry) in self.schema.fields().iter().enumerate() {
if field_entry.is_indexed() { if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = let indexed_field = Field(field_ord as u32);
self.write_postings_for_field(field, field_entry.field_type(), serializer)? if let Some(term_ordinal_mapping) = self.write_postings_for_field(
{ indexed_field,
term_ordinal_mappings.insert(field, term_ordinal_mapping); field_entry.field_type(),
serializer,
)? {
term_ordinal_mappings.insert(indexed_field, term_ordinal_mapping);
} }
} }
} }

View File

@@ -28,25 +28,3 @@ pub use self::segment_writer::SegmentWriter;
/// Alias for the default merge policy, which is the `LogMergePolicy`. /// Alias for the default merge policy, which is the `LogMergePolicy`.
pub type DefaultMergePolicy = LogMergePolicy; pub type DefaultMergePolicy = LogMergePolicy;
#[cfg(test)]
mod tests {
use crate::schema::{self, Schema};
use crate::{Index, Term};
#[test]
fn test_advance_delete_bug() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_from_tempdir(schema_builder.build()).unwrap();
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
// there must be one deleted document in the segment
index_writer.add_document(doc!(text_field=>"b"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));
// we need enough data to trigger the bug (at least 32 documents)
for _ in 0..32 {
index_writer.add_document(doc!(text_field=>"c"));
}
index_writer.commit().unwrap();
index_writer.commit().unwrap();
}
}

View File

@@ -134,4 +134,5 @@ mod tests {
} }
assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]); assert_eq!(segment_ids(&segment_register), vec![segment_id_merged]);
} }
} }

View File

@@ -199,12 +199,14 @@ impl SegmentUpdater {
self.0.pool.spawn_fn(move || Ok(f(me_clone))) self.0.pool.spawn_fn(move || Ok(f(me_clone)))
} }
pub fn add_segment(&self, segment_entry: SegmentEntry) { pub fn add_segment(&self, segment_entry: SegmentEntry) -> bool {
self.run_async(|segment_updater| { self.run_async(|segment_updater| {
segment_updater.0.segment_manager.add_segment(segment_entry); segment_updater.0.segment_manager.add_segment(segment_entry);
segment_updater.consider_merge_options(); segment_updater.consider_merge_options();
true
}) })
.forget(); .forget();
true
} }
/// Orders `SegmentManager` to remove all segments /// Orders `SegmentManager` to remove all segments

View File

@@ -6,15 +6,14 @@ use crate::fieldnorm::FieldNormsWriter;
use crate::indexer::segment_serializer::SegmentSerializer; use crate::indexer::segment_serializer::SegmentSerializer;
use crate::postings::compute_table_size; use crate::postings::compute_table_size;
use crate::postings::MultiFieldPostingsWriter; use crate::postings::MultiFieldPostingsWriter;
use crate::schema::FieldEntry;
use crate::schema::FieldType; use crate::schema::FieldType;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::Term; use crate::schema::Term;
use crate::schema::Value; use crate::schema::Value;
use crate::schema::{Field, FieldEntry};
use crate::tokenizer::BoxedTokenizer; use crate::tokenizer::BoxedTokenizer;
use crate::tokenizer::FacetTokenizer; use crate::tokenizer::FacetTokenizer;
use crate::tokenizer::PreTokenizedStream; use crate::tokenizer::{TokenStream, Tokenizer};
use crate::tokenizer::{TokenStream, TokenStreamChain, Tokenizer};
use crate::DocId; use crate::DocId;
use crate::Opstamp; use crate::Opstamp;
use crate::Result; use crate::Result;
@@ -71,10 +70,12 @@ impl SegmentWriter {
let table_num_bits = initial_table_size(memory_budget)?; let table_num_bits = initial_table_size(memory_budget)?;
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?; let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits); let multifield_postings = MultiFieldPostingsWriter::new(schema, table_num_bits);
let tokenizers = schema let tokenizers =
.fields() schema
.map( .fields()
|(_, field_entry): (Field, &FieldEntry)| match field_entry.field_type() { .iter()
.map(FieldEntry::field_type)
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options FieldType::Str(ref text_options) => text_options
.get_indexing_options() .get_indexing_options()
.and_then(|text_index_option| { .and_then(|text_index_option| {
@@ -82,9 +83,8 @@ impl SegmentWriter {
segment.index().tokenizers().get(tokenizer_name) segment.index().tokenizers().get(tokenizer_name)
}), }),
_ => None, _ => None,
}, })
) .collect();
.collect();
Ok(SegmentWriter { Ok(SegmentWriter {
max_doc: 0, max_doc: 0,
multifield_postings, multifield_postings,
@@ -159,44 +159,26 @@ impl SegmentWriter {
} }
} }
FieldType::Str(_) => { FieldType::Str(_) => {
let mut token_streams: Vec<Box<dyn TokenStream>> = vec![]; let num_tokens = if let Some(ref mut tokenizer) =
let mut offsets = vec![]; self.tokenizers[field.0 as usize]
let mut total_offset = 0; {
let texts: Vec<&str> = field_values
for field_value in field_values { .iter()
match field_value.value() { .flat_map(|field_value| match *field_value.value() {
Value::PreTokStr(tok_str) => { Value::Str(ref text) => Some(text.as_str()),
offsets.push(total_offset); _ => None,
if let Some(last_token) = tok_str.tokens.last() { })
total_offset += last_token.offset_to; .collect();
} if texts.is_empty() {
0
token_streams } else {
.push(Box::new(PreTokenizedStream::from(tok_str.clone()))); let mut token_stream = tokenizer.token_stream_texts(&texts[..]);
} self.multifield_postings
Value::Str(ref text) => { .index_text(doc_id, field, &mut token_stream)
if let Some(ref mut tokenizer) =
self.tokenizers[field.field_id() as usize]
{
offsets.push(total_offset);
total_offset += text.len();
token_streams.push(tokenizer.token_stream(text));
}
}
_ => (),
} }
}
let num_tokens = if token_streams.is_empty() {
0
} else { } else {
let mut token_stream: Box<dyn TokenStream> = 0
Box::new(TokenStreamChain::new(offsets, token_streams));
self.multifield_postings
.index_text(doc_id, field, &mut token_stream)
}; };
self.fieldnorms_writer.record(doc_id, field, num_tokens); self.fieldnorms_writer.record(doc_id, field, num_tokens);
} }
FieldType::U64(ref int_option) => { FieldType::U64(ref int_option) => {
@@ -314,4 +296,5 @@ mod tests {
assert_eq!(initial_table_size(10_000_000).unwrap(), 17); assert_eq!(initial_table_size(10_000_000).unwrap(), 17);
assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19); assert_eq!(initial_table_size(1_000_000_000).unwrap(), 19);
} }
} }

View File

@@ -1,4 +1,5 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")] #![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![recursion_limit = "100"]
#![cfg_attr(all(feature = "unstable", test), feature(test))] #![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))] #![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))] #![doc(test(attr(allow(unused_variables), deny(warnings))))]
@@ -101,9 +102,6 @@
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
#[cfg_attr(test, macro_use)]
extern crate serde_json;
#[macro_use] #[macro_use]
extern crate log; extern crate log;
@@ -120,6 +118,9 @@ mod functional_test;
#[macro_use] #[macro_use]
mod macros; mod macros;
mod composite_file;
pub(crate) use composite_file::{CompositeFile, CompositeWrite};
pub use crate::error::TantivyError; pub use crate::error::TantivyError;
#[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")] #[deprecated(since = "0.7.0", note = "please use `tantivy::TantivyError` instead")]
@@ -132,22 +133,22 @@ pub type Result<T> = std::result::Result<T, error::TantivyError>;
/// Tantivy DateTime /// Tantivy DateTime
pub type DateTime = chrono::DateTime<chrono::Utc>; pub type DateTime = chrono::DateTime<chrono::Utc>;
mod common; pub use tantivy_common as common;
pub use tantivy_schema as schema;
pub use tantivy_tokenizer as tokenizer;
mod core; mod core;
mod indexer; mod indexer;
#[allow(unused_doc_comments)]
mod error;
pub mod tokenizer;
pub mod collector; pub mod collector;
pub mod directory; pub mod directory;
#[allow(unused_doc_comments)]
mod error;
pub mod fastfield; pub mod fastfield;
pub mod fieldnorm; pub mod fieldnorm;
pub(crate) mod positions; pub(crate) mod positions;
pub mod postings; pub mod postings;
pub mod query; pub mod query;
pub mod schema;
pub mod space_usage; pub mod space_usage;
pub mod store; pub mod store;
pub mod termdict; pub mod termdict;
@@ -212,13 +213,15 @@ pub type Score = f32;
pub type SegmentLocalId = u32; pub type SegmentLocalId = u32;
impl DocAddress { impl DocAddress {
/// Return the segment ordinal id that identifies the segment /// Return the segment ordinal.
/// hosting the document in the `Searcher` it is called from. /// The segment ordinal is an id identifying the segment
/// hosting the document. It is only meaningful, in the context
/// of a searcher.
pub fn segment_ord(self) -> SegmentLocalId { pub fn segment_ord(self) -> SegmentLocalId {
self.0 self.0
} }
/// Return the segment-local `DocId` /// Return the segment local `DocId`
pub fn doc(self) -> DocId { pub fn doc(self) -> DocId {
self.1 self.1
} }
@@ -227,11 +230,11 @@ impl DocAddress {
/// `DocAddress` contains all the necessary information /// `DocAddress` contains all the necessary information
/// to identify a document given a `Searcher` object. /// to identify a document given a `Searcher` object.
/// ///
/// It consists of an id identifying its segment, and /// It consists in an id identifying its segment, and
/// a segment-local `DocId`. /// its segment-local `DocId`.
/// ///
/// The id used for the segment is actually an ordinal /// The id used for the segment is actually an ordinal
/// in the list of `Segment`s held by a `Searcher`. /// in the list of segment hold by a `Searcher`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct DocAddress(pub SegmentLocalId, pub DocId); pub struct DocAddress(pub SegmentLocalId, pub DocId);
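Both sides of the diff agree on the shape of `DocAddress`: a public tuple of a segment ordinal and a segment-local `DocId`, with `segment_ord()` and `doc()` accessors. A tiny usage sketch; the ordinal `0` and doc id `5` are arbitrary example values:

```rust
use tantivy::DocAddress;

fn main() {
    // Document 5 of the first segment listed by the Searcher.
    let addr = DocAddress(0, 5);
    assert_eq!(addr.segment_ord(), 0);
    assert_eq!(addr.doc(), 5);
}
```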
@@ -249,7 +252,6 @@ mod tests {
use crate::Postings; use crate::Postings;
use crate::ReloadPolicy; use crate::ReloadPolicy;
use rand::distributions::Bernoulli; use rand::distributions::Bernoulli;
use rand::distributions::Uniform;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
@@ -266,14 +268,6 @@ mod tests {
(a - b).abs() < 0.0005 * (a + b).abs() (a - b).abs() < 0.0005 * (a + b).abs()
} }
pub fn generate_nonunique_unsorted(max_value: u32, n_elems: usize) -> Vec<u32> {
let seed: [u8; 32] = [1; 32];
StdRng::from_seed(seed)
.sample_iter(&Uniform::new(0u32, max_value))
.take(n_elems)
.collect::<Vec<u32>>()
}
pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> { pub fn sample_with_seed(n: u32, ratio: f64, seed_val: u8) -> Vec<u32> {
StdRng::from_seed([seed_val; 32]) StdRng::from_seed([seed_val; 32])
.sample_iter(&Bernoulli::new(ratio).unwrap()) .sample_iter(&Bernoulli::new(ratio).unwrap())
@@ -283,10 +277,6 @@ mod tests {
.collect() .collect()
} }
pub fn sample(n: u32, ratio: f64) -> Vec<u32> {
sample_with_seed(n, ratio, 4)
}
#[test] #[test]
#[cfg(feature = "mmap")] #[cfg(feature = "mmap")]
fn test_indexing() { fn test_indexing() {

View File

@@ -274,15 +274,13 @@ pub mod tests {
mod bench { mod bench {
use super::*; use super::*;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng; use rand::SeedableRng;
use rand::{Rng, XorShiftRng};
use test::Bencher; use test::Bencher;
fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> { fn generate_array_with_seed(n: usize, ratio: f64, seed_val: u8) -> Vec<u32> {
let mut seed: [u8; 32] = [0; 32]; let seed: &[u8; 16] = &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, seed_val];
seed[31] = seed_val; let mut rng: XorShiftRng = XorShiftRng::from_seed(*seed);
let mut rng = StdRng::from_seed(seed);
(0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect() (0u32..).filter(|_| rng.gen_bool(ratio)).take(n).collect()
} }

View File

@@ -356,9 +356,9 @@ pub mod tests {
#[test] #[test]
fn test_skip_next() { fn test_skip_next() {
let term_0 = Term::from_field_u64(Field::from_field_id(0), 0); let term_0 = Term::from_field_u64(Field(0), 0);
let term_1 = Term::from_field_u64(Field::from_field_id(0), 1); let term_1 = Term::from_field_u64(Field(0), 1);
let term_2 = Term::from_field_u64(Field::from_field_id(0), 2); let term_2 = Term::from_field_u64(Field(0), 2);
let num_docs = 300u32; let num_docs = 300u32;
@@ -511,19 +511,19 @@ pub mod tests {
} }
pub static TERM_A: Lazy<Term> = Lazy::new(|| { pub static TERM_A: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "a") Term::from_field_text(field, "a")
}); });
pub static TERM_B: Lazy<Term> = Lazy::new(|| { pub static TERM_B: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "b") Term::from_field_text(field, "b")
}); });
pub static TERM_C: Lazy<Term> = Lazy::new(|| { pub static TERM_C: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "c") Term::from_field_text(field, "c")
}); });
pub static TERM_D: Lazy<Term> = Lazy::new(|| { pub static TERM_D: Lazy<Term> = Lazy::new(|| {
let field = Field::from_field_id(0); let field = Field(0);
Term::from_field_text(field, "d") Term::from_field_text(field, "d")
}); });
@@ -622,23 +622,23 @@ pub mod tests {
assert!(!postings_unopt.advance()); assert!(!postings_unopt.advance());
} }
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use super::tests::*; use super::tests::*;
use crate::docset::SkipResult; use docset::SkipResult;
use crate::query::Intersection; use query::Intersection;
use crate::schema::IndexRecordOption; use schema::IndexRecordOption;
use crate::tests;
use crate::DocSet;
use test::{self, Bencher}; use test::{self, Bencher};
use tests;
use DocSet;
#[bench] #[bench]
fn bench_segment_postings(b: &mut Bencher) { fn bench_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap(); let searcher = INDEX.searcher();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
b.iter(|| { b.iter(|| {
@@ -652,8 +652,7 @@ mod bench {
#[bench] #[bench]
fn bench_segment_intersection(b: &mut Bencher) { fn bench_segment_intersection(b: &mut Bencher) {
let reader = INDEX.reader().unwrap(); let searcher = INDEX.searcher();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
b.iter(|| { b.iter(|| {
let segment_postings_a = segment_reader let segment_postings_a = segment_reader
@@ -683,8 +682,7 @@ mod bench {
} }
fn bench_skip_next(p: f64, b: &mut Bencher) { fn bench_skip_next(p: f64, b: &mut Bencher) {
let reader = INDEX.reader().unwrap(); let searcher = INDEX.searcher();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
let docs = tests::sample(segment_reader.num_docs(), p); let docs = tests::sample(segment_reader.num_docs(), p);
@@ -739,8 +737,7 @@ mod bench {
#[bench] #[bench]
fn bench_iterate_segment_postings(b: &mut Bencher) { fn bench_iterate_segment_postings(b: &mut Bencher) {
let reader = INDEX.reader().unwrap(); let searcher = INDEX.searcher();
let searcher = reader.searcher();
let segment_reader = searcher.segment_reader(0); let segment_reader = searcher.segment_reader(0);
b.iter(|| { b.iter(|| {
let n: u32 = test::black_box(17); let n: u32 = test::black_box(17);

View File

@@ -12,7 +12,6 @@ use crate::tokenizer::TokenStream;
use crate::tokenizer::{Token, MAX_TOKEN_LEN}; use crate::tokenizer::{Token, MAX_TOKEN_LEN};
use crate::DocId; use crate::DocId;
use crate::Result; use crate::Result;
use fnv::FnvHashMap;
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
use std::marker::PhantomData; use std::marker::PhantomData;
@@ -61,12 +60,12 @@ fn make_field_partition(
.iter() .iter()
.map(|(key, _, _)| Term::wrap(key).field()) .map(|(key, _, _)| Term::wrap(key).field())
.enumerate(); .enumerate();
let mut prev_field_opt = None; let mut prev_field = Field(u32::max_value());
let mut fields = vec![]; let mut fields = vec![];
let mut offsets = vec![]; let mut offsets = vec![];
for (offset, field) in term_offsets_it { for (offset, field) in term_offsets_it {
if Some(field) != prev_field_opt { if field != prev_field {
prev_field_opt = Some(field); prev_field = field;
fields.push(field); fields.push(field);
offsets.push(offset); offsets.push(offset);
} }
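`make_field_partition` walks term keys that are already sorted (and therefore grouped by field) and records the offset at which each new field starts; the two sides differ only in whether the "previous field" sentinel is an `Option` or a `Field(u32::max_value())` placeholder. The grouping idea in isolation, as a generic sketch rather than the crate's code:

```rust
/// Records, for a sorted slice of keys, each distinct key and the offset
/// at which its run starts. Mirrors the prev-field loop above.
fn partition_starts<K: PartialEq + Copy>(keys: &[K]) -> Vec<(K, usize)> {
    let mut prev: Option<K> = None;
    let mut starts = Vec::new();
    for (offset, &key) in keys.iter().enumerate() {
        if Some(key) != prev {
            prev = Some(key);
            starts.push((key, offset));
        }
    }
    starts
}

fn main() {
    // Field ids as they would appear once term keys are sorted.
    let fields = [0u32, 0, 0, 2, 2, 5];
    assert_eq!(partition_starts(&fields), vec![(0, 0), (2, 3), (5, 5)]);
}
```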
@@ -86,7 +85,8 @@ impl MultiFieldPostingsWriter {
let term_index = TermHashMap::new(table_bits); let term_index = TermHashMap::new(table_bits);
let per_field_postings_writers: Vec<_> = schema let per_field_postings_writers: Vec<_> = schema
.fields() .fields()
.map(|(_, field_entry)| posting_from_field_entry(field_entry)) .iter()
.map(|field_entry| posting_from_field_entry(field_entry))
.collect(); .collect();
MultiFieldPostingsWriter { MultiFieldPostingsWriter {
heap: MemoryArena::new(), heap: MemoryArena::new(),
@@ -106,8 +106,7 @@ impl MultiFieldPostingsWriter {
field: Field, field: Field,
token_stream: &mut dyn TokenStream, token_stream: &mut dyn TokenStream,
) -> u32 { ) -> u32 {
let postings_writer = let postings_writer = self.per_field_postings_writers[field.0 as usize].deref_mut();
self.per_field_postings_writers[field.field_id() as usize].deref_mut();
postings_writer.index_text( postings_writer.index_text(
&mut self.term_index, &mut self.term_index,
doc, doc,
@@ -118,8 +117,7 @@ impl MultiFieldPostingsWriter {
} }
pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId { pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId {
let postings_writer = let postings_writer = self.per_field_postings_writers[term.field().0 as usize].deref_mut();
self.per_field_postings_writers[term.field().field_id() as usize].deref_mut();
postings_writer.subscribe(&mut self.term_index, doc, 0u32, term, &mut self.heap) postings_writer.subscribe(&mut self.term_index, doc, 0u32, term, &mut self.heap)
} }
@@ -129,12 +127,12 @@ impl MultiFieldPostingsWriter {
pub fn serialize( pub fn serialize(
&self, &self,
serializer: &mut InvertedIndexSerializer, serializer: &mut InvertedIndexSerializer,
) -> Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> { ) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> =
self.term_index.iter().collect(); self.term_index.iter().collect();
term_offsets.sort_unstable_by_key(|&(k, _, _)| k); term_offsets.sort_unstable_by_key(|&(k, _, _)| k);
let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> = let mut unordered_term_mappings: HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>> =
HashMap::new(); HashMap::new();
let field_offsets = make_field_partition(&term_offsets); let field_offsets = make_field_partition(&term_offsets);
@@ -149,7 +147,7 @@ impl MultiFieldPostingsWriter {
let unordered_term_ids = term_offsets[start..stop] let unordered_term_ids = term_offsets[start..stop]
.iter() .iter()
.map(|&(_, _, bucket)| bucket); .map(|&(_, _, bucket)| bucket);
let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids let mapping: HashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids
.enumerate() .enumerate()
.map(|(term_ord, unord_term_id)| { .map(|(term_ord, unord_term_id)| {
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal) (unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
@@ -161,7 +159,7 @@ impl MultiFieldPostingsWriter {
FieldType::Bytes => {} FieldType::Bytes => {}
} }
let postings_writer = &self.per_field_postings_writers[field.field_id() as usize]; let postings_writer = &self.per_field_postings_writers[field.0 as usize];
let mut field_serializer = let mut field_serializer =
serializer.new_field(field, postings_writer.total_num_tokens())?; serializer.new_field(field, postings_writer.total_num_tokens())?;
postings_writer.serialize( postings_writer.serialize(

View File

@@ -1,6 +1,6 @@
use super::TermInfo; use super::TermInfo;
use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt}; use crate::common::{BinarySerializable, VInt};
use crate::common::{CompositeWrite, CountingWriter};
use crate::core::Segment; use crate::core::Segment;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::positions::PositionSerializer; use crate::positions::PositionSerializer;
@@ -10,6 +10,7 @@ use crate::postings::USE_SKIP_INFO_LIMIT;
use crate::schema::Schema; use crate::schema::Schema;
use crate::schema::{Field, FieldEntry, FieldType}; use crate::schema::{Field, FieldEntry, FieldType};
use crate::termdict::{TermDictionaryBuilder, TermOrdinal}; use crate::termdict::{TermDictionaryBuilder, TermOrdinal};
use crate::CompositeWrite;
use crate::DocId; use crate::DocId;
use crate::Result; use crate::Result;
use std::io::{self, Write}; use std::io::{self, Write};
@@ -141,7 +142,10 @@ impl<'a> FieldSerializer<'a> {
FieldType::Str(ref text_options) => { FieldType::Str(ref text_options) => {
if let Some(text_indexing_options) = text_options.get_indexing_options() { if let Some(text_indexing_options) = text_options.get_indexing_options() {
let index_option = text_indexing_options.index_option(); let index_option = text_indexing_options.index_option();
(index_option.has_freq(), index_option.has_positions()) (
index_option.is_termfreq_enabled(),
index_option.is_position_enabled(),
)
} else { } else {
(false, false) (false, false)
} }
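Both sides compute the same pair of booleans from the field's `IndexRecordOption`; they differ only in the accessor names (`has_freq`/`has_positions` versus `is_termfreq_enabled`/`is_position_enabled`). A sketch of that mapping over the three record options, written as a plain match for illustration rather than tantivy's own accessors:

```rust
use tantivy::schema::IndexRecordOption;

/// Illustrative mapping from the record option to
/// (term frequencies stored, positions stored).
fn freq_and_position_flags(opt: IndexRecordOption) -> (bool, bool) {
    match opt {
        IndexRecordOption::Basic => (false, false),
        IndexRecordOption::WithFreqs => (true, false),
        IndexRecordOption::WithFreqsAndPositions => (true, true),
    }
}

fn main() {
    assert_eq!(
        freq_and_position_flags(IndexRecordOption::WithFreqs),
        (true, false)
    );
}
```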

View File

@@ -310,7 +310,6 @@ mod bench {
use super::super::MemoryArena; use super::super::MemoryArena;
use super::ExpUnrolledLinkedList; use super::ExpUnrolledLinkedList;
use byteorder::{NativeEndian, WriteBytesExt}; use byteorder::{NativeEndian, WriteBytesExt};
use std::iter;
use test::Bencher; use test::Bencher;
const NUM_STACK: usize = 10_000; const NUM_STACK: usize = 10_000;
@@ -336,10 +335,11 @@ mod bench {
fn bench_push_stack(bench: &mut Bencher) { fn bench_push_stack(bench: &mut Bencher) {
bench.iter(|| { bench.iter(|| {
let mut heap = MemoryArena::new(); let mut heap = MemoryArena::new();
let mut stacks: Vec<ExpUnrolledLinkedList> = let mut stacks = Vec::with_capacity(100);
iter::repeat_with(ExpUnrolledLinkedList::new) for _ in 0..NUM_STACK {
.take(NUM_STACK) let mut stack = ExpUnrolledLinkedList::new();
.collect(); stacks.push(stack);
}
for s in 0..NUM_STACK { for s in 0..NUM_STACK {
for i in 0u32..STACK_SIZE { for i in 0u32..STACK_SIZE {
let t = s * 392017 % NUM_STACK; let t = s * 392017 % NUM_STACK;

View File

@@ -45,7 +45,7 @@ impl BinarySerializable for TermInfo {
mod tests { mod tests {
use super::TermInfo; use super::TermInfo;
use crate::common::test::fixed_size_test; use crate::common::fixed_size_test;
#[test] #[test]
fn test_fixed_size() { fn test_fixed_size() {

View File

@@ -130,4 +130,5 @@ mod tests {
assert!(!scorer.advance()); assert!(!scorer.advance());
} }
} }
} }

View File

@@ -223,12 +223,13 @@ mod bench {
use super::BitSet; use super::BitSet;
use super::BitSetDocSet; use super::BitSetDocSet;
use crate::test; use test;
use crate::tests; use tests;
use crate::DocSet; use DocSet;
#[bench] #[bench]
fn bench_bitset_1pct_insert(b: &mut test::Bencher) { fn bench_bitset_1pct_insert(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000); let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
b.iter(|| { b.iter(|| {
let mut bitset = BitSet::with_max_value(1_000_000); let mut bitset = BitSet::with_max_value(1_000_000);
@@ -240,6 +241,7 @@ mod bench {
#[bench] #[bench]
fn bench_bitset_1pct_clone(b: &mut test::Bencher) { fn bench_bitset_1pct_clone(b: &mut test::Bencher) {
use tests;
let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000); let els = tests::generate_nonunique_unsorted(1_000_000u32, 10_000);
let mut bitset = BitSet::with_max_value(1_000_000); let mut bitset = BitSet::with_max_value(1_000_000);
for el in els { for el in els {

View File

@@ -137,4 +137,5 @@ mod tests {
fn test_idf() { fn test_idf() {
assert_nearly_equals(idf(1, 2), 0.6931472); assert_nearly_equals(idf(1, 2), 0.6931472);
} }
} }

View File

@@ -9,8 +9,7 @@ use crate::Result;
use crate::Searcher; use crate::Searcher;
use std::collections::BTreeSet; use std::collections::BTreeSet;
/// The boolean query returns a set of documents /// The boolean query combines a set of queries
/// that matches the Boolean combination of constituent subqueries.
/// ///
/// The documents matched by the boolean query are /// The documents matched by the boolean query are
/// those which /// those which
@@ -20,113 +19,6 @@ use std::collections::BTreeSet;
/// `MustNot` occurence. /// `MustNot` occurence.
/// * match at least one of the subqueries that is not /// * match at least one of the subqueries that is not
/// a `MustNot` occurence. /// a `MustNot` occurence.
///
///
/// You can combine other query types and their `Occur`ances into one `BooleanQuery`
///
/// ```rust
///use tantivy::collector::Count;
///use tantivy::doc;
///use tantivy::query::{BooleanQuery, Occur, PhraseQuery, Query, TermQuery};
///use tantivy::schema::{IndexRecordOption, Schema, TEXT};
///use tantivy::Term;
///use tantivy::{Index, Result};
///
///fn main() -> Result<()> {
/// let mut schema_builder = Schema::builder();
/// let title = schema_builder.add_text_field("title", TEXT);
/// let body = schema_builder.add_text_field("body", TEXT);
/// let schema = schema_builder.build();
/// let index = Index::create_in_ram(schema);
/// {
/// let mut index_writer = index.writer(3_000_000)?;
/// index_writer.add_document(doc!(
/// title => "The Name of the Wind",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of Muadib",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// body => "hidden",
/// ));
/// index_writer.add_document(doc!(
/// title => "A Dairy Cow",
/// body => "found",
/// ));
/// index_writer.add_document(doc!(
/// title => "The Diary of a Young Girl",
/// ));
/// index_writer.commit().unwrap();
/// }
///
/// let reader = index.reader()?;
/// let searcher = reader.searcher();
///
/// // Make TermQuery's for "girl" and "diary" in the title
/// let girl_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "girl"),
/// IndexRecordOption::Basic,
/// ));
/// let diary_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "diary"),
/// IndexRecordOption::Basic,
/// ));
/// // A TermQuery with "found" in the body
/// let body_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(body, "found"),
/// IndexRecordOption::Basic,
/// ));
/// // TermQuery "diary" must and "girl" must not be present
/// let queries_with_occurs1 = vec![
/// (Occur::Must, diary_term_query.box_clone()),
/// (Occur::MustNot, girl_term_query),
/// ];
/// // Make a BooleanQuery equivalent to
/// // title:+diary title:-girl
/// let diary_must_and_girl_mustnot = BooleanQuery::from(queries_with_occurs1);
/// let count1 = searcher.search(&diary_must_and_girl_mustnot, &Count)?;
/// assert_eq!(count1, 1);
///
/// // TermQuery for "cow" in the title
/// let cow_term_query: Box<dyn Query> = Box::new(TermQuery::new(
/// Term::from_field_text(title, "cow"),
/// IndexRecordOption::Basic,
/// ));
/// // "title:diary OR title:cow"
/// let title_diary_or_cow = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, cow_term_query),
/// ]);
/// let count2 = searcher.search(&title_diary_or_cow, &Count)?;
/// assert_eq!(count2, 4);
///
/// // Make a `PhraseQuery` from a vector of `Term`s
/// let phrase_query: Box<dyn Query> = Box::new(PhraseQuery::new(vec![
/// Term::from_field_text(title, "dairy"),
/// Term::from_field_text(title, "cow"),
/// ]));
/// // You can combine subqueries of different types into 1 BooleanQuery:
/// // `TermQuery` and `PhraseQuery`
/// // "title:diary OR "dairy cow"
/// let term_of_phrase_query = BooleanQuery::from(vec![
/// (Occur::Should, diary_term_query.box_clone()),
/// (Occur::Should, phrase_query.box_clone()),
/// ]);
/// let count3 = searcher.search(&term_of_phrase_query, &Count)?;
/// assert_eq!(count3, 4);
///
/// // You can nest one BooleanQuery inside another
/// // body:found AND ("title:diary OR "dairy cow")
/// let nested_query = BooleanQuery::from(vec![
/// (Occur::Must, body_term_query),
/// (Occur::Must, Box::new(term_of_phrase_query))
/// ]);
/// let count4 = searcher.search(&nested_query, &Count)?;
/// assert_eq!(count4, 1);
/// Ok(())
///}
/// ```
#[derive(Debug)] #[derive(Debug)]
pub struct BooleanQuery { pub struct BooleanQuery {
subqueries: Vec<(Occur, Box<dyn Query>)>, subqueries: Vec<(Occur, Box<dyn Query>)>,
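
The doctest above is long; here is the same `BooleanQuery::from(Vec<(Occur, Box<dyn Query>)>)` pattern condensed into a short sketch. It assumes the 0.10/0.11-era API used in that doctest (`Index::create_in_ram`, `index.writer`, the `Count` collector), so treat it as illustrative rather than guaranteed-current:

```rust
use tantivy::collector::Count;
use tantivy::query::{BooleanQuery, Occur, Query, TermQuery};
use tantivy::schema::{IndexRecordOption, Schema, TEXT};
use tantivy::{doc, Index, Term};

fn main() -> tantivy::Result<()> {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    let mut writer = index.writer(3_000_000)?;
    writer.add_document(doc!(title => "The Diary of Muadib"));
    writer.add_document(doc!(title => "The Diary of a Young Girl"));
    writer.commit()?;

    let searcher = index.reader()?.searcher();
    let diary: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(title, "diary"),
        IndexRecordOption::Basic,
    ));
    let girl: Box<dyn Query> = Box::new(TermQuery::new(
        Term::from_field_text(title, "girl"),
        IndexRecordOption::Basic,
    ));

    // Equivalent to "title:+diary title:-girl".
    let query = BooleanQuery::from(vec![(Occur::Must, diary), (Occur::MustNot, girl)]);
    assert_eq!(searcher.search(&query, &Count)?, 1);
    Ok(())
}
```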


@@ -247,7 +247,9 @@ mod tests {
let reader = index.reader().unwrap(); let reader = index.reader().unwrap();
let searcher = reader.searcher(); let searcher = reader.searcher();
let query_parser = QueryParser::for_index(&index, vec![title, text]); let query_parser = QueryParser::for_index(&index, vec![title, text]);
let query = query_parser.parse_query("Оксана Лифенко").unwrap(); let query = query_parser
.parse_query("Оксана Лифенко")
.unwrap();
let weight = query.weight(&searcher, true).unwrap(); let weight = query.weight(&searcher, true).unwrap();
let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap(); let mut scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
scorer.advance(); scorer.advance();


@@ -175,4 +175,5 @@ mod tests {
sample_skip, sample_skip,
); );
} }
} }


@@ -45,7 +45,7 @@ pub fn intersect_scorers(mut scorers: Vec<Box<dyn Scorer>>) -> Box<dyn Scorer> {
}) })
} }
/// Creates a `DocSet` that iterate through the intersection of two or more `DocSet`s. /// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>> { pub struct Intersection<TDocSet: DocSet, TOtherDocSet: DocSet = Box<dyn Scorer>> {
left: TDocSet, left: TDocSet,
right: TDocSet, right: TDocSet,


@@ -5,7 +5,7 @@ use Score;
use SkipResult; use SkipResult;
/// Creates a `DocSet` that iterate through the intersection of two `DocSet`s. /// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct IntersectionTwoTerms<TDocSet> { pub struct IntersectionTwoTerms<TDocSet> {
left: TDocSet, left: TDocSet,
right: TDocSet right: TDocSet


@@ -12,6 +12,7 @@ mod exclude;
mod explanation; mod explanation;
mod fuzzy_query; mod fuzzy_query;
mod intersection; mod intersection;
mod occur;
mod phrase_query; mod phrase_query;
mod query; mod query;
mod query_parser; mod query_parser;
@@ -42,6 +43,7 @@ pub use self::exclude::Exclude;
pub use self::explanation::Explanation; pub use self::explanation::Explanation;
pub use self::fuzzy_query::FuzzyTermQuery; pub use self::fuzzy_query::FuzzyTermQuery;
pub use self::intersection::intersect_scorers; pub use self::intersection::intersect_scorers;
pub use self::occur::Occur;
pub use self::phrase_query::PhraseQuery; pub use self::phrase_query::PhraseQuery;
pub use self::query::Query; pub use self::query::Query;
pub use self::query_parser::QueryParser; pub use self::query_parser::QueryParser;
@@ -53,7 +55,6 @@ pub use self::scorer::ConstScorer;
pub use self::scorer::Scorer; pub use self::scorer::Scorer;
pub use self::term_query::TermQuery; pub use self::term_query::TermQuery;
pub use self::weight::Weight; pub use self::weight::Weight;
pub use tantivy_query_grammar::Occur;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {


@@ -1,8 +1,5 @@
use std::fmt;
use std::fmt::Write;
/// Defines whether a term in a query must be present, /// Defines whether a term in a query must be present,
/// should be present or must be not present. /// should be present or must not be present.
#[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)] #[derive(Debug, Clone, Hash, Copy, Eq, PartialEq)]
pub enum Occur { pub enum Occur {
/// For a given document to be considered for scoring, /// For a given document to be considered for scoring,
@@ -21,38 +18,32 @@ impl Occur {
/// - `Should` => '?', /// - `Should` => '?',
/// - `Must` => '+' /// - `Must` => '+'
/// - `Not` => '-' /// - `Not` => '-'
fn to_char(self) -> char { pub fn to_char(self) -> char {
match self { match self {
Occur::Should => '?', Occur::Should => '?',
Occur::Must => '+', Occur::Must => '+',
Occur::MustNot => '-', Occur::MustNot => '-',
} }
} }
}
/// Compose two occur values. /// Compose two occur values.
pub fn compose(left: Occur, right: Occur) -> Occur { pub fn compose_occur(left: Occur, right: Occur) -> Occur {
match left { match left {
Occur::Should => right, Occur::Should => right,
Occur::Must => { Occur::Must => {
if right == Occur::MustNot { if right == Occur::MustNot {
Occur::MustNot Occur::MustNot
} else { } else {
Occur::Must Occur::Must
}
} }
Occur::MustNot => { }
if right == Occur::MustNot { Occur::MustNot => {
Occur::Must if right == Occur::MustNot {
} else { Occur::Must
Occur::MustNot } else {
} Occur::MustNot
} }
} }
} }
} }
impl fmt::Display for Occur {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char(self.to_char())
}
}
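
Both sides of this hunk implement the same composition table (`Occur::compose` on one side, the free function `compose_occur` on the other); it is applied when unary operators in a query are nested. A standalone sketch of those rules with a couple of spot checks — note that two `MustNot`s cancel out to `Must`:

```rust
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum Occur {
    Should,
    Must,
    MustNot,
}

/// Compose two occur values, mirroring the match shown in the diff above.
fn compose(left: Occur, right: Occur) -> Occur {
    match left {
        Occur::Should => right,
        Occur::Must => {
            if right == Occur::MustNot {
                Occur::MustNot
            } else {
                Occur::Must
            }
        }
        Occur::MustNot => {
            if right == Occur::MustNot {
                // Two negations cancel out.
                Occur::Must
            } else {
                Occur::MustNot
            }
        }
    }
}

fn main() {
    assert_eq!(compose(Occur::MustNot, Occur::MustNot), Occur::Must);
    assert_eq!(compose(Occur::Must, Occur::MustNot), Occur::MustNot);
    assert_eq!(compose(Occur::Should, Occur::Must), Occur::Must);
}
```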


@@ -40,7 +40,7 @@ impl PhraseQuery {
PhraseQuery::new_with_offset(terms_with_offset) PhraseQuery::new_with_offset(terms_with_offset)
} }
/// Creates a new `PhraseQuery` given a list of terms and their offsets. /// Creates a new `PhraseQuery` given a list of terms and there offsets.
/// ///
/// Can be used to provide custom offset for each term. /// Can be used to provide custom offset for each term.
pub fn new_with_offset(mut terms: Vec<(usize, Term)>) -> PhraseQuery { pub fn new_with_offset(mut terms: Vec<(usize, Term)>) -> PhraseQuery {
@@ -73,7 +73,7 @@ impl PhraseQuery {
.collect::<Vec<Term>>() .collect::<Vec<Term>>()
} }
/// Returns the `PhraseWeight` for the given phrase query given a specific `searcher`. /// Returns the `PhraseWeight` for the given phrase query given a specific `searcher`.
/// ///
/// This function is the same as `.weight(...)` except it returns /// This function is the same as `.weight(...)` except it returns
/// a specialized type `PhraseWeight` instead of a Boxed trait. /// a specialized type `PhraseWeight` instead of a Boxed trait.


@@ -1,4 +1,6 @@
mod query_grammar;
mod query_parser; mod query_parser;
mod user_input_ast;
pub mod logical_ast; pub mod logical_ast;
pub use self::query_parser::QueryParser; pub use self::query_parser::QueryParser;


@@ -1,5 +1,6 @@
use super::user_input_ast::*; use super::user_input_ast::*;
use crate::Occur; use crate::query::occur::Occur;
use crate::query::query_parser::user_input_ast::UserInputBound;
use combine::char::*; use combine::char::*;
use combine::error::StreamError; use combine::error::StreamError;
use combine::stream::StreamErrorFor; use combine::stream::StreamErrorFor;


@@ -1,5 +1,9 @@
use super::logical_ast::*; use super::logical_ast::*;
use super::query_grammar::parse_to_ast;
use super::user_input_ast::*;
use crate::core::Index; use crate::core::Index;
use crate::query::occur::compose_occur;
use crate::query::query_parser::logical_ast::LogicalAST;
use crate::query::AllQuery; use crate::query::AllQuery;
use crate::query::BooleanQuery; use crate::query::BooleanQuery;
use crate::query::EmptyQuery; use crate::query::EmptyQuery;
@@ -12,11 +16,11 @@ use crate::schema::IndexRecordOption;
use crate::schema::{Field, Schema}; use crate::schema::{Field, Schema};
use crate::schema::{FieldType, Term}; use crate::schema::{FieldType, Term};
use crate::tokenizer::TokenizerManager; use crate::tokenizer::TokenizerManager;
use combine::Parser;
use std::borrow::Cow; use std::borrow::Cow;
use std::num::{ParseFloatError, ParseIntError}; use std::num::{ParseFloatError, ParseIntError};
use std::ops::Bound; use std::ops::Bound;
use std::str::FromStr; use std::str::FromStr;
use tantivy_query_grammar::{UserInputAST, UserInputBound, UserInputLeaf};
/// Possible error that may happen when parsing a query. /// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq, Fail)] #[derive(Debug, PartialEq, Eq, Fail)]
@@ -218,8 +222,9 @@ impl QueryParser {
/// Parse the user query into an AST. /// Parse the user query into an AST.
fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> { fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> {
let user_input_ast = let (user_input_ast, _remaining) = parse_to_ast()
tantivy_query_grammar::parse_query(query).map_err(|_| QueryParserError::SyntaxError)?; .parse(query)
.map_err(|_| QueryParserError::SyntaxError)?;
self.compute_logical_ast(user_input_ast) self.compute_logical_ast(user_input_ast)
} }
@@ -394,7 +399,7 @@ impl QueryParser {
let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new(); let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
for sub_query in sub_queries { for sub_query in sub_queries {
let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?; let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
let new_occur = Occur::compose(default_occur, occur); let new_occur = compose_occur(default_occur, occur);
logical_sub_queries.push((new_occur, sub_ast)); logical_sub_queries.push((new_occur, sub_ast));
} }
Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries))) Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
@@ -402,7 +407,7 @@ impl QueryParser {
UserInputAST::Unary(left_occur, subquery) => { UserInputAST::Unary(left_occur, subquery) => {
let (right_occur, logical_sub_queries) = let (right_occur, logical_sub_queries) =
self.compute_logical_ast_with_occur(*subquery)?; self.compute_logical_ast_with_occur(*subquery)?;
Ok((Occur::compose(left_occur, right_occur), logical_sub_queries)) Ok((compose_occur(left_occur, right_occur), logical_sub_queries))
} }
UserInputAST::Leaf(leaf) => { UserInputAST::Leaf(leaf) => {
let result_ast = self.compute_logical_ast_from_leaf(*leaf)?; let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
@@ -674,19 +679,13 @@ mod test {
test_parse_query_to_logical_ast_helper( test_parse_query_to_logical_ast_helper(
"signed:-2324", "signed:-2324",
&format!( &format!("{:?}", Term::from_field_i64(Field(2u32), -2324)),
"{:?}",
Term::from_field_i64(Field::from_field_id(2u32), -2324)
),
false, false,
); );
test_parse_query_to_logical_ast_helper( test_parse_query_to_logical_ast_helper(
"float:2.5", "float:2.5",
&format!( &format!("{:?}", Term::from_field_f64(Field(10u32), 2.5)),
"{:?}",
Term::from_field_f64(Field::from_field_id(10u32), 2.5)
),
false, false,
); );
} }
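
For context, a minimal sketch of the `QueryParser` entry points this file exercises (`for_index` and `parse_query`), which look the same on both sides of the grammar change; the `+`/`-` prefixes in the query string feed the `Occur` composition shown earlier. Field names here are invented for the example:

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{Schema, TEXT};
use tantivy::Index;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let body = schema_builder.add_text_field("body", TEXT);
    let index = Index::create_in_ram(schema_builder.build());

    // Default fields are searched when a clause has no explicit field prefix.
    let query_parser = QueryParser::for_index(&index, vec![title, body]);

    // "+" marks a Must clause, "-" a MustNot clause.
    let _query = query_parser
        .parse_query("+title:diary -title:girl")
        .expect("syntactically valid query");
}
```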


@@ -1,7 +1,7 @@
use std::fmt; use std::fmt;
use std::fmt::{Debug, Formatter}; use std::fmt::{Debug, Formatter};
use crate::Occur; use crate::query::Occur;
#[derive(PartialEq)] #[derive(PartialEq)]
pub enum UserInputLeaf { pub enum UserInputLeaf {
@@ -151,7 +151,7 @@ impl fmt::Debug for UserInputAST {
Ok(()) Ok(())
} }
UserInputAST::Unary(ref occur, ref subquery) => { UserInputAST::Unary(ref occur, ref subquery) => {
write!(formatter, "{}({:?})", occur, subquery) write!(formatter, "{}({:?})", occur.to_char(), subquery)
} }
UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery), UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
} }


@@ -479,4 +479,5 @@ mod tests {
91 91
); );
} }
} }


@@ -190,4 +190,5 @@ mod tests {
skip_docs, skip_docs,
); );
} }
} }


@@ -118,7 +118,7 @@ mod tests {
#[test] #[test]
fn test_term_query_debug() { fn test_term_query_debug() {
let term_query = TermQuery::new( let term_query = TermQuery::new(
Term::from_field_text(Field::from_field_id(1), "hello"), Term::from_field_text(Field(1), "hello"),
IndexRecordOption::WithFreqs, IndexRecordOption::WithFreqs,
); );
assert_eq!( assert_eq!(


@@ -28,7 +28,7 @@ where
} }
} }
/// Creates a `DocSet` that iterate through the union of two or more `DocSet`s. /// Creates a `DocSet` that iterator through the intersection of two `DocSet`s.
pub struct Union<TScorer, TScoreCombiner = DoNothingCombiner> { pub struct Union<TScorer, TScoreCombiner = DoNothingCombiner> {
docsets: Vec<TScorer>, docsets: Vec<TScorer>,
bitsets: Box<[TinySet; HORIZON_NUM_TINYBITSETS]>, bitsets: Box<[TinySet; HORIZON_NUM_TINYBITSETS]>,
@@ -409,17 +409,20 @@ mod tests {
vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000], vec![1, 2, 3, 7, 8, 9, 99, 100, 101, 500, 20000],
); );
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use crate::query::score_combiner::DoNothingCombiner; use query::score_combiner::DoNothingCombiner;
use crate::query::{ConstScorer, Union, VecDocSet}; use query::ConstScorer;
use crate::tests; use query::Union;
use crate::DocId; use query::VecDocSet;
use crate::DocSet;
use test::Bencher; use test::Bencher;
use tests;
use DocId;
use DocSet;
#[bench] #[bench]
fn bench_union_3_high(bench: &mut Bencher) { fn bench_union_3_high(bench: &mut Bencher) {


@@ -82,4 +82,5 @@ pub mod tests {
} }
assert_eq!(postings.fill_buffer(&mut buffer[..]), 9); assert_eq!(postings.fill_buffer(&mut buffer[..]), 9);
} }
} }


@@ -1,32 +0,0 @@
use crate::common::BinarySerializable;
use std::io;
use std::io::Read;
use std::io::Write;
/// `Field` is represented by an unsigned 32-bit integer type
/// The schema holds the mapping between field names and `Field` objects.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Field(u32);
impl Field {
/// Create a new field object for the given FieldId.
pub fn from_field_id(field_id: u32) -> Field {
Field(field_id)
}
/// Returns a u32 identifying uniquely a field within a schema.
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn field_id(&self) -> u32 {
self.0
}
}
impl BinarySerializable for Field {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.0.serialize(writer)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Field> {
u32::deserialize(reader).map(Field)
}
}


@@ -120,16 +120,17 @@ pub mod tests {
); );
} }
} }
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
mod bench { mod bench {
use super::tests::write_lorem_ipsum_store; use super::tests::write_lorem_ipsum_store;
use crate::directory::Directory; use directory::Directory;
use crate::directory::RAMDirectory; use directory::RAMDirectory;
use crate::store::StoreReader;
use std::path::Path; use std::path::Path;
use store::StoreReader;
use test::Bencher; use test::Bencher;
#[bench] #[bench]


@@ -165,4 +165,5 @@ mod tests {
assert_eq!(output.len(), 65); assert_eq!(output.len(), 65);
assert_eq!(output[0], 128u8 + 3u8); assert_eq!(output[0], 128u8 + 3u8);
} }
} }


@@ -3,7 +3,6 @@ use super::skiplist::SkipListBuilder;
use super::StoreReader; use super::StoreReader;
use crate::common::CountingWriter; use crate::common::CountingWriter;
use crate::common::{BinarySerializable, VInt}; use crate::common::{BinarySerializable, VInt};
use crate::directory::TerminatingWrite;
use crate::directory::WritePtr; use crate::directory::WritePtr;
use crate::schema::Document; use crate::schema::Document;
use crate::DocId; use crate::DocId;
@@ -110,6 +109,6 @@ impl StoreWriter {
self.offset_index_writer.write(&mut self.writer)?; self.offset_index_writer.write(&mut self.writer)?;
header_offset.serialize(&mut self.writer)?; header_offset.serialize(&mut self.writer)?;
self.doc.serialize(&mut self.writer)?; self.doc.serialize(&mut self.writer)?;
self.writer.terminate() self.writer.flush()
} }
} }


@@ -268,7 +268,7 @@ mod tests {
#[test] #[test]
fn test_term_info_block() { fn test_term_info_block() {
common::test::fixed_size_test::<TermInfoBlockMeta>(); common::fixed_size_test::<TermInfoBlockMeta>();
} }
#[test] #[test]
@@ -328,4 +328,5 @@ mod tests {
assert_eq!(term_info_store.get(i as u64), term_infos[i]); assert_eq!(term_info_store.get(i as u64), term_infos[i]);
} }
} }
} }


@@ -1,189 +0,0 @@
use crate::tokenizer::{Token, TokenStream, TokenStreamChain};
use std::cmp::Ordering;
/// Struct representing pre-tokenized text
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct PreTokenizedString {
/// Original text
pub text: String,
/// Tokens derived from the text
pub tokens: Vec<Token>,
}
impl Ord for PreTokenizedString {
fn cmp(&self, other: &Self) -> Ordering {
self.text.cmp(&other.text)
}
}
impl PartialOrd for PreTokenizedString {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
/// TokenStream implementation which wraps PreTokenizedString
pub struct PreTokenizedStream {
tokenized_string: PreTokenizedString,
current_token: i64,
}
impl From<PreTokenizedString> for PreTokenizedStream {
fn from(s: PreTokenizedString) -> PreTokenizedStream {
PreTokenizedStream {
tokenized_string: s,
current_token: -1,
}
}
}
impl PreTokenizedStream {
/// Creates a TokenStream from PreTokenizedString array
pub fn chain_tokenized_strings<'a>(
tok_strings: &'a [&'a PreTokenizedString],
) -> Box<dyn TokenStream + 'a> {
if tok_strings.len() == 1 {
Box::new(PreTokenizedStream::from((*tok_strings[0]).clone()))
} else {
let mut offsets = vec![];
let mut total_offset = 0;
for &tok_string in tok_strings {
offsets.push(total_offset);
if let Some(last_token) = tok_string.tokens.last() {
total_offset += last_token.offset_to;
}
}
let token_streams: Vec<_> = tok_strings
.iter()
.map(|tok_string| PreTokenizedStream::from((*tok_string).clone()))
.collect();
Box::new(TokenStreamChain::new(offsets, token_streams))
}
}
}
impl TokenStream for PreTokenizedStream {
fn advance(&mut self) -> bool {
self.current_token += 1;
self.current_token < self.tokenized_string.tokens.len() as i64
}
fn token(&self) -> &Token {
assert!(
self.current_token >= 0,
"TokenStream not initialized. You should call advance() at least once."
);
&self.tokenized_string.tokens[self.current_token as usize]
}
fn token_mut(&mut self) -> &mut Token {
assert!(
self.current_token >= 0,
"TokenStream not initialized. You should call advance() at least once."
);
&mut self.tokenized_string.tokens[self.current_token as usize]
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::Token;
#[test]
fn test_tokenized_stream() {
let tok_text = PreTokenizedString {
text: String::from("A a"),
tokens: vec![
Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 2,
offset_to: 3,
position: 1,
text: String::from("a"),
position_length: 1,
},
],
};
let mut token_stream = PreTokenizedStream::from(tok_text.clone());
for expected_token in tok_text.tokens {
assert!(token_stream.advance());
assert_eq!(token_stream.token(), &expected_token);
}
assert!(!token_stream.advance());
}
#[test]
fn test_chain_tokenized_strings() {
let tok_text = PreTokenizedString {
text: String::from("A a"),
tokens: vec![
Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 2,
offset_to: 3,
position: 1,
text: String::from("a"),
position_length: 1,
},
],
};
let chain_parts = vec![&tok_text, &tok_text];
let mut token_stream = PreTokenizedStream::chain_tokenized_strings(&chain_parts[..]);
let expected_tokens = vec![
Token {
offset_from: 0,
offset_to: 1,
position: 0,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 2,
offset_to: 3,
position: 1,
text: String::from("a"),
position_length: 1,
},
Token {
offset_from: 3,
offset_to: 4,
position: 3,
text: String::from("A"),
position_length: 1,
},
Token {
offset_from: 5,
offset_to: 6,
position: 4,
text: String::from("a"),
position_length: 1,
},
];
for expected_token in expected_tokens {
assert!(token_stream.advance());
assert_eq!(token_stream.token(), &expected_token);
}
assert!(!token_stream.advance());
}
}
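
A short sketch of how a caller would feed such pre-tokenized text into a document, assuming the side of this diff that carries the pre-tokenized API (`PreTokenizedString`, `Token`, and the `Document::add_pre_tokenized_text` helper from the document.rs hunk further down):

```rust
use tantivy::schema::{Schema, TEXT};
use tantivy::tokenizer::{PreTokenizedString, Token};
use tantivy::Document;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let _schema = schema_builder.build();

    // Tokens produced outside tantivy, e.g. by an external NLP pipeline.
    let pre_tok = PreTokenizedString {
        text: String::from("The Old Man"),
        tokens: vec![
            Token { offset_from: 0, offset_to: 3, position: 0, text: String::from("The"), position_length: 1 },
            Token { offset_from: 4, offset_to: 7, position: 1, text: String::from("Old"), position_length: 1 },
            Token { offset_from: 8, offset_to: 11, position: 2, text: String::from("Man"), position_length: 1 },
        ],
    };

    let mut doc = Document::default();
    doc.add_pre_tokenized_text(title, &pre_tok);
    assert_eq!(doc.field_values().len(), 1);
}
```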

10
tantivy-common/Cargo.toml Normal file

@@ -0,0 +1,10 @@
[package]
name = "tantivy-common"
version = "0.1.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
edition = "2018"
workspace = ".."
[dependencies]
byteorder = "*"
chrono = "*"


@@ -2,7 +2,7 @@ use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use std::io; use std::io;
use std::ops::Deref; use std::ops::Deref;
pub(crate) struct BitPacker { pub struct BitPacker {
mini_buffer: u64, mini_buffer: u64,
mini_buffer_written: usize, mini_buffer_written: usize,
} }


@@ -2,7 +2,7 @@ use std::fmt;
use std::u64; use std::u64;
#[derive(Clone, Copy, Eq, PartialEq)] #[derive(Clone, Copy, Eq, PartialEq)]
pub(crate) struct TinySet(u64); pub struct TinySet(u64);
impl fmt::Debug for TinySet { impl fmt::Debug for TinySet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -179,7 +179,7 @@ impl BitSet {
/// ///
/// Reminder: the tiny set with the bucket `bucket`, represents the /// Reminder: the tiny set with the bucket `bucket`, represents the
/// elements from `bucket * 64` to `(bucket+1) * 64`. /// elements from `bucket * 64` to `(bucket+1) * 64`.
pub(crate) fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> { pub fn first_non_empty_bucket(&self, bucket: u32) -> Option<u32> {
self.tinysets[bucket as usize..] self.tinysets[bucket as usize..]
.iter() .iter()
.cloned() .cloned()
@@ -194,7 +194,7 @@ impl BitSet {
/// Returns the tiny bitset representing the /// Returns the tiny bitset representing the
/// the set restricted to the number range from /// the set restricted to the number range from
/// `bucket * 64` to `(bucket + 1) * 64`. /// `bucket * 64` to `(bucket + 1) * 64`.
pub(crate) fn tinyset(&self, bucket: u32) -> TinySet { pub fn tinyset(&self, bucket: u32) -> TinySet {
self.tinysets[bucket as usize] self.tinysets[bucket as usize]
} }
} }
@@ -204,12 +204,7 @@ mod tests {
use super::BitSet; use super::BitSet;
use super::TinySet; use super::TinySet;
use crate::docset::DocSet; use std::collections::{BTreeSet, HashSet};
use crate::query::BitSetDocSet;
use crate::tests;
use crate::tests::generate_nonunique_unsorted;
use std::collections::BTreeSet;
use std::collections::HashSet;
#[test] #[test]
fn test_tiny_set() { fn test_tiny_set() {
@@ -264,26 +259,19 @@ mod tests {
test_against_hashset(&[62u32, 63u32], 64); test_against_hashset(&[62u32, 63u32], 64);
} }
#[test] // #[test]
fn test_bitset_large() { // fn test_bitset_clear() {
let arr = generate_nonunique_unsorted(100_000, 5_000); // let mut bitset = BitSet::with_max_value(1_000);
let mut btreeset: BTreeSet<u32> = BTreeSet::new(); // let els = tests::sample(1_000, 0.01f64);
let mut bitset = BitSet::with_max_value(100_000); // for &el in &els {
for el in arr { // bitset.insert(el);
btreeset.insert(el); // }
bitset.insert(el); // assert!(els.iter().all(|el| bitset.contains(*el)));
} // bitset.clear();
for i in 0..100_000 { // for el in 0u32..1000u32 {
assert_eq!(btreeset.contains(&i), bitset.contains(i)); // assert!(!bitset.contains(el));
} // }
assert_eq!(btreeset.len(), bitset.len()); // }
let mut bitset_docset = BitSetDocSet::from(bitset);
for el in btreeset.into_iter() {
bitset_docset.advance();
assert_eq!(bitset_docset.doc(), el);
}
assert!(!bitset_docset.advance());
}
#[test] #[test]
fn test_bitset_num_buckets() { fn test_bitset_num_buckets() {
@@ -339,19 +327,6 @@ mod tests {
assert_eq!(bitset.len(), 3); assert_eq!(bitset.len(), 3);
} }
#[test]
fn test_bitset_clear() {
let mut bitset = BitSet::with_max_value(1_000);
let els = tests::sample(1_000, 0.01f64);
for &el in &els {
bitset.insert(el);
}
assert!(els.iter().all(|el| bitset.contains(*el)));
bitset.clear();
for el in 0u32..1000u32 {
assert!(!bitset.contains(el));
}
}
} }
#[cfg(all(test, feature = "unstable"))] #[cfg(all(test, feature = "unstable"))]
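
A standalone sketch of the bucket arithmetic documented above (`first_non_empty_bucket`, `tinyset`): each `TinySet` is a 64-bit mask, and bucket `b` covers the values `b * 64 .. (b + 1) * 64`. Plain `u64`s stand in for the actual types:

```rust
fn bucket_and_mask(value: u32) -> (usize, u64) {
    ((value / 64) as usize, 1u64 << (value % 64))
}

fn main() {
    // A BitSet with max_value 1_024 needs 1_024 / 64 = 16 tiny sets.
    let mut tinysets = vec![0u64; 16];
    for &el in &[3u32, 64, 65, 1_000] {
        let (bucket, mask) = bucket_and_mask(el);
        tinysets[bucket] |= mask;
    }

    // Equivalent of first_non_empty_bucket(1): scan from bucket 1 onwards.
    let first = (1..tinysets.len()).find(|&b| tinysets[b] != 0);
    assert_eq!(first, Some(1)); // 64 and 65 live in bucket 1

    // Equivalent of tinyset(15): 1_000 % 64 == 40, so bit 40 is set.
    assert_eq!(tinysets[15], 1u64 << 40);
}
```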


@@ -0,0 +1,235 @@
use crate::common::BinarySerializable;
use crate::common::CountingWriter;
use crate::common::VInt;
use crate::directory::ReadOnlySource;
use crate::directory::WritePtr;
use crate::schema::Field;
use crate::space_usage::FieldUsage;
use crate::space_usage::PerFieldSpaceUsage;
use std::collections::HashMap;
use std::io::Write;
use std::io::{self, Read};
#[derive(Eq, PartialEq, Hash, Copy, Ord, PartialOrd, Clone, Debug)]
pub struct FileAddr {
field: Field,
idx: usize,
}
impl FileAddr {
fn new(field: Field, idx: usize) -> FileAddr {
FileAddr { field, idx }
}
}
impl BinarySerializable for FileAddr {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.field.serialize(writer)?;
VInt(self.idx as u64).serialize(writer)?;
Ok(())
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
let field = Field::deserialize(reader)?;
let idx = VInt::deserialize(reader)?.0 as usize;
Ok(FileAddr { field, idx })
}
}
/// A `CompositeWrite` is used to write a `CompositeFile`.
pub struct CompositeWrite<W = WritePtr> {
write: CountingWriter<W>,
offsets: HashMap<FileAddr, u64>,
}
impl<W: Write> CompositeWrite<W> {
/// Crate a new API writer that writes a composite file
/// in a given write.
pub fn wrap(w: W) -> CompositeWrite<W> {
CompositeWrite {
write: CountingWriter::wrap(w),
offsets: HashMap::new(),
}
}
/// Start writing a new field.
pub fn for_field(&mut self, field: Field) -> &mut CountingWriter<W> {
self.for_field_with_idx(field, 0)
}
/// Start writing a new field.
pub fn for_field_with_idx(&mut self, field: Field, idx: usize) -> &mut CountingWriter<W> {
let offset = self.write.written_bytes();
let file_addr = FileAddr::new(field, idx);
assert!(!self.offsets.contains_key(&file_addr));
self.offsets.insert(file_addr, offset);
&mut self.write
}
/// Close the composite file
///
/// An index of the different field offsets
/// will be written as a footer.
pub fn close(mut self) -> io::Result<()> {
let footer_offset = self.write.written_bytes();
VInt(self.offsets.len() as u64).serialize(&mut self.write)?;
let mut offset_fields: Vec<_> = self
.offsets
.iter()
.map(|(file_addr, offset)| (*offset, *file_addr))
.collect();
offset_fields.sort();
let mut prev_offset = 0;
for (offset, file_addr) in offset_fields {
VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
file_addr.serialize(&mut self.write)?;
prev_offset = offset;
}
let footer_len = (self.write.written_bytes() - footer_offset) as u32;
footer_len.serialize(&mut self.write)?;
self.write.flush()?;
Ok(())
}
}
/// A composite file is an abstraction to store a
/// file partitioned by field.
///
/// The file needs to be written field by field.
/// A footer describes the start and stop offsets
/// for each field.
#[derive(Clone)]
pub struct CompositeFile {
data: ReadOnlySource,
offsets_index: HashMap<FileAddr, (usize, usize)>,
}
impl CompositeFile {
/// Opens a composite file stored in a given
/// `ReadOnlySource`.
pub fn open(data: &ReadOnlySource) -> io::Result<CompositeFile> {
let end = data.len();
let footer_len_data = data.slice_from(end - 4);
let footer_len = u32::deserialize(&mut footer_len_data.as_slice())? as usize;
let footer_start = end - 4 - footer_len;
let footer_data = data.slice(footer_start, footer_start + footer_len);
let mut footer_buffer = footer_data.as_slice();
let num_fields = VInt::deserialize(&mut footer_buffer)?.0 as usize;
let mut file_addrs = vec![];
let mut offsets = vec![];
let mut field_index = HashMap::new();
let mut offset = 0;
for _ in 0..num_fields {
offset += VInt::deserialize(&mut footer_buffer)?.0 as usize;
let file_addr = FileAddr::deserialize(&mut footer_buffer)?;
offsets.push(offset);
file_addrs.push(file_addr);
}
offsets.push(footer_start);
for i in 0..num_fields {
let file_addr = file_addrs[i];
let start_offset = offsets[i];
let end_offset = offsets[i + 1];
field_index.insert(file_addr, (start_offset, end_offset));
}
Ok(CompositeFile {
data: data.slice_to(footer_start),
offsets_index: field_index,
})
}
/// Returns a composite file that stores
/// no fields.
pub fn empty() -> CompositeFile {
CompositeFile {
offsets_index: HashMap::new(),
data: ReadOnlySource::empty(),
}
}
/// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read(&self, field: Field) -> Option<ReadOnlySource> {
self.open_read_with_idx(field, 0)
}
/// Returns the `ReadOnlySource` associated
/// to a given `Field` and stored in a `CompositeFile`.
pub fn open_read_with_idx(&self, field: Field, idx: usize) -> Option<ReadOnlySource> {
self.offsets_index
.get(&FileAddr { field, idx })
.map(|&(from, to)| self.data.slice(from, to))
}
pub fn space_usage(&self) -> PerFieldSpaceUsage {
let mut fields = HashMap::new();
for (&field_addr, &(start, end)) in self.offsets_index.iter() {
fields
.entry(field_addr.field)
.or_insert_with(|| FieldUsage::empty(field_addr.field))
.add_field_idx(field_addr.idx, end - start);
}
PerFieldSpaceUsage::new(fields)
}
}
#[cfg(test)]
mod test {
use super::{CompositeFile, CompositeWrite};
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::directory::{Directory, RAMDirectory};
use crate::schema::Field;
use std::io::Write;
use std::path::Path;
#[test]
fn test_composite_file() {
let path = Path::new("test_path");
let mut directory = RAMDirectory::create();
{
let w = directory.open_write(path).unwrap();
let mut composite_write = CompositeWrite::wrap(w);
{
let mut write_0 = composite_write.for_field(Field(0u32));
VInt(32431123u64).serialize(&mut write_0).unwrap();
write_0.flush().unwrap();
}
{
let mut write_4 = composite_write.for_field(Field(4u32));
VInt(2).serialize(&mut write_4).unwrap();
write_4.flush().unwrap();
}
composite_write.close().unwrap();
}
{
let r = directory.open_read(path).unwrap();
let composite_file = CompositeFile::open(&r).unwrap();
{
let file0 = composite_file.open_read(Field(0u32)).unwrap();
let mut file0_buf = file0.as_slice();
let payload_0 = VInt::deserialize(&mut file0_buf).unwrap().0;
assert_eq!(file0_buf.len(), 0);
assert_eq!(payload_0, 32431123u64);
}
{
let file4 = composite_file.open_read(Field(4u32)).unwrap();
let mut file4_buf = file4.as_slice();
let payload_4 = VInt::deserialize(&mut file4_buf).unwrap().0;
assert_eq!(file4_buf.len(), 0);
assert_eq!(payload_4, 2u64);
}
}
}
}
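
A simplified standalone sketch of the footer bookkeeping done by `CompositeWrite::close` above: entries are sorted by offset, delta-encoded, and a trailing length field lets `CompositeFile::open` locate the footer from the end of the data. VInt is replaced by fixed-width integers purely for brevity; the only dependency assumed is the `byteorder` crate tantivy already uses:

```rust
use byteorder::{LittleEndian, WriteBytesExt};
use std::collections::HashMap;
use std::io::{self, Write};

/// Write a footer mapping (field, idx) -> start offset, followed by its length.
fn write_footer<W: Write>(writer: &mut W, offsets: &HashMap<(u32, usize), u64>) -> io::Result<u32> {
    let mut footer = Vec::new();
    footer.write_u64::<LittleEndian>(offsets.len() as u64)?;

    let mut by_offset: Vec<(u64, (u32, usize))> =
        offsets.iter().map(|(&addr, &offset)| (offset, addr)).collect();
    by_offset.sort();

    let mut prev_offset = 0u64;
    for (offset, (field, idx)) in by_offset {
        footer.write_u64::<LittleEndian>(offset - prev_offset)?; // delta from previous entry
        footer.write_u32::<LittleEndian>(field)?;
        footer.write_u64::<LittleEndian>(idx as u64)?;
        prev_offset = offset;
    }

    writer.write_all(&footer)?;
    let footer_len = footer.len() as u32;
    // The trailing length is what the reader deserializes first.
    writer.write_u32::<LittleEndian>(footer_len)?;
    Ok(footer_len)
}

fn main() -> io::Result<()> {
    let mut offsets = HashMap::new();
    offsets.insert((0u32, 0usize), 0u64); // field 0 starts at byte 0
    offsets.insert((4u32, 0usize), 128u64); // field 4 starts at byte 128

    let mut out = Vec::new();
    let footer_len = write_footer(&mut out, &offsets)?;
    assert_eq!(out.len(), footer_len as usize + 4);
    Ok(())
}
```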


@@ -1,5 +1,3 @@
use crate::directory::AntiCallToken;
use crate::directory::TerminatingWrite;
use std::io; use std::io;
use std::io::Write; use std::io::Write;
@@ -44,13 +42,6 @@ impl<W: Write> Write for CountingWriter<W> {
} }
} }
impl<W: TerminatingWrite> TerminatingWrite for CountingWriter<W> {
fn terminate_ref(&mut self, token: AntiCallToken) -> io::Result<()> {
self.flush()?;
self.underlying.terminate_ref(token)
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {


@@ -1,18 +1,18 @@
pub mod bitpacker; pub mod bitpacker;
mod bitset; mod bitset;
mod composite_file;
mod counting_writer; mod counting_writer;
mod serialize; mod serialize;
mod vint; mod vint;
pub use self::bitset::BitSet; pub use self::bitset::BitSet;
pub(crate) use self::bitset::TinySet; pub use self::bitset::TinySet;
pub(crate) use self::composite_file::{CompositeFile, CompositeWrite};
pub use self::counting_writer::CountingWriter; pub use self::counting_writer::CountingWriter;
pub use self::serialize::{BinarySerializable, FixedSize}; pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt}; pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use byteorder::LittleEndian as Endianness; pub use byteorder::LittleEndian as Endianness;
pub type DateTime = chrono::DateTime<chrono::Utc>;
/// Segment's max doc must be `< MAX_DOC_LIMIT`. /// Segment's max doc must be `< MAX_DOC_LIMIT`.
/// ///
/// We do not allow segments with more than /// We do not allow segments with more than
@@ -42,7 +42,7 @@ pub const MAX_DOC_LIMIT: u32 = 1 << 31;
/// a very large range of values. Even in this case, it results /// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal /// in an extra cost of at most 12% compared to the optimal
/// number of bits. /// number of bits.
pub(crate) fn compute_num_bits(n: u64) -> u8 { pub fn compute_num_bits(n: u64) -> u8 {
let amplitude = (64u32 - n.leading_zeros()) as u8; let amplitude = (64u32 - n.leading_zeros()) as u8;
if amplitude <= 64 - 8 { if amplitude <= 64 - 8 {
amplitude amplitude
@@ -51,7 +51,7 @@ pub(crate) fn compute_num_bits(n: u64) -> u8 {
} }
} }
pub(crate) fn is_power_of_2(n: usize) -> bool { pub fn is_power_of_2(n: usize) -> bool {
(n > 0) && (n & (n - 1) == 0) (n > 0) && (n & (n - 1) == 0)
} }
@@ -131,10 +131,12 @@ pub fn u64_to_f64(val: u64) -> f64 {
}) })
} }
pub use self::serialize::fixed_size_test;
#[cfg(test)] #[cfg(test)]
pub(crate) mod test { pub(crate) mod test {
pub use super::serialize::test::fixed_size_test; use super::fixed_size_test;
use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64}; use super::{compute_num_bits, f64_to_u64, i64_to_u64, u64_to_f64, u64_to_i64};
use std::f64; use std::f64;


@@ -1,5 +1,5 @@
use crate::common::Endianness; use crate::Endianness;
use crate::common::VInt; use crate::VInt;
use byteorder::{ReadBytesExt, WriteBytesExt}; use byteorder::{ReadBytesExt, WriteBytesExt};
use std::fmt; use std::fmt;
use std::io; use std::io;
@@ -145,17 +145,17 @@ impl BinarySerializable for String {
} }
} }
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
}
#[cfg(test)] #[cfg(test)]
pub mod test { mod test {
use super::*; use super::*;
use crate::common::VInt; use crate::VInt;
pub fn fixed_size_test<O: BinarySerializable + FixedSize + Default>() {
let mut buffer = Vec::new();
O::default().serialize(&mut buffer).unwrap();
assert_eq!(buffer.len(), O::SIZE_IN_BYTES);
}
fn serialize_test<T: BinarySerializable + Eq>(v: T) -> usize { fn serialize_test<T: BinarySerializable + Eq>(v: T) -> usize {
let mut buffer: Vec<u8> = Vec::new(); let mut buffer: Vec<u8> = Vec::new();
@@ -199,7 +199,10 @@ pub mod test {
fn test_serialize_string() { fn test_serialize_string() {
assert_eq!(serialize_test(String::from("")), 1); assert_eq!(serialize_test(String::from("")), 1);
assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4); assert_eq!(serialize_test(String::from("ぽよぽよ")), 1 + 3 * 4);
assert_eq!(serialize_test(String::from("富士さん見える。")), 1 + 3 * 8); assert_eq!(
serialize_test(String::from("富士さん見える。")),
1 + 3 * 8
);
} }
#[test] #[test]
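
Why the string assertions above come out to `1 + 3 * n` bytes: the serialized form is a VInt byte-length prefix (a single byte for strings this short) followed by the raw UTF-8 bytes, and each of these kana/kanji characters encodes to 3 bytes. A quick check:

```rust
fn main() {
    for s in &["ぽよぽよ", "富士さん見える。"] {
        println!("{}: {} chars, {} UTF-8 bytes", s, s.chars().count(), s.len());
    }
    assert_eq!("ぽよぽよ".len(), 12); // serialized: 1 + 12 bytes
    assert_eq!("富士さん見える。".len(), 24); // serialized: 1 + 24 bytes
}
```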


@@ -171,7 +171,7 @@ mod tests {
use super::serialize_vint_u32; use super::serialize_vint_u32;
use super::VInt; use super::VInt;
use crate::common::BinarySerializable; use crate::BinarySerializable;
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
fn aux_test_vint(val: u64) { fn aux_test_vint(val: u64) {

33
tantivy-schema/Cargo.toml Normal file

@@ -0,0 +1,33 @@
[package]
name = "tantivy-schema"
version = "0.1.0"
authors = ["Paul Masurel <paul.masurel@gmail.com>"]
edition = "2018"
workspace = ".."
[dependencies]
base64 = "0.10.0"
byteorder = "1.0"
once_cell = "0.2"
regex = "1.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
num_cpus = "1.2"
itertools = "0.8"
notify = {version="4", optional=true}
crossbeam = "0.7"
owning_ref = "0.4"
stable_deref_trait = "1.0.0"
downcast-rs = { version="1.0" }
census = "0.2"
failure = "0.1"
fail = "0.3"
scoped-pool = "1.0"
tantivy-common = {path="../tantivy-common"}
chrono = "*"
[dev-dependencies]
matches = "0.1.8"


@@ -1,10 +1,10 @@
use super::*; use super::*;
use crate::common::BinarySerializable;
use crate::common::VInt;
use crate::tokenizer::PreTokenizedString;
use crate::DateTime;
use itertools::Itertools; use itertools::Itertools;
use serde_derive::{Deserialize, Serialize};
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use tantivy_common::BinarySerializable;
use tantivy_common::DateTime;
use tantivy_common::VInt;
/// Tantivy's Document is the object that can /// Tantivy's Document is the object that can
/// be indexed and then searched for. /// be indexed and then searched for.
@@ -30,8 +30,8 @@ impl From<Vec<FieldValue>> for Document {
impl PartialEq for Document { impl PartialEq for Document {
fn eq(&self, other: &Document) -> bool { fn eq(&self, other: &Document) -> bool {
// super slow, but only here for tests // super slow, but only here for tests
let mut self_field_values: Vec<&_> = self.field_values.iter().collect(); let mut self_field_values = self.field_values.clone();
let mut other_field_values: Vec<&_> = other.field_values.iter().collect(); let mut other_field_values = other.field_values.clone();
self_field_values.sort(); self_field_values.sort();
other_field_values.sort(); other_field_values.sort();
self_field_values.eq(&other_field_values) self_field_values.eq(&other_field_values)
@@ -79,16 +79,6 @@ impl Document {
self.add(FieldValue::new(field, value)); self.add(FieldValue::new(field, value));
} }
/// Add a pre-tokenized text field.
pub fn add_pre_tokenized_text(
&mut self,
field: Field,
pre_tokenized_text: &PreTokenizedString,
) {
let value = Value::PreTokStr(pre_tokenized_text.clone());
self.add(FieldValue::new(field, value));
}
/// Add a u64 field /// Add a u64 field
pub fn add_u64(&mut self, field: Field, value: u64) { pub fn add_u64(&mut self, field: Field, value: u64) {
self.add(FieldValue::new(field, Value::U64(value))); self.add(FieldValue::new(field, Value::U64(value)));
@@ -179,7 +169,7 @@ impl BinarySerializable for Document {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::schema::*; use crate::*;
#[test] #[test]
fn test_doc() { fn test_doc() {
@@ -189,4 +179,5 @@ mod tests {
doc.add_text(text_field, "My title"); doc.add_text(text_field, "My title");
assert_eq!(doc.field_values().len(), 1); assert_eq!(doc.field_values().len(), 1);
} }
} }
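
For context, a small sketch of the `Document` helpers visible in this hunk (`add_text`, `add_u64`, `field_values`), using a throwaway schema whose field names are invented for the example:

```rust
use tantivy::schema::{Schema, INDEXED, TEXT};
use tantivy::Document;

fn main() {
    let mut schema_builder = Schema::builder();
    let title = schema_builder.add_text_field("title", TEXT);
    let count = schema_builder.add_u64_field("count", INDEXED);
    let _schema = schema_builder.build();

    let mut doc = Document::default();
    doc.add_text(title, "My title");
    doc.add_u64(count, 42);
    assert_eq!(doc.field_values().len(), 2);
}
```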


@@ -1,4 +1,3 @@
use crate::common::BinarySerializable;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use regex::Regex; use regex::Regex;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -8,6 +7,7 @@ use std::fmt::{self, Debug, Display, Formatter};
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::str; use std::str;
use std::string::FromUtf8Error; use std::string::FromUtf8Error;
use tantivy_common::BinarySerializable;
const SLASH_BYTE: u8 = b'/'; const SLASH_BYTE: u8 = b'/';
const ESCAPE_BYTE: u8 = b'\\'; const ESCAPE_BYTE: u8 = b'\\';
@@ -59,7 +59,7 @@ impl Facet {
&self.0 &self.0
} }
pub(crate) fn from_encoded_string(facet_string: String) -> Facet { pub fn from_encoded_string(facet_string: String) -> Facet {
Facet(facet_string) Facet(facet_string)
} }
@@ -104,7 +104,7 @@ impl Facet {
} }
/// Accessor for the inner buffer of the `Facet`. /// Accessor for the inner buffer of the `Facet`.
pub(crate) fn set_facet_str(&mut self, facet_str: &str) { pub fn set_facet_str(&mut self, facet_str: &str) {
self.0.clear(); self.0.clear();
self.0.push_str(facet_str); self.0.push_str(facet_str);
} }


@@ -0,0 +1,24 @@
use serde_derive::{Deserialize, Serialize};
use std::io;
use std::io::Read;
use std::io::Write;
use tantivy_common::BinarySerializable;
/// `Field` is actually a `u8` identifying a `Field`
/// The schema is in charge of holding mapping between field names
/// to `Field` objects.
///
/// Because the field id is a `u8`, tantivy can only have at most `255` fields.
/// Value 255 is reserved.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Field(pub u32);
impl BinarySerializable for Field {
fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
self.0.serialize(writer)
}
fn deserialize<R: Read>(reader: &mut R) -> io::Result<Field> {
u32::deserialize(reader).map(Field)
}
}
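
A standalone sketch of the `Field` round-trip implied above, without the `BinarySerializable` trait machinery: a field is just a `u32` id written with the index's endianness (`LittleEndian`, per the `Endianness` re-export in common/mod.rs). Assumes the `byteorder` crate:

```rust
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Read, Write};

#[derive(Debug, PartialEq)]
struct Field(u32);

impl Field {
    fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u32::<LittleEndian>(self.0)
    }

    fn deserialize<R: Read>(reader: &mut R) -> io::Result<Field> {
        reader.read_u32::<LittleEndian>().map(Field)
    }
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    Field(4).serialize(&mut buf)?;
    assert_eq!(buf.len(), 4); // a field id is a fixed 4 bytes on disk
    assert_eq!(Field::deserialize(&mut &buf[..])?, Field(4));
    Ok(())
}
```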


@@ -1,7 +1,8 @@
use crate::schema::IntOptions; use serde_derive::*;
use crate::schema::TextOptions;
use crate::schema::FieldType; use crate::FieldType;
use crate::IntOptions;
use crate::TextOptions;
use serde::de::{self, MapAccess, Visitor}; use serde::de::{self, MapAccess, Visitor};
use serde::ser::SerializeStruct; use serde::ser::SerializeStruct;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -265,7 +266,7 @@ impl<'de> Deserialize<'de> for FieldEntry {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::schema::TEXT; use crate::TEXT;
use serde_json; use serde_json;
#[test] #[test]


@@ -1,11 +1,10 @@
use base64::decode; use base64::decode;
use crate::schema::Facet; use crate::Facet;
use crate::schema::IndexRecordOption; use crate::IndexRecordOption;
use crate::schema::TextFieldIndexing; use crate::TextFieldIndexing;
use crate::schema::Value; use crate::Value;
use crate::schema::{IntOptions, TextOptions}; use crate::{IntOptions, TextOptions};
use crate::tokenizer::PreTokenizedString;
use serde_json::Value as JsonValue; use serde_json::Value as JsonValue;
/// Possible error that may occur while parsing a field value /// Possible error that may occur while parsing a field value
@@ -169,28 +168,6 @@ impl FieldType {
Err(ValueParsingError::TypeError(msg)) Err(ValueParsingError::TypeError(msg))
} }
}, },
JsonValue::Object(_) => match *self {
FieldType::Str(_) => {
if let Ok(tok_str_val) =
serde_json::from_value::<PreTokenizedString>(json.clone())
{
Ok(Value::PreTokStr(tok_str_val))
} else {
let msg = format!(
"Json value {:?} cannot be translated to PreTokenizedString.",
json
);
Err(ValueParsingError::TypeError(msg))
}
}
_ => {
let msg = format!(
"Json value not supported error {:?}. Expected {:?}",
json, self
);
Err(ValueParsingError::TypeError(msg))
}
},
_ => { _ => {
let msg = format!( let msg = format!(
"Json value not supported error {:?}. Expected {:?}", "Json value not supported error {:?}. Expected {:?}",
@@ -205,10 +182,9 @@ impl FieldType {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::FieldType; use super::FieldType;
use crate::schema::field_type::ValueParsingError; use crate::field_type::ValueParsingError;
use crate::schema::TextOptions; use crate::Value;
use crate::schema::Value; use serde_json::json;
use crate::tokenizer::{PreTokenizedString, Token};
#[test] #[test]
fn test_bytes_value_from_json() { fn test_bytes_value_from_json() {
@@ -229,71 +205,4 @@ mod tests {
_ => panic!("Expected parse failure for invalid base64"), _ => panic!("Expected parse failure for invalid base64"),
} }
} }
#[test]
fn test_pre_tok_str_value_from_json() {
let pre_tokenized_string_json = r#"{
"text": "The Old Man",
"tokens": [
{
"offset_from": 0,
"offset_to": 3,
"position": 0,
"text": "The",
"position_length": 1
},
{
"offset_from": 4,
"offset_to": 7,
"position": 1,
"text": "Old",
"position_length": 1
},
{
"offset_from": 8,
"offset_to": 11,
"position": 2,
"text": "Man",
"position_length": 1
}
]
}"#;
let expected_value = Value::PreTokStr(PreTokenizedString {
text: String::from("The Old Man"),
tokens: vec![
Token {
offset_from: 0,
offset_to: 3,
position: 0,
text: String::from("The"),
position_length: 1,
},
Token {
offset_from: 4,
offset_to: 7,
position: 1,
text: String::from("Old"),
position_length: 1,
},
Token {
offset_from: 8,
offset_to: 11,
position: 2,
text: String::from("Man"),
position_length: 1,
},
],
});
let deserialized_value = FieldType::Str(TextOptions::default())
.value_from_json(&serde_json::from_str(pre_tokenized_string_json).unwrap())
.unwrap();
assert_eq!(deserialized_value, expected_value);
let serialized_value_json = serde_json::to_string_pretty(&expected_value).unwrap();
assert_eq!(serialized_value_json, pre_tokenized_string_json);
}
} }


@@ -1,9 +1,11 @@
use crate::common::BinarySerializable; use crate::Field;
use crate::schema::Field; use crate::Value;
use crate::schema::Value; //use serde::Deserialize;
use serde_derive::{Deserialize, Serialize};
use std::io; use std::io;
use std::io::Read; use std::io::Read;
use std::io::Write; use std::io::Write;
use tantivy_common::BinarySerializable;
/// `FieldValue` holds together a `Field` and its `Value`. /// `FieldValue` holds together a `Field` and its `Value`.
#[derive(Debug, Clone, Ord, PartialEq, Eq, PartialOrd, Serialize, Deserialize)] #[derive(Debug, Clone, Ord, PartialEq, Eq, PartialOrd, Serialize, Deserialize)]


@@ -1,5 +1,5 @@
use crate::schema::IntOptions; use crate::IntOptions;
use crate::schema::TextOptions; use crate::TextOptions;
use std::ops::BitOr; use std::ops::BitOr;
#[derive(Clone)] #[derive(Clone)]


@@ -1,3 +1,5 @@
use serde_derive::{Deserialize, Serialize};
/// `IndexRecordOption` describes an amount information associated /// `IndexRecordOption` describes an amount information associated
/// to a given indexed field. /// to a given indexed field.
/// ///
@@ -29,6 +31,22 @@ pub enum IndexRecordOption {
} }
impl IndexRecordOption { impl IndexRecordOption {
/// Returns true iff the term frequency will be encoded.
pub fn is_termfreq_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions | IndexRecordOption::WithFreqs => true,
_ => false,
}
}
/// Returns true iff the term positions within the document are stored as well.
pub fn is_position_enabled(self) -> bool {
match self {
IndexRecordOption::WithFreqsAndPositions => true,
_ => false,
}
}
/// Returns true iff this option includes encoding /// Returns true iff this option includes encoding
/// term frequencies. /// term frequencies.
pub fn has_freq(self) -> bool { pub fn has_freq(self) -> bool {
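
A standalone sketch mirroring the helpers added in this hunk: positions imply term frequencies, and `Basic` stores neither.

```rust
#[derive(Debug, Clone, Copy)]
enum IndexRecordOption {
    Basic,
    WithFreqs,
    WithFreqsAndPositions,
}

impl IndexRecordOption {
    fn is_termfreq_enabled(self) -> bool {
        match self {
            IndexRecordOption::WithFreqsAndPositions | IndexRecordOption::WithFreqs => true,
            _ => false,
        }
    }

    fn is_position_enabled(self) -> bool {
        match self {
            IndexRecordOption::WithFreqsAndPositions => true,
            _ => false,
        }
    }
}

fn main() {
    assert!(IndexRecordOption::WithFreqs.is_termfreq_enabled());
    assert!(!IndexRecordOption::WithFreqs.is_position_enabled());
    assert!(IndexRecordOption::WithFreqsAndPositions.is_position_enabled());
    assert!(!IndexRecordOption::Basic.is_termfreq_enabled());
}
```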


@@ -1,4 +1,5 @@
use crate::schema::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag}; use crate::flags::{FastFlag, IndexedFlag, SchemaFlagList, StoredFlag};
use serde_derive::{Deserialize, Serialize};
use std::ops::BitOr; use std::ops::BitOr;
/// Express whether a field is single-value or multi-valued. /// Express whether a field is single-value or multi-valued.


@@ -26,7 +26,7 @@ directory.
### Example ### Example
``` ```
use tantivy::schema::*; use tantivy_schema::*;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let title_options = TextOptions::default() let title_options = TextOptions::default()
.set_stored() .set_stored()
@@ -59,7 +59,7 @@ when [`searcher.doc(doc_address)`](../struct.Searcher.html#method.doc) is called
### Example ### Example
``` ```
use tantivy::schema::*; use tantivy_schema::*;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
let num_stars_options = IntOptions::default() let num_stars_options = IntOptions::default()
.set_stored() .set_stored()
@@ -93,7 +93,7 @@ using the `|` operator.
For instance, a schema containing the two fields defined in the example above could be rewritten : For instance, a schema containing the two fields defined in the example above could be rewritten :
``` ```
use tantivy::schema::*; use tantivy_schema::*;
let mut schema_builder = Schema::builder(); let mut schema_builder = Schema::builder();
schema_builder.add_u64_field("num_stars", INDEXED | STORED); schema_builder.add_u64_field("num_stars", INDEXED | STORED);
schema_builder.add_text_field("title", TEXT | STORED); schema_builder.add_text_field("title", TEXT | STORED);
@@ -126,7 +126,6 @@ pub use self::schema::{Schema, SchemaBuilder};
pub use self::value::Value; pub use self::value::Value;
pub use self::facet::Facet; pub use self::facet::Facet;
pub(crate) use self::facet::FACET_SEP_BYTE;
pub use self::document::Document; pub use self::document::Document;
pub use self::field::Field; pub use self::field::Field;
@@ -174,4 +173,5 @@ mod tests {
assert!(!is_valid_field_name("シャボン玉")); assert!(!is_valid_field_name("シャボン玉"));
assert!(is_valid_field_name("my_text_field")); assert!(is_valid_field_name("my_text_field"));
} }
} }


@@ -1,4 +1,5 @@
use crate::schema::Value; use crate::Value;
use serde_derive::Serialize;
use std::collections::BTreeMap; use std::collections::BTreeMap;
/// Internal representation of a document used for JSON /// Internal representation of a document used for JSON


@@ -1,14 +1,14 @@
use crate::schema::field_type::ValueParsingError;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::sync::Arc;
use super::*; use super::*;
use crate::schema::field_type::ValueParsingError;
use failure::Fail;
use serde::de::{SeqAccess, Visitor}; use serde::de::{SeqAccess, Visitor};
use serde::ser::SerializeSeq; use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::{self, Map as JsonObject, Value as JsonValue}; use serde_json::{self, Map as JsonObject, Value as JsonValue};
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::sync::Arc;
/// Tantivy has a very strict schema. /// Tantivy has a very strict schema.
/// You need to specify in advance whether a field is indexed or not, /// You need to specify in advance whether a field is indexed or not,
@@ -21,7 +21,7 @@ use std::fmt;
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// use tantivy::schema::*; /// use tantivy_schema::*;
/// ///
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING); /// let id_field = schema_builder.add_text_field("id", STRING);
@@ -167,7 +167,7 @@ impl SchemaBuilder {
/// Adds a field entry to the schema in build. /// Adds a field entry to the schema in build.
fn add_field(&mut self, field_entry: FieldEntry) -> Field { fn add_field(&mut self, field_entry: FieldEntry) -> Field {
let field = Field::from_field_id(self.fields.len() as u32); let field = Field(self.fields.len() as u32);
let field_name = field_entry.name().to_string(); let field_name = field_entry.name().to_string();
self.fields.push(field_entry); self.fields.push(field_entry);
self.fields_map.insert(field_name, field); self.fields_map.insert(field_name, field);
@@ -208,7 +208,7 @@ impl Eq for InnerSchema {}
/// # Examples /// # Examples
/// ///
/// ``` /// ```
/// use tantivy::schema::*; /// use tantivy_schema::*;
/// ///
/// let mut schema_builder = Schema::builder(); /// let mut schema_builder = Schema::builder();
/// let id_field = schema_builder.add_text_field("id", STRING); /// let id_field = schema_builder.add_text_field("id", STRING);
@@ -223,7 +223,7 @@ pub struct Schema(Arc<InnerSchema>);
impl Schema { impl Schema {
/// Return the `FieldEntry` associated to a `Field`. /// Return the `FieldEntry` associated to a `Field`.
pub fn get_field_entry(&self, field: Field) -> &FieldEntry { pub fn get_field_entry(&self, field: Field) -> &FieldEntry {
&self.0.fields[field.field_id() as usize] &self.0.fields[field.0 as usize]
} }
/// Return the field name for a given `Field`. /// Return the field name for a given `Field`.
@@ -232,12 +232,8 @@ impl Schema {
} }
/// Return the list of all the `Field`s. /// Return the list of all the `Field`s.
pub fn fields(&self) -> impl Iterator<Item = (Field, &FieldEntry)> { pub fn fields(&self) -> &[FieldEntry] {
self.0 &self.0.fields
.fields
.iter()
.enumerate()
.map(|(field_id, field_entry)| (Field::from_field_id(field_id as u32), field_entry))
} }
/// Creates a new builder. /// Creates a new builder.
@@ -489,32 +485,13 @@ mod tests {
let schema: Schema = serde_json::from_str(expected).unwrap(); let schema: Schema = serde_json::from_str(expected).unwrap();
let mut fields = schema.fields(); let mut fields = schema.fields().iter();
{
let (field, field_entry) = fields.next().unwrap(); assert_eq!("title", fields.next().unwrap().name());
assert_eq!("title", field_entry.name()); assert_eq!("author", fields.next().unwrap().name());
assert_eq!(0, field.field_id()); assert_eq!("count", fields.next().unwrap().name());
} assert_eq!("popularity", fields.next().unwrap().name());
{ assert_eq!("score", fields.next().unwrap().name());
let (field, field_entry) = fields.next().unwrap();
assert_eq!("author", field_entry.name());
assert_eq!(1, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("count", field_entry.name());
assert_eq!(2, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("popularity", field_entry.name());
assert_eq!(3, field.field_id());
}
{
let (field, field_entry) = fields.next().unwrap();
assert_eq!("score", field_entry.name());
assert_eq!(4, field.field_id());
}
assert!(fields.next().is_none()); assert!(fields.next().is_none());
} }
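
A sketch of the accessor on the iterator-returning side of this hunk, where `Schema::fields()` yields `(Field, &FieldEntry)` pairs in field-id order (the other side returns `&[FieldEntry]` instead):

```rust
use tantivy::schema::{Schema, STRING, TEXT};

fn main() {
    let mut schema_builder = Schema::builder();
    schema_builder.add_text_field("id", STRING);
    schema_builder.add_text_field("title", TEXT);
    let schema = schema_builder.build();

    for (field, field_entry) in schema.fields() {
        println!("field {} -> {}", field.field_id(), field_entry.name());
    }
}
```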

Some files were not shown because too many files have changed in this diff.