Mirror of https://github.com/quickwit-oss/tantivy.git (synced 2025-12-30 05:52:54 +00:00)
Compare commits
60 Commits
| SHA1 |
|---|
| bb21d12a70 |
| 4565aba62a |
| 545a7ec8dd |
| 14908479d5 |
| ab4593eeb7 |
| e75bb1d6a1 |
| 63b9d62237 |
| 0098e3d428 |
| 69d5e4b9b1 |
| e0cdd3114d |
| f32b4a2ebe |
| 6ff60b8ed8 |
| 8da28fb6cf |
| 0df2a221da |
| 5449ec3c11 |
| 10f6c07c53 |
| 06e7bd18e7 |
| 37e4280c0a |
| 0ba1cf93f7 |
| 21a9940726 |
| 8600b8ea25 |
| 30f4f85d48 |
| 82d25b8397 |
| 2104c0277c |
| dd37e109f2 |
| cc23194c58 |
| 63868733a3 |
| 644d8a3a10 |
| e32dba1a97 |
| a78aa4c259 |
| 7e5f697d00 |
| a78f4cca37 |
| 2e44f0f099 |
| 9ccba9f864 |
| 9101bf5753 |
| 23e97da9f6 |
| 1d439e96f5 |
| 934933582e |
| 98c7fbdc6f |
| cec9956a01 |
| c64972e039 |
| b3b2421e8a |
| f570fe37d4 |
| 6704ab6987 |
| a12d211330 |
| ee681a4dd1 |
| d15efd6635 |
| 18814ba0c1 |
| f247935bb9 |
| 6a197e023e |
| 96a313c6dd |
| fb9b1c1f41 |
| e1bca6db9d |
| 8438eda01a |
| b373f00840 |
| 46decdb0ea |
| 835cdc2fe8 |
| 19756bb7d6 |
| 57e1f8ed28 |
| 2649c8a715 |
1 .gitignore vendored

@@ -1,3 +1,4 @@
 tantivy.iml
 *.swp
 target
+target/debug
@@ -1,10 +1,16 @@
+Tantivy 0.7.1
+=====================
+- Bugfix: NGramTokenizer panics on non ascii chars
+- Added a space usage API
+
 Tantivy 0.7
 =====================
 - Skip data for doc ids and positions (@fulmicoton),
   greatly improving performance
 - Tantivy error now rely on the failure crate (@drusellers)
 - Added support for `AND`, `OR`, `NOT` syntax in addition to the `+`,`-` syntax
 - Added a snippet generator with highlight (@vigneshsarma, @fulmicoton)
+- Added a `TopFieldCollector` (@pentlander)
 
 Tantivy 0.6.1
 =========================
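The "space usage API" entry above refers to inspecting how many bytes the various index components occupy. As a rough sketch of how such an API is typically called; the method and type names (`space_usage()`, `total()`) are assumptions from memory and are not shown in this diff, so check the tantivy docs for your version:

```rust
// Hedged sketch: `space_usage()` and its return type are assumptions,
// not taken from this page.
extern crate tantivy;

use tantivy::schema::SchemaBuilder;
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let index = Index::create_in_ram(SchemaBuilder::default().build());
    index.load_searchers()?;
    let searcher = index.searcher();
    // Reports per-component byte counts (postings, store, fast fields...).
    let usage = searcher.space_usage();
    println!("total index size: {} bytes", usage.total());
    Ok(())
}
```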
20 Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tantivy"
-version = "0.7.0-dev"
+version = "0.7.2"
 authors = ["Paul Masurel <paul.masurel@gmail.com>"]
 license = "MIT"
 categories = ["database-implementations", "data-structures"]
@@ -12,10 +12,9 @@ readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 
 [dependencies]
-base64 = "0.9.1"
+base64 = "0.10.0"
 byteorder = "1.0"
 lazy_static = "1"
-tinysegmenter = "0.1.0"
 regex = "1.0"
 fst = {version="0.3", default-features=false}
 fst-regex = { version="0.2" }
@@ -33,7 +32,7 @@ num_cpus = "1.2"
 itertools = "0.7"
 levenshtein_automata = {version="0.1", features=["fst_automaton"]}
 bit-set = "0.5"
-uuid = { version = "0.6", features = ["v4", "serde"] }
+uuid = { version = "0.7", features = ["v4", "serde"] }
 crossbeam = "0.4"
 crossbeam-channel = "0.2"
 futures = "0.1"
@@ -48,24 +47,33 @@ census = "0.1"
 fnv = "1.0.6"
 owned-read = "0.4"
 failure = "0.1"
 htmlescape = "0.3.1"
+fail = "0.2"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"
 
 [dev-dependencies]
 rand = "0.5"
 maplit = "1"
 
 [profile.release]
 opt-level = 3
 debug = false
 lto = true
 debug-assertions = false
 
+[profile.test]
+debug-assertions = true
+overflow-checks = true
+
 [features]
-default = ["mmap"]
+# by default no-fail is disabled. We manually enable it when running test.
+default = ["mmap", "no_fail"]
 mmap = ["fst/mmap", "atomicwrites"]
 lz4-compression = ["lz4"]
+no_fail = ["fail/no_fail"]
 
 [badges]
 travis-ci = { repository = "tantivy-search/tantivy" }
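The `fail`/`no_fail` pair above wires the `fail-rs` crate into the codebase: fail points compile to no-ops in normal builds (the default features enable `no_fail`), while the test script runs with `--no-default-features` so failures can be injected. As a rough illustration of how such fail points behave; the fail point name below is hypothetical, not one of tantivy's:

```rust
#[macro_use]
extern crate fail;

fn flush_to_disk() -> Result<(), String> {
    // Compiles away to a no-op when built with the `fail/no_fail` feature,
    // which is why tantivy enables `no_fail` in its default features.
    fail_point!("flush-io-error", |_| Err("injected io error".to_string()));
    Ok(())
}

fn main() {
    // In test builds (without `no_fail`), a failure can be injected at runtime.
    fail::cfg("flush-io-error", "return").unwrap();
    assert!(flush_to_disk().is_err());
}
```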
17 README.md

@@ -4,6 +4,7 @@
 [](https://gitter.im/tantivy-search/tantivy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [](https://opensource.org/licenses/MIT)
 [](https://ci.appveyor.com/project/fulmicoton/tantivy/branch/master)
+[](https://saythanks.io/to/fulmicoton)
 
 
@@ -20,7 +21,7 @@
 
 **Tantivy** is a **full text search engine library** written in rust.
 
-It is closer to Lucene than to Elastic Search and Solr in the sense it is not
+It is closer to [Apache Lucene](https://lucene.apache.org/) than to [Elastic Search](https://www.elastic.co/products/elasticsearch) and [Apache Solr](https://lucene.apache.org/solr/) in the sense it is not
 an off-the-shelf search engine server, but rather a crate that can be used
 to build such a search engine.
@@ -32,8 +33,8 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 - Fast (check out the :racehorse: :sparkles: [benchmark](https://tantivy-search.github.io/bench/) :sparkles: :racehorse:)
 - Tiny startup time (<10ms), perfect for command line tools
 - BM25 scoring (the same as lucene)
-- Basic query language (`+michael +jackson`)
-- Phrase queries search (\"michael jackson\"`)
+- Natural query language `(michael AND jackson) OR "king of pop"`
+- Phrase queries search (`"michael jackson"`)
 - Incremental indexing
 - Multithreaded indexing (indexing English Wikipedia takes < 3 minutes on my desktop)
 - Mmap directory
@@ -43,12 +44,14 @@ Tantivy is, in fact, strongly inspired by Lucene's design.
 - LZ4 compressed document store
 - Range queries
 - Faceted search
-- Configurable indexing (optional term frequency and position indexing
+- Configurable indexing (optional term frequency and position indexing)
 - Cheesy logo with a horse
 
 # Non-features
 
-- Distributed search and will not be in the scope of tantivy.
+- Distributed search is out of the scope of tantivy. That being said, tantivy is meant as a
+  library upon which one could build a distributed search. Serializable/mergeable collector state for instance,
+  are within the scope of tantivy.
 
 # Supported OS and compiler
@@ -77,6 +80,10 @@ To check out and run tests, you can simply run :
     cd tantivy
     cargo build
 
+## Running tests
+
+Some tests will not run with just `cargo test` because of `fail-rs`.
+To run the tests exhaustively, run `./run-tests.sh`.
 
 # Contribute
@@ -18,5 +18,5 @@ install:
 build: false
 
 test_script:
-  - REM SET RUST_LOG=tantivy,test & cargo test --verbose
+  - REM SET RUST_LOG=tantivy,test & cargo test --verbose --no-default-features --features mmap -- --test-threads 1
   - REM SET RUST_BACKTRACE=1 & cargo build --examples
@@ -11,12 +11,11 @@ main() {
     else
         echo "Build"
         cross build --target $TARGET
-        cross build --target $TARGET --release
         if [ ! -z $DISABLE_TESTS ]; then
             return
         fi
         echo "Test"
-        cross test --target $TARGET
+        cross test --target $TARGET --no-default-features --features mmap -- --test-threads 1
     fi
     for example in $(ls examples/*.rs)
     do
@@ -9,6 +9,7 @@
 - [Facetting](./facetting.md)
 - [Innerworkings](./innerworkings.md)
 - [Inverted index](./inverted_index.md)
+- [Best practise](./inverted_index.md)
 
 [Frequently Asked Questions](./faq.md)
 [Examples](./examples.md)
@@ -2,8 +2,8 @@
 
 > Tantivy is a **search** engine **library** for Rust.
 
-If you are familiar with Lucene, tantivy is heavily inspired by Lucene's design and
-they both have the same scope and targetted users.
+If you are familiar with Lucene, it's an excellent approximation to consider tantivy as Lucene for rust. tantivy is heavily inspired by Lucene's design and
+they both have the same scope and targetted use cases.
 
 If you are not familiar with Lucene, let's break down our little tagline.
@@ -17,15 +17,18 @@ relevancy, collapsing, highlighting, spatial search.
 experience. But keep in mind this is just a toolbox.
 Which bring us to the second keyword...
 
-- **Library** means that you will have to write code. tantivy is not an *all-in-one* server solution.
-
-Sometimes a functionality will not be available in tantivy because it is too specific to your use case. By design, tantivy should make it possible to extend
-the available set of features using the existing rock-solid datastructures.
-
-Most frequently this will mean writing your own `Collector`, your own `Scorer` or your own
-`Tokenizer/TokenFilter`... But some of your requirement may also be related to
-architecture or operations. For instance, you may want to build a large corpus on Hadoop,
-fine-tune the merge policy to keep your index sharded in a time-wise fashion, or you may want
-to convert and existing index from a different format.
-
-Tantivy exposes its API to do all of these things.
+- **Library** means that you will have to write code. tantivy is not an *all-in-one* server solution like elastic search for instance.
+
+Sometimes a functionality will not be available in tantivy because it is too
+specific to your use case. By design, tantivy should make it possible to extend
+the available set of features using the existing rock-solid datastructures.
+
+Most frequently this will mean writing your own `Collector`, your own `Scorer` or your own
+`TokenFilter`... Some of your requirements may also be related to
+something closer to architecture or operations. For instance, you may
+want to build a large corpus on Hadoop, fine-tune the merge policy to keep your
+index sharded in a time-wise fashion, or you may want to convert and existing
+index from a different format.
+
+Tantivy exposes a lot of low level API to do all of these things.
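Since this page keeps pointing at custom `Collector`s, here is a minimal sketch of what one looks like against the `Collector` trait methods visible in the collector sources further down this diff (tantivy already ships a `CountCollector`; this is only an illustration, and exports like `SegmentLocalId` may differ across versions):

```rust
extern crate tantivy;

use tantivy::collector::Collector;
use tantivy::{DocId, Result, Score, SegmentLocalId, SegmentReader};

/// Counts matching documents, ignoring scores entirely.
struct SimpleCount {
    count: usize,
}

impl Collector for SimpleCount {
    // Called once before each segment's documents are collected.
    fn set_segment(&mut self, _segment_id: SegmentLocalId, _reader: &SegmentReader) -> Result<()> {
        Ok(())
    }

    // Called for every document matching the query in the current segment.
    fn collect(&mut self, _doc: DocId, _score: Score) {
        self.count += 1;
    }

    // Counting does not need BM25 scores, so scoring work can be skipped.
    fn requires_scoring(&self) -> bool {
        false
    }
}
```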
@@ -2,47 +2,76 @@
 
 ## Straight from disk
 
-By default, tantivy accesses its data using its `MMapDirectory`.
-While this design has some downsides, this greatly simplifies the source code of tantivy,
-and entirely delegates the caching to the OS.
+Tantivy accesses its data using an abstracting trait called `Directory`.
+In theory, one can come and override the data access logic. In practise, the
+trait somewhat assumes that your data can be mapped to memory, and tantivy
+seems deeply married to using `mmap` for its io [^1], and the only persisting
+directory shipped with tantivy is the `MmapDirectory`.
 
-`tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk.
-As a result, the act of opening an indexing does not involve loading different datastructures
-from the disk into random access memory : starting a process, opening an index, and performing a query
-can typically be done in a matter of milliseconds.
+While this design has some downsides, this greatly simplifies the source code of
+tantivy. Caching is also entirely delegated to the OS.
 
-This is an interesting property for a command line search engine, or for some multi-tenant log search engine.
-Spawning a new process for each new query can be a perfectly sensible solution in some use case.
+`tantivy` works entirely (or almost) by directly reading the datastructures as they are layed on disk. As a result, the act of opening an indexing does not involve loading different datastructures from the disk into random access memory : starting a process, opening an index, and performing your first query can typically be done in a matter of milliseconds.
+
+This is an interesting property for a command line search engine, or for some multi-tenant log search engine : spawning a new process for each new query can be a perfectly sensible solution in some use case.
 
 In later chapters, we will discuss tantivy's inverted index data layout.
 One key take away is that to achieve great performance, search indexes are extremely compact.
 Of course this is crucial to reduce IO, and ensure that as much of our index can sit in RAM.
 
-Also, whenever possible the data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access
-the data from your spinning hard disk, but this is also a great property when working with `SSD` or `RAM`,
-as it makes our read patterns very predictable for the CPU.
+Also, whenever possible its data is accessed sequentially. Of course, this is an amazing property when tantivy needs to access the data from your spinning hard disk, but this is also
+critical for performance, if your data is read from and an `SSD` or even already in your pagecache.
 
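As a concrete sketch of the "open and query in milliseconds" claim, using the API style of the examples elsewhere in this diff (the `Index::open_in_dir` constructor name is an assumption; it varies across tantivy versions):

```rust
extern crate tantivy;

use tantivy::collector::TopCollector;
use tantivy::query::QueryParser;
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    // Opening the index mmaps the segment files; nothing is eagerly decoded.
    let index = Index::open_in_dir("./wikipedia-index")?; // constructor name varies by version
    index.load_searchers()?;
    let searcher = index.searcher();

    let title = index.schema().get_field("title").unwrap();
    let query = QueryParser::for_index(&index, vec![title]).parse_query("diary")?;

    // Process start to first results is typically a matter of milliseconds.
    let mut top_collector = TopCollector::with_limit(10);
    searcher.search(&*query, &mut top_collector)?;
    println!("{} hits", top_collector.docs().len());
    Ok(())
}
```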
 ## Segments, and the log method
 
-That kind compact layout comes at one cost: it prevents our datastructures from being dynamic.
-In fact, a trait called `Directory` is in charge of abstracting all of tantivy's data access
-and its API does not even allow editing these file once they are written.
+That kind of compact layout comes at one cost: it prevents our datastructures from being dynamic.
+In fact, the `Directory` trait does not even allow you to modify part of a file.
 
 To allow the addition / deletion of documents, and create the illusion that
-your index is dynamic (i.e.: adding and deleting documents), tantivy uses a common database trick sometimes
-referred to as the *log method*.
+your index is dynamic (i.e.: adding and deleting documents), tantivy uses a common database trick sometimes referred to as the *log method*.
 
-Let's forget about deletes for a moment. As you add documents, these documents are processed and stored in
-a dedicated datastructure, in a `RAM` buffer. This datastructure is designed to be dynamic but
-cannot be accessed for search. As you add documents, this buffer will reach its capacity and tantivy will
-transparently stop adding document to it and start converting this datastructure to its final
-read-only format on disk. Once written, an brand empty buffer is available to resume adding documents.
+Let's forget about deletes for a moment.
+
+As you add documents, these documents are processed and stored in a dedicated datastructure, in a `RAM` buffer. This datastructure is not ready for search, but it is useful to receive your data and rearrange it very rapidly.
+
+As you add documents, this buffer will reach its capacity and tantivy will transparently stop adding document to it and start converting this datastructure to its final read-only format on disk. Once written, an brand empty buffer is available to resume adding documents.
 
 The resulting chunk of index obtained after this serialization is called a `Segment`.
 
-> A segment is a self-contained atomic piece of index. It is identified with a UUID, and all of its files
-are identified using the naming scheme : `<UUID>.*`.
+> A segment is a self-contained atomic piece of index. It is identified with a UUID, and all of its files are identified using the naming scheme : `<UUID>.*`.
 
 Which brings us to the nature of a tantivy `Index`.
 
 > A tantivy `Index` is a collection of `Segments`.
 
+Physically, this really just means and index is a bunch of segment files in a given `Directory`,
+linked together by a `meta.json` file. This transparency can become extremely handy
+to get tantivy to fit your use case:
+
+*Example 1* You could for instance use hadoop to build a very large search index in a timely manner, copy all of the resulting segment files in the same directory and edit the `meta.json` to get a functional index.[^2]
+
+*Example 2* You could also disable your merge policy and enforce daily segments. Removing data after one week can then be done very efficiently by just editing the `meta.json` and deleting the files associated to segment `D-7`.
 
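For *Example 2*, disabling merges so that segments line up with your own time-based rotation looks roughly like this. A hedged sketch: `NoMergePolicy` lives in tantivy's `merge_policy` module, but verify the exact path and `set_merge_policy` signature for your version:

```rust
extern crate tantivy;

use tantivy::merge_policy::NoMergePolicy;
use tantivy::schema::SchemaBuilder;
use tantivy::Index;

fn main() -> tantivy::Result<()> {
    let schema = SchemaBuilder::default().build();
    let index = Index::create_in_ram(schema);

    let index_writer = index.writer(50_000_000)?;
    // With merges disabled, every commit seals the current RAM buffer into
    // its own segment(s) and nothing gets compacted behind your back.
    index_writer.set_merge_policy(Box::new(NoMergePolicy::default()));

    // e.g. one commit per day -> daily segments, which can later be dropped
    // by editing meta.json and deleting the matching `<UUID>.*` files.
    Ok(())
}
```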
 # Merging
 
 As you index more and more data, your index will accumulate more and more segments.
 Having a lot of small segments is not really optimal. There is a bit of redundancy in having
 all these term dictionary. Also when searching, we will need to do term lookups as many times as we have segments. It can hurt search performance a bit.
 
 That's where merging or compacting comes into place. Tantivy will continuously consider merge
 opportunities and start merging segments in the background.
 
 # Indexing throughput, number of indexing threads
 
+[^1]: This may eventually change.
+
+[^2]: Be careful however. By default these files will not be considered as *managed* by tantivy. This means they will never be garbage collected by tantivy, regardless of whether they become obsolete or not.
0 doc/src/best_practise.md.rs Normal file
@@ -1 +1,3 @@
 # Examples
+
+- [Basic search](/examples/basic_search.html)
@@ -10,7 +10,6 @@
 // - search for the best document matchings "sea whale"
 // - retrieve the best document original content.
 
-
 extern crate tempdir;
 
 // ---
@@ -231,13 +230,11 @@ fn main() -> tantivy::Result<()> {
     // a title.
 
     for doc_address in doc_addresses {
-        let retrieved_doc = searcher.doc(&doc_address)?;
+        let retrieved_doc = searcher.doc(doc_address)?;
         println!("{}", schema.to_json(&retrieved_doc));
     }
 
-
     Ok(())
 }

 use tempdir::TempDir;
@@ -3,7 +3,6 @@
 // In this example, we'll see how to define a tokenizer pipeline
 // by aligning a bunch of `TokenFilter`.
 
-
 #[macro_use]
 extern crate tantivy;
 use tantivy::collector::TopCollector;
@@ -12,7 +11,6 @@ use tantivy::schema::*;
 use tantivy::tokenizer::NgramTokenizer;
 use tantivy::Index;
 
-
 fn main() -> tantivy::Result<()> {
     // # Defining the schema
     //
@@ -111,7 +109,7 @@ fn main() -> tantivy::Result<()> {
 
     let doc_addresses = top_collector.docs();
     for doc_address in doc_addresses {
-        let retrieved_doc = searcher.doc(&doc_address)?;
+        let retrieved_doc = searcher.doc(doc_address)?;
         println!("{}", schema.to_json(&retrieved_doc));
     }
@@ -11,10 +11,9 @@
 #[macro_use]
 extern crate tantivy;
 use tantivy::collector::TopCollector;
+use tantivy::query::TermQuery;
 use tantivy::schema::*;
 use tantivy::Index;
-use tantivy::query::TermQuery;
 
 // A simple helper function to fetch a single document
 // given its id from our index.
@@ -31,8 +30,8 @@ fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Op
     let mut top_collector = TopCollector::with_limit(1);
     searcher.search(&term_query, &mut top_collector)?;
 
-    if let Some(doc_address) = top_collector.docs().first() {
-        let doc = searcher.doc(doc_address)?;
+    if let Some(doc_address) = top_collector.docs().first() {
+        let doc = searcher.doc(*doc_address)?;
         Ok(Some(doc))
     } else {
         // no doc matching this ID.
@@ -41,7 +40,6 @@ fn extract_doc_given_isbn(index: &Index, isbn_term: &Term) -> tantivy::Result<Op
 }
 
 fn main() -> tantivy::Result<()> {
-
     // # Defining the schema
     //
     // Check out the *basic_search* example if this makes
@@ -126,7 +124,6 @@ fn main() -> tantivy::Result<()> {
         isbn => "978-9176370711",
     ));
 
-
     // You are guaranteed that your clients will only observe your index in
     // the state it was in after a commit.
     // In this example, your search engine will at no point be missing the *Frankenstein* document.
@@ -143,4 +140,4 @@ fn main() -> tantivy::Result<()> {
     );
 
     Ok(())
-}
+}
@@ -22,60 +22,60 @@ use tantivy::schema::*;
 use tantivy::Index;
 
 fn main() -> tantivy::Result<()> {
     // Let's create a temporary directory for the
     // sake of this example
     let index_path = TempDir::new("tantivy_facet_example_dir")?;
     let mut schema_builder = SchemaBuilder::default();
 
     schema_builder.add_text_field("name", TEXT | STORED);
 
     // this is our faceted field
     schema_builder.add_facet_field("tags");
 
     let schema = schema_builder.build();
 
     let index = Index::create_in_dir(&index_path, schema.clone())?;
 
     let mut index_writer = index.writer(50_000_000)?;
 
     let name = schema.get_field("name").unwrap();
     let tags = schema.get_field("tags").unwrap();
 
     // For convenience, tantivy also comes with a macro to
     // reduce the boilerplate above.
     index_writer.add_document(doc!(
         name => "the ditch",
         tags => Facet::from("/pools/north")
     ));
 
     index_writer.add_document(doc!(
         name => "little stacey",
         tags => Facet::from("/pools/south")
     ));
 
     index_writer.commit()?;
 
     index.load_searchers()?;
 
     let searcher = index.searcher();
 
     let mut facet_collector = FacetCollector::for_field(tags);
     facet_collector.add_facet("/pools");
 
     searcher.search(&AllQuery, &mut facet_collector).unwrap();
 
     let counts = facet_collector.harvest();
     // This lists all of the facet counts
     let facets: Vec<(&Facet, u64)> = counts.get("/pools").collect();
     assert_eq!(
         facets,
         vec![
             (&Facet::from("/pools/north"), 1),
-            (&Facet::from("/pools/south"), 1)
+            (&Facet::from("/pools/south"), 1),
         ]
     );
 
     Ok(())
 }

 use tempdir::TempDir;
@@ -7,18 +7,15 @@
 // the list of documents containing a term, getting
 // its term frequency, and accessing its positions.
 
-
 // ---
 // Importing tantivy...
 #[macro_use]
 extern crate tantivy;
 use tantivy::schema::*;
 use tantivy::Index;
-use tantivy::{DocSet, DocId, Postings};
+use tantivy::{DocId, DocSet, Postings};
 
 fn main() -> tantivy::Result<()> {
-
     // We first create a schema for the sake of the
     // example. Check the `basic_search` example for more information.
     let mut schema_builder = SchemaBuilder::default();
@@ -47,7 +44,6 @@ fn main() -> tantivy::Result<()> {
     // there is actually only one segment here, but let's iterate through the list
     // anyway)
     for segment_reader in searcher.segment_readers() {
-
         // A segment contains different data structure.
         // Inverted index stands for the combination of
         // - the term dictionary
@@ -58,19 +54,18 @@ fn main() -> tantivy::Result<()> {
         // Let's go through all docs containing the term `title:the` and access their position
         let term_the = Term::from_field_text(title, "the");
 
-
         // This segment posting object is like a cursor over the documents matching the term.
         // The `IndexRecordOption` arguments tells tantivy we will be interested in both term frequencies
         // and positions.
         //
         // If you don't need all this information, you may get better performance by decompressing less
         // information.
-        if let Some(mut segment_postings) = inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions) {
-
+        if let Some(mut segment_postings) =
+            inverted_index.read_postings(&term_the, IndexRecordOption::WithFreqsAndPositions)
+        {
             // this buffer will be used to request for positions
             let mut positions: Vec<u32> = Vec::with_capacity(100);
             while segment_postings.advance() {
-
                 // the number of time the term appears in the document.
                 let doc_id: DocId = segment_postings.doc(); //< do not try to access this before calling advance once.
@@ -98,7 +93,6 @@ fn main() -> tantivy::Result<()> {
         }
     }
 
-
     // A `Term` is a text token associated with a field.
     // Let's go through all docs containing the term `title:the` and access their position
     let term_the = Term::from_field_text(title, "the");
@@ -111,7 +105,6 @@ fn main() -> tantivy::Result<()> {
     // Also, for some VERY specific high performance use case like an OLAP analysis of logs,
     // you can get better performance by accessing directly the blocks of doc ids.
     for segment_reader in searcher.segment_readers() {
-
         // A segment contains different data structure.
         // Inverted index stands for the combination of
         // - the term dictionary
@@ -124,7 +117,9 @@ fn main() -> tantivy::Result<()> {
         //
         // If you don't need all this information, you may get better performance by decompressing less
         // information.
-        if let Some(mut block_segment_postings) = inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic) {
+        if let Some(mut block_segment_postings) =
+            inverted_index.read_block_postings(&term_the, IndexRecordOption::Basic)
+        {
             while block_segment_postings.advance() {
                 // Once again these docs MAY contains deleted documents as well.
                 let docs = block_segment_postings.docs();
@@ -136,4 +131,3 @@ fn main() -> tantivy::Result<()> {
 
     Ok(())
 }
-
71 examples/snippet.rs Normal file

@@ -0,0 +1,71 @@
// # Snippet example
//
// This example shows how to return a representative snippet of
// your hit result.
// Snippet are an extracted of a target document, and returned in HTML format.
// The keyword searched by the user are highlighted with a `<b>` tag.
extern crate tempdir;

// ---
// Importing tantivy...
#[macro_use]
extern crate tantivy;
use tantivy::collector::TopCollector;
use tantivy::query::QueryParser;
use tantivy::schema::*;
use tantivy::Index;
use tantivy::SnippetGenerator;
use tempdir::TempDir;

fn main() -> tantivy::Result<()> {
    // Let's create a temporary directory for the
    // sake of this example
    let index_path = TempDir::new("tantivy_example_dir")?;

    // # Defining the schema
    let mut schema_builder = SchemaBuilder::default();
    let title = schema_builder.add_text_field("title", TEXT | STORED);
    let body = schema_builder.add_text_field("body", TEXT | STORED);
    let schema = schema_builder.build();

    // # Indexing documents
    let index = Index::create_in_dir(&index_path, schema.clone())?;

    let mut index_writer = index.writer(50_000_000)?;

    // we'll only need one doc for this example.
    index_writer.add_document(doc!(
        title => "Of Mice and Men",
        body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                 bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                 over the yellow sands in the sunlight before reaching the narrow pool. On one \
                 side of the river the golden foothill slopes curve up to the strong and rocky \
                 Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                 fresh and green with every spring, carrying in their lower leaf junctures the \
                 debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                 limbs and branches that arch over the pool"
    ));
    // ...
    index_writer.commit()?;

    index.load_searchers()?;

    let searcher = index.searcher();
    let query_parser = QueryParser::for_index(&index, vec![title, body]);
    let query = query_parser.parse_query("sycamore spring")?;

    let mut top_collector = TopCollector::with_limit(10);
    searcher.search(&*query, &mut top_collector)?;

    let snippet_generator = SnippetGenerator::new(&searcher, &*query, body)?;

    let doc_addresses = top_collector.docs();
    for doc_address in doc_addresses {
        let doc = searcher.doc(doc_address)?;
        let snippet = snippet_generator.snippet_from_doc(&doc);
        println!("title: {}", doc.get_first(title).unwrap().text().unwrap());
        println!("snippet: {}", snippet.to_html());
    }

    Ok(())
}
@@ -22,72 +22,71 @@ use tantivy::tokenizer::*;
 use tantivy::Index;
 
 fn main() -> tantivy::Result<()> {
     // this example assumes you understand the content in `basic_search`
-    let index_path = TempDir::new("tantivy_stopwords_example_dir")?;
     let mut schema_builder = SchemaBuilder::default();
 
     // This configures your custom options for how tantivy will
     // store and process your content in the index; The key
     // to note is that we are setting the tokenizer to `stoppy`
     // which will be defined and registered below.
     let text_field_indexing = TextFieldIndexing::default()
         .set_tokenizer("stoppy")
         .set_index_option(IndexRecordOption::WithFreqsAndPositions);
     let text_options = TextOptions::default()
         .set_indexing_options(text_field_indexing)
         .set_stored();
 
     // Our first field is title.
     schema_builder.add_text_field("title", text_options);
 
     // Our second field is body.
     let text_field_indexing = TextFieldIndexing::default()
         .set_tokenizer("stoppy")
         .set_index_option(IndexRecordOption::WithFreqsAndPositions);
     let text_options = TextOptions::default()
         .set_indexing_options(text_field_indexing)
         .set_stored();
     schema_builder.add_text_field("body", text_options);
 
     let schema = schema_builder.build();
 
-    let index = Index::create_in_dir(&index_path, schema.clone())?;
+    let index = Index::create_in_ram(schema.clone());
 
     // This tokenizer lowers all of the text (to help with stop word matching)
     // then removes all instances of `the` and `and` from the corpus
     let tokenizer = SimpleTokenizer
         .filter(LowerCaser)
         .filter(StopWordFilter::remove(vec![
             "the".to_string(),
             "and".to_string(),
         ]));
 
     index.tokenizers().register("stoppy", tokenizer);
 
     let mut index_writer = index.writer(50_000_000)?;
 
     let title = schema.get_field("title").unwrap();
     let body = schema.get_field("body").unwrap();
 
     index_writer.add_document(doc!(
         title => "The Old Man and the Sea",
         body => "He was an old man who fished alone in a skiff in the Gulf Stream and \
                  he had gone eighty-four days now without taking a fish."
     ));
 
     index_writer.add_document(doc!(
         title => "Of Mice and Men",
         body => "A few miles south of Soledad, the Salinas River drops in close to the hillside \
                  bank and runs deep and green. The water is warm too, for it has slipped twinkling \
                  over the yellow sands in the sunlight before reaching the narrow pool. On one \
                  side of the river the golden foothill slopes curve up to the strong and rocky \
                  Gabilan Mountains, but on the valley side the water is lined with trees—willows \
                  fresh and green with every spring, carrying in their lower leaf junctures the \
                  debris of the winter’s flooding; and sycamores with mottled, white, recumbent \
                  limbs and branches that arch over the pool"
     ));
 
     index_writer.add_document(doc!(
         title => "Frankenstein",
         body => "You will rejoice to hear that no disaster has accompanied the commencement of an \
                  enterprise which you have regarded with such evil forebodings. I arrived here \
@@ -95,35 +94,28 @@ fn main() -> tantivy::Result<()> {
                  increasing confidence in the success of my undertaking."
     ));
 
     index_writer.commit()?;
 
     index.load_searchers()?;
 
     let searcher = index.searcher();
 
     let query_parser = QueryParser::for_index(&index, vec![title, body]);
 
-    // this will have NO hits because it was filtered out
-    // because the query is run through the analyzer you
-    // actually will get an error here because the query becomes
-    // empty
-    assert!(query_parser.parse_query("the").is_err());
-
-    // this will have hits
-    let query = query_parser.parse_query("is")?;
+    // stop words are applied on the query as well.
+    // The following will be equivalent to `title:frankenstein`
+    let query = query_parser.parse_query("title:\"the Frankenstein\"")?;
 
     let mut top_collector = TopCollector::with_limit(10);
 
     searcher.search(&*query, &mut top_collector)?;
 
     let doc_addresses = top_collector.docs();
     for doc_address in doc_addresses {
-        let retrieved_doc = searcher.doc(&doc_address)?;
+        let retrieved_doc = searcher.doc(doc_address)?;
         println!("{}", schema.to_json(&retrieved_doc));
     }
 
     Ok(())
 }

 use tempdir::TempDir;
2 run-tests.sh Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
cargo test --no-default-features --features mmap -- --test-threads 1
@@ -342,16 +342,19 @@ impl FacetCollector {
     pub fn harvest(mut self) -> FacetCounts {
         self.finalize_segment();
 
-        let collapsed_facet_ords: Vec<&[u64]> = self.segment_counters
+        let collapsed_facet_ords: Vec<&[u64]> = self
+            .segment_counters
             .iter()
             .map(|segment_counter| &segment_counter.facet_ords[..])
             .collect();
-        let collapsed_facet_counts: Vec<&[u64]> = self.segment_counters
+        let collapsed_facet_counts: Vec<&[u64]> = self
+            .segment_counters
             .iter()
             .map(|segment_counter| &segment_counter.facet_counts[..])
             .collect();
 
-        let facet_streams = self.segment_counters
+        let facet_streams = self
+            .segment_counters
             .iter()
             .map(|seg_counts| seg_counts.facet_reader.facet_dict().range().into_stream())
             .collect::<Vec<_>>();
@@ -374,10 +377,8 @@ impl FacetCollector {
                     } else {
                         collapsed_facet_counts[seg_ord][collapsed_term_id]
                     }
-                })
-                .unwrap_or(0)
-            })
-            .sum();
+                }).unwrap_or(0)
+            }).sum();
         if count > 0u64 {
             let bytes: Vec<u8> = facet_merger.key().to_owned();
             // may create an corrupted facet if the term dicitonary is corrupted
@@ -402,7 +403,8 @@ impl Collector for FacetCollector {
 
     fn collect(&mut self, doc: DocId, _: Score) {
         let facet_reader: &mut FacetReader = unsafe {
-            &mut *self.ff_reader
+            &mut *self
+                .ff_reader
                 .as_ref()
                 .expect("collect() was called before set_segment. This should never happen.")
                 .get()
@@ -476,9 +478,8 @@ impl FacetCounts {
             heap.push(Hit { count, facet });
         }
 
-        let mut lowest_count: u64 = heap.peek().map(|hit| hit.count)
-            .unwrap_or(u64::MIN); //< the `unwrap_or` case may be triggered but the value
-        // is never used in that case.
+        let mut lowest_count: u64 = heap.peek().map(|hit| hit.count).unwrap_or(u64::MIN); //< the `unwrap_or` case may be triggered but the value
+                                                                                          // is never used in that case.
 
         for (facet, count) in it {
             if count > lowest_count {
@@ -526,8 +527,7 @@ mod tests {
                 n /= 4;
                 let leaf = n % 5;
                 Facet::from(&format!("/top{}/mid{}/leaf{}", top, mid, leaf))
-            })
-            .collect();
+            }).collect();
         for i in 0..num_facets * 10 {
             let mut doc = Document::new();
             doc.add_facet(facet_field, facets[i % num_facets].clone());
@@ -554,7 +554,8 @@ mod tests {
                 ("/top1/mid1", 50),
                 ("/top1/mid2", 50),
                 ("/top1/mid3", 50),
-            ].iter()
+            ]
+            .iter()
             .map(|&(facet_str, count)| (String::from(facet_str), count))
             .collect::<Vec<_>>()
         );
@@ -618,9 +619,13 @@ mod tests {
                 let facet = Facet::from(&format!("/facet/{}", c));
                 let doc = doc!(facet_field => facet);
                 iter::repeat(doc).take(count)
-            })
-            .map(|mut doc| { doc.add_facet(facet_field, &format!("/facet/{}", thread_rng().sample(&uniform) )); doc})
-            .collect();
+            }).map(|mut doc| {
+                doc.add_facet(
+                    facet_field,
+                    &format!("/facet/{}", thread_rng().sample(&uniform)),
+                );
+                doc
+            }).collect();
         thread_rng().shuffle(&mut docs[..]);
 
         let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
@@ -15,7 +15,14 @@ mod multi_collector;
 pub use self::multi_collector::MultiCollector;
 
 mod top_collector;
-pub use self::top_collector::TopCollector;
+
+mod top_score_collector;
+pub use self::top_score_collector::TopScoreCollector;
+#[deprecated]
+pub use self::top_score_collector::TopScoreCollector as TopCollector;
+
+mod top_field_collector;
+pub use self::top_field_collector::TopFieldCollector;
 
 mod facet_collector;
 pub use self::facet_collector::FacetCollector;
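A note on the re-export above: existing code that names `TopCollector` keeps compiling but now triggers a deprecation warning, since the alias points at `TopScoreCollector`. A minimal sketch of the migration (the `TopScoreCollector::with_limit` call is grounded in the `multi_collector` test just below):

```rust
// Before (now a deprecated alias):
// use tantivy::collector::TopCollector;
// let collector = TopCollector::with_limit(10);

// After:
use tantivy::collector::TopScoreCollector;

fn make_collector() -> TopScoreCollector {
    TopScoreCollector::with_limit(10)
}
```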
@@ -100,11 +100,11 @@ impl<'a> Collector for MultiCollector<'a> {
 mod tests {
 
     use super::*;
-    use collector::{Collector, CountCollector, TopCollector};
+    use collector::{Collector, CountCollector, TopScoreCollector};
 
     #[test]
     fn test_multi_collector() {
-        let mut top_collector = TopCollector::with_limit(2);
+        let mut top_collector = TopScoreCollector::with_limit(2);
         let mut count_collector = CountCollector::default();
         {
             let mut collectors =
@@ -1,115 +1,61 @@
 use super::Collector;
 use std::cmp::Ordering;
 use std::collections::BinaryHeap;
 use DocAddress;
 use DocId;
 use Result;
 use Score;
 use SegmentLocalId;
 use SegmentReader;
 
-// Rust heap is a max-heap and we need a min heap.
+/// Contains a feature (field, score, etc.) of a document along with the document address.
+///
+/// It has a custom implementation of `PartialOrd` that reverses the order. This is because the
+/// default Rust heap is a max heap, whereas a min heap is needed.
 #[derive(Clone, Copy)]
-struct GlobalScoredDoc {
-    score: Score,
+pub struct ComparableDoc<T> {
+    feature: T,
     doc_address: DocAddress,
 }
 
-impl PartialOrd for GlobalScoredDoc {
-    fn partial_cmp(&self, other: &GlobalScoredDoc) -> Option<Ordering> {
+impl<T: PartialOrd> PartialOrd for ComparableDoc<T> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         Some(self.cmp(other))
     }
 }
 
-impl Ord for GlobalScoredDoc {
+impl<T: PartialOrd> Ord for ComparableDoc<T> {
     #[inline]
-    fn cmp(&self, other: &GlobalScoredDoc) -> Ordering {
+    fn cmp(&self, other: &Self) -> Ordering {
         other
-            .score
-            .partial_cmp(&self.score)
+            .feature
+            .partial_cmp(&self.feature)
             .unwrap_or_else(|| other.doc_address.cmp(&self.doc_address))
     }
 }
 
-impl PartialEq for GlobalScoredDoc {
-    fn eq(&self, other: &GlobalScoredDoc) -> bool {
+impl<T: PartialOrd> PartialEq for ComparableDoc<T> {
+    fn eq(&self, other: &Self) -> bool {
         self.cmp(other) == Ordering::Equal
     }
 }
 
-impl Eq for GlobalScoredDoc {}
+impl<T: PartialOrd> Eq for ComparableDoc<T> {}
 
 /// The Top Collector keeps track of the K documents
-/// with the best scores.
+/// sorted by type `T`.
 ///
 /// The implementation is based on a `BinaryHeap`.
 /// The theorical complexity for collecting the top `K` out of `n` documents
 /// is `O(n log K)`.
-///
-/// ```rust
-/// #[macro_use]
-/// extern crate tantivy;
-/// use tantivy::schema::{SchemaBuilder, TEXT};
-/// use tantivy::{Index, Result, DocId, Score};
-/// use tantivy::collector::TopCollector;
-/// use tantivy::query::QueryParser;
-///
-/// # fn main() { example().unwrap(); }
-/// fn example() -> Result<()> {
-///     let mut schema_builder = SchemaBuilder::new();
-///     let title = schema_builder.add_text_field("title", TEXT);
-///     let schema = schema_builder.build();
-///     let index = Index::create_in_ram(schema);
-///     {
-///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
-///         index_writer.add_document(doc!(
-///             title => "The Name of the Wind",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of Muadib",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "A Dairy Cow",
-///         ));
-///         index_writer.add_document(doc!(
-///             title => "The Diary of a Young Girl",
-///         ));
-///         index_writer.commit().unwrap();
-///     }
-///
-///     index.load_searchers()?;
-///     let searcher = index.searcher();
-///
-///     {
-///         let mut top_collector = TopCollector::with_limit(2);
-///         let query_parser = QueryParser::for_index(&index, vec![title]);
-///         let query = query_parser.parse_query("diary")?;
-///         searcher.search(&*query, &mut top_collector).unwrap();
-///
-///         let score_docs: Vec<(Score, DocId)> = top_collector
-///             .score_docs()
-///             .into_iter()
-///             .map(|(score, doc_address)| (score, doc_address.doc()))
-///             .collect();
-///
-///         assert_eq!(score_docs, vec![(0.7261542, 1), (0.6099695, 3)]);
-///     }
-///
-///     Ok(())
-/// }
-/// ```
-pub struct TopCollector {
+pub struct TopCollector<T> {
     limit: usize,
-    heap: BinaryHeap<GlobalScoredDoc>,
+    heap: BinaryHeap<ComparableDoc<T>>,
     segment_id: u32,
 }
 
-impl TopCollector {
+impl<T: PartialOrd + Clone> TopCollector<T> {
     /// Creates a top collector, with a number of documents equal to "limit".
     ///
     /// # Panics
     /// The method panics if limit is 0
-    pub fn with_limit(limit: usize) -> TopCollector {
+    pub fn with_limit(limit: usize) -> TopCollector<T> {
        if limit < 1 {
            panic!("Limit must be strictly greater than 0.");
        }
@@ -125,23 +71,27 @@ impl TopCollector {
     /// Calling this method triggers the sort.
     /// The result of the sort is not cached.
     pub fn docs(&self) -> Vec<DocAddress> {
-        self.score_docs()
+        self.top_docs()
             .into_iter()
-            .map(|score_doc| score_doc.1)
+            .map(|(_feature, doc)| doc)
             .collect()
     }
 
-    /// Returns K best ScoredDocument sorted in decreasing order.
+    /// Returns K best FeatureDocuments sorted in decreasing order.
     ///
     /// Calling this method triggers the sort.
     /// The result of the sort is not cached.
-    pub fn score_docs(&self) -> Vec<(Score, DocAddress)> {
-        let mut scored_docs: Vec<GlobalScoredDoc> = self.heap.iter().cloned().collect();
-        scored_docs.sort();
-        scored_docs
+    pub fn top_docs(&self) -> Vec<(T, DocAddress)> {
+        let mut feature_docs: Vec<ComparableDoc<T>> = self.heap.iter().cloned().collect();
+        feature_docs.sort();
+        feature_docs
             .into_iter()
-            .map(|GlobalScoredDoc { score, doc_address }| (score, doc_address))
-            .collect()
+            .map(
+                |ComparableDoc {
+                     feature,
+                     doc_address,
+                 }| (feature, doc_address),
+            ).collect()
     }
 
     /// Return true iff at least K documents have gone through
@@ -150,46 +100,45 @@ impl TopCollector {
     pub fn at_capacity(&self) -> bool {
         self.heap.len() >= self.limit
     }
-}
 
-impl Collector for TopCollector {
-    fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> Result<()> {
+    /// Sets the segment local ID for the collector
+    pub fn set_segment_id(&mut self, segment_id: SegmentLocalId) {
         self.segment_id = segment_id;
-        Ok(())
     }
 
-    fn collect(&mut self, doc: DocId, score: Score) {
+    /// Collects a document scored by the given feature
+    ///
+    /// It collects documents until it has reached the max capacity. Once it reaches capacity, it
+    /// will compare the lowest scoring item with the given one and keep whichever is greater.
+    pub fn collect(&mut self, doc: DocId, feature: T) {
         if self.at_capacity() {
             // It's ok to unwrap as long as a limit of 0 is forbidden.
-            let limit_doc: GlobalScoredDoc = *self.heap
+            let limit_doc: ComparableDoc<T> = self
+                .heap
                 .peek()
-                .expect("Top collector with size 0 is forbidden");
-            if limit_doc.score < score {
-                let mut mut_head = self.heap
+                .expect("Top collector with size 0 is forbidden")
+                .clone();
+            if limit_doc.feature < feature {
+                let mut mut_head = self
+                    .heap
                     .peek_mut()
                     .expect("Top collector with size 0 is forbidden");
-                mut_head.score = score;
+                mut_head.feature = feature;
                 mut_head.doc_address = DocAddress(self.segment_id, doc);
             }
         } else {
-            let wrapped_doc = GlobalScoredDoc {
-                score,
+            let wrapped_doc = ComparableDoc {
+                feature,
                 doc_address: DocAddress(self.segment_id, doc),
             };
             self.heap.push(wrapped_doc);
         }
     }
-
-    fn requires_scoring(&self) -> bool {
-        true
-    }
 }
 
 #[cfg(test)]
 mod tests {
 
     use super::*;
-    use collector::Collector;
     use DocId;
     use Score;
 
@@ -201,7 +150,7 @@ mod tests {
         top_collector.collect(5, 0.3);
         assert!(!top_collector.at_capacity());
         let score_docs: Vec<(Score, DocId)> = top_collector
-            .score_docs()
+            .top_docs()
             .into_iter()
             .map(|(score, doc_address)| (score, doc_address.doc()))
             .collect();
@@ -219,7 +168,7 @@ mod tests {
         assert!(top_collector.at_capacity());
         {
             let score_docs: Vec<(Score, DocId)> = top_collector
-                .score_docs()
+                .top_docs()
                 .into_iter()
                 .map(|(score, doc_address)| (score, doc_address.doc()))
                 .collect();
@@ -238,7 +187,7 @@ mod tests {
     #[test]
     #[should_panic]
     fn test_top_0() {
-        TopCollector::with_limit(0);
+        let _collector: TopCollector<Score> = TopCollector::with_limit(0);
     }
 
 }
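The reversed `Ord` trick used by `ComparableDoc` above is worth seeing in isolation: `std::collections::BinaryHeap` is a max-heap, so inverting the comparison makes `peek()` return the *smallest* retained element, i.e. the current cut-off of the top-K set. A self-contained, standard-library-only sketch using `std::cmp::Reverse` (which achieves the same inversion as the hand-written impl):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn main() {
    // Keep the top 2 of a stream of scores using a min-heap of size 2.
    let limit = 2;
    let mut heap: BinaryHeap<Reverse<u32>> = BinaryHeap::new();
    for &score in [3u32, 9, 1, 7, 5].iter() {
        if heap.len() < limit {
            heap.push(Reverse(score));
        } else if heap.peek().map(|Reverse(lowest)| *lowest < score).unwrap_or(false) {
            // Replace the current smallest of the top-K, just as
            // `TopCollector::collect` does via `peek_mut`.
            heap.pop();
            heap.push(Reverse(score));
        }
    }
    let mut top: Vec<u32> = heap.into_iter().map(|Reverse(v)| v).collect();
    top.sort_by(|a, b| b.cmp(a));
    assert_eq!(top, vec![9, 7]);
}
```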
263 src/collector/top_field_collector.rs Normal file

@@ -0,0 +1,263 @@
use super::Collector;
use collector::top_collector::TopCollector;
use fastfield::FastFieldReader;
use fastfield::FastValue;
use schema::Field;
use DocAddress;
use DocId;
use Result;
use Score;
use SegmentReader;

/// The Top Field Collector keeps track of the K documents
/// sorted by a fast field in the index
///
/// The implementation is based on a `BinaryHeap`.
/// The theorical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT, FAST};
/// use tantivy::{Index, Result, DocId};
/// use tantivy::collector::TopFieldCollector;
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
///     let mut schema_builder = SchemaBuilder::new();
///     let title = schema_builder.add_text_field("title", TEXT);
///     let rating = schema_builder.add_u64_field("rating", FAST);
///     let schema = schema_builder.build();
///     let index = Index::create_in_ram(schema);
///     {
///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
///         index_writer.add_document(doc!(
///             title => "The Name of the Wind",
///             rating => 92u64,
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of Muadib",
///             rating => 97u64,
///         ));
///         index_writer.add_document(doc!(
///             title => "A Dairy Cow",
///             rating => 63u64,
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of a Young Girl",
///             rating => 80u64,
///         ));
///         index_writer.commit().unwrap();
///     }
///
///     index.load_searchers()?;
///     let searcher = index.searcher();
///
///     {
///         let mut top_collector = TopFieldCollector::with_limit(rating, 2);
///         let query_parser = QueryParser::for_index(&index, vec![title]);
///         let query = query_parser.parse_query("diary")?;
///         searcher.search(&*query, &mut top_collector).unwrap();
///
///         let score_docs: Vec<(u64, DocId)> = top_collector
///             .top_docs()
///             .into_iter()
///             .map(|(field, doc_address)| (field, doc_address.doc()))
///             .collect();
///
///         assert_eq!(score_docs, vec![(97u64, 1), (80, 3)]);
///     }
///
///     Ok(())
/// }
/// ```
pub struct TopFieldCollector<T: FastValue> {
    field: Field,
    collector: TopCollector<T>,
    fast_field: Option<FastFieldReader<T>>,
}

impl<T: FastValue + PartialOrd + Clone> TopFieldCollector<T> {
    /// Creates a top field collector, with a number of documents equal to "limit".
    ///
    /// The given field name must be a fast field, otherwise the collector have an error while
    /// collecting results.
    ///
    /// # Panics
    /// The method panics if limit is 0
    pub fn with_limit(field: Field, limit: usize) -> Self {
        TopFieldCollector {
            field,
            collector: TopCollector::with_limit(limit),
            fast_field: None,
        }
    }

    /// Returns K best documents sorted the given field name in decreasing order.
    ///
    /// Calling this method triggers the sort.
    /// The result of the sort is not cached.
    pub fn docs(&self) -> Vec<DocAddress> {
        self.collector.docs()
    }

    /// Returns K best FieldDocuments sorted in decreasing order.
    ///
    /// Calling this method triggers the sort.
    /// The result of the sort is not cached.
    pub fn top_docs(&self) -> Vec<(T, DocAddress)> {
        self.collector.top_docs()
    }

    /// Return true iff at least K documents have gone through
    /// the collector.
    #[inline]
    pub fn at_capacity(&self) -> bool {
        self.collector.at_capacity()
    }
}

impl<T: FastValue + PartialOrd + Clone> Collector for TopFieldCollector<T> {
    fn set_segment(&mut self, segment_id: u32, segment: &SegmentReader) -> Result<()> {
        self.collector.set_segment_id(segment_id);
        self.fast_field = Some(segment.fast_field_reader(self.field)?);
        Ok(())
    }

    fn collect(&mut self, doc: DocId, _score: Score) {
        let field_value = self
            .fast_field
            .as_ref()
            .expect("collect() was called before set_segment. This should never happen.")
            .get(doc);
        self.collector.collect(doc, field_value);
    }

    fn requires_scoring(&self) -> bool {
        false
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use query::Query;
    use query::QueryParser;
    use schema::Field;
    use schema::IntOptions;
    use schema::Schema;
    use schema::{SchemaBuilder, FAST, TEXT};
    use Index;
    use IndexWriter;
    use TantivyError;

    const TITLE: &str = "title";
    const SIZE: &str = "size";

    #[test]
    fn test_top_collector_not_at_capacity() {
        let mut schema_builder = SchemaBuilder::new();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, FAST);
        let schema = schema_builder.build();
        let (index, query) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
            index_writer.add_document(doc!(
                title => "growler of beer",
                size => 64u64,
            ));
            index_writer.add_document(doc!(
                title => "pint of beer",
                size => 16u64,
            ));
        });
        let searcher = index.searcher();

        let mut top_collector = TopFieldCollector::with_limit(size, 4);
        searcher.search(&*query, &mut top_collector).unwrap();
        assert!(!top_collector.at_capacity());

        let score_docs: Vec<(u64, DocId)> = top_collector
            .top_docs()
            .into_iter()
            .map(|(field, doc_address)| (field, doc_address.doc()))
            .collect();
        assert_eq!(score_docs, vec![(64, 1), (16, 2), (12, 0)]);
    }

    #[test]
    #[should_panic]
    fn test_field_does_not_exist() {
        let mut schema_builder = SchemaBuilder::new();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, FAST);
        let schema = schema_builder.build();
        let (index, _) = index("beer", title, schema, |index_writer| {
            index_writer.add_document(doc!(
                title => "bottle of beer",
                size => 12u64,
            ));
        });
        let searcher = index.searcher();
        let segment = searcher.segment_reader(0);
        let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(2), 4);
        let _ = top_collector.set_segment(0, segment);
    }

    #[test]
    fn test_field_not_fast_field() {
        let mut schema_builder = SchemaBuilder::new();
        let title = schema_builder.add_text_field(TITLE, TEXT);
        let size = schema_builder.add_u64_field(SIZE, IntOptions::default());
        let schema = schema_builder.build();
|
||||
let (index, _) = index("beer", title, schema, |index_writer| {
|
||||
index_writer.add_document(doc!(
|
||||
title => "bottle of beer",
|
||||
size => 12u64,
|
||||
));
|
||||
});
|
||||
let searcher = index.searcher();
|
||||
let segment = searcher.segment_reader(0);
|
||||
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(size, 4);
|
||||
assert_matches!(
|
||||
top_collector.set_segment(0, segment),
|
||||
Err(TantivyError::FastFieldError(_))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_collect_before_set_segment() {
|
||||
let mut top_collector: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(0), 4);
|
||||
top_collector.collect(0, 0f32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_top_0() {
|
||||
let _: TopFieldCollector<u64> = TopFieldCollector::with_limit(Field(0), 0);
|
||||
}
|
||||
|
||||
fn index(
|
||||
query: &str,
|
||||
query_field: Field,
|
||||
schema: Schema,
|
||||
mut doc_adder: impl FnMut(&mut IndexWriter) -> (),
|
||||
) -> (Index, Box<Query>) {
|
||||
let index = Index::create_in_ram(schema);
|
||||
|
||||
let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
|
||||
doc_adder(&mut index_writer);
|
||||
index_writer.commit().unwrap();
|
||||
index.load_searchers().unwrap();
|
||||
|
||||
let query_parser = QueryParser::for_index(&index, vec![query_field]);
|
||||
let query = query_parser.parse_query(query).unwrap();
|
||||
(index, query)
|
||||
}
|
||||
}
|
||||
src/collector/top_score_collector.rs (new file, 187 lines)
@@ -0,0 +1,187 @@
use super::Collector;
use collector::top_collector::TopCollector;
use DocAddress;
use DocId;
use Result;
use Score;
use SegmentLocalId;
use SegmentReader;

/// The Top Score Collector keeps track of the K documents
/// sorted by their score.
///
/// The implementation is based on a `BinaryHeap`.
/// The theoretical complexity for collecting the top `K` out of `n` documents
/// is `O(n log K)`.
///
/// ```rust
/// #[macro_use]
/// extern crate tantivy;
/// use tantivy::schema::{SchemaBuilder, TEXT};
/// use tantivy::{Index, Result, DocId, Score};
/// use tantivy::collector::TopScoreCollector;
/// use tantivy::query::QueryParser;
///
/// # fn main() { example().unwrap(); }
/// fn example() -> Result<()> {
///     let mut schema_builder = SchemaBuilder::new();
///     let title = schema_builder.add_text_field("title", TEXT);
///     let schema = schema_builder.build();
///     let index = Index::create_in_ram(schema);
///     {
///         let mut index_writer = index.writer_with_num_threads(1, 3_000_000)?;
///         index_writer.add_document(doc!(
///             title => "The Name of the Wind",
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of Muadib",
///         ));
///         index_writer.add_document(doc!(
///             title => "A Dairy Cow",
///         ));
///         index_writer.add_document(doc!(
///             title => "The Diary of a Young Girl",
///         ));
///         index_writer.commit().unwrap();
///     }
///
///     index.load_searchers()?;
///     let searcher = index.searcher();
///
///     {
///         let mut top_collector = TopScoreCollector::with_limit(2);
///         let query_parser = QueryParser::for_index(&index, vec![title]);
///         let query = query_parser.parse_query("diary")?;
///         searcher.search(&*query, &mut top_collector).unwrap();
///
///         let score_docs: Vec<(Score, DocId)> = top_collector
///             .top_docs()
///             .into_iter()
///             .map(|(score, doc_address)| (score, doc_address.doc()))
///             .collect();
///
///         assert_eq!(score_docs, vec![(0.7261542, 1), (0.6099695, 3)]);
///     }
///
///     Ok(())
/// }
/// ```
pub struct TopScoreCollector {
    collector: TopCollector<Score>,
}

impl TopScoreCollector {
    /// Creates a top score collector, with a number of documents equal to "limit".
    ///
    /// # Panics
    /// The method panics if limit is 0
    pub fn with_limit(limit: usize) -> TopScoreCollector {
        TopScoreCollector {
            collector: TopCollector::with_limit(limit),
        }
    }

    /// Returns the K best scored documents sorted in decreasing order.
    ///
    /// Calling this method triggers the sort.
    /// The result of the sort is not cached.
    pub fn docs(&self) -> Vec<DocAddress> {
        self.collector.docs()
    }

    /// Returns the K best ScoredDocuments sorted in decreasing order.
    ///
    /// Calling this method triggers the sort.
    /// The result of the sort is not cached.
    pub fn top_docs(&self) -> Vec<(Score, DocAddress)> {
        self.collector.top_docs()
    }

    /// Returns the K best ScoredDocuments sorted in decreasing order.
    ///
    /// Calling this method triggers the sort.
    /// The result of the sort is not cached.
    #[deprecated]
    pub fn score_docs(&self) -> Vec<(Score, DocAddress)> {
        self.collector.top_docs()
    }

    /// Returns true iff at least K documents have gone through
    /// the collector.
    #[inline]
    pub fn at_capacity(&self) -> bool {
        self.collector.at_capacity()
    }
}

impl Collector for TopScoreCollector {
    fn set_segment(&mut self, segment_id: SegmentLocalId, _: &SegmentReader) -> Result<()> {
        self.collector.set_segment_id(segment_id);
        Ok(())
    }

    fn collect(&mut self, doc: DocId, score: Score) {
        self.collector.collect(doc, score);
    }

    fn requires_scoring(&self) -> bool {
        true
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use collector::Collector;
    use DocId;
    use Score;

    #[test]
    fn test_top_collector_not_at_capacity() {
        let mut top_collector = TopScoreCollector::with_limit(4);
        top_collector.collect(1, 0.8);
        top_collector.collect(3, 0.2);
        top_collector.collect(5, 0.3);
        assert!(!top_collector.at_capacity());
        let score_docs: Vec<(Score, DocId)> = top_collector
            .top_docs()
            .into_iter()
            .map(|(score, doc_address)| (score, doc_address.doc()))
            .collect();
        assert_eq!(score_docs, vec![(0.8, 1), (0.3, 5), (0.2, 3)]);
    }

    #[test]
    fn test_top_collector_at_capacity() {
        let mut top_collector = TopScoreCollector::with_limit(4);
        top_collector.collect(1, 0.8);
        top_collector.collect(3, 0.2);
        top_collector.collect(5, 0.3);
        top_collector.collect(7, 0.9);
        top_collector.collect(9, -0.2);
        assert!(top_collector.at_capacity());
        {
            let score_docs: Vec<(Score, DocId)> = top_collector
                .top_docs()
                .into_iter()
                .map(|(score, doc_address)| (score, doc_address.doc()))
                .collect();
            assert_eq!(score_docs, vec![(0.9, 7), (0.8, 1), (0.3, 5), (0.2, 3)]);
        }
        {
            let docs: Vec<DocId> = top_collector
                .docs()
                .into_iter()
                .map(|doc_address| doc_address.doc())
                .collect();
            assert_eq!(docs, vec![7, 1, 5, 3]);
        }
    }

    #[test]
    #[should_panic]
    fn test_top_0() {
        TopScoreCollector::with_limit(0);
    }
}
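Both collectors delegate to a generic `TopCollector`, whose `O(n log K)` bound comes from keeping a size-K heap. With `std::collections::BinaryHeap` (a max-heap), the usual trick is to store `Reverse`-wrapped entries so the worst element kept so far sits at the top and can be evicted cheaply. A minimal standalone sketch of that technique (not the crate's actual `TopCollector`):

use std::cmp::Reverse;
use std::collections::BinaryHeap;

/// Keep the K largest items out of a stream, in O(n log K).
fn top_k(items: impl Iterator<Item = u64>, k: usize) -> Vec<u64> {
    // Reverse turns the max-heap into a min-heap: peek() is the
    // smallest of the current top-K, i.e. the eviction candidate.
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(k);
    for item in items {
        if heap.len() < k {
            heap.push(Reverse(item));
        } else if heap.peek().map(|&Reverse(min)| item > min).unwrap_or(false) {
            heap.pop();
            heap.push(Reverse(item));
        }
    }
    let mut result: Vec<u64> = heap.into_iter().map(|Reverse(v)| v).collect();
    result.sort_by(|a, b| b.cmp(a)); // decreasing order, like top_docs()
    result
}

fn main() {
    assert_eq!(top_k(vec![12, 64, 16, 3].into_iter(), 2), vec![64, 16]);
}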
@@ -102,6 +102,7 @@ where
     addr + 8 <= data.len(),
     "The fast field should have been padded with 7 bytes."
 );
+#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
 let val_unshifted_unmasked: u64 =
     u64::from_le(unsafe { ptr::read_unaligned(data[addr..].as_ptr() as *const u64) });
 let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;

@@ -125,6 +126,7 @@ where
 for output_val in output.iter_mut() {
     let addr = addr_in_bits >> 3;
     let bit_shift = addr_in_bits & 7;
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
     let val_unshifted_unmasked: u64 =
         unsafe { ptr::read_unaligned(data[addr..].as_ptr() as *const u64) };
     let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
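The code above reads a bit-packed value that may straddle byte boundaries: it reads eight little-endian bytes starting at the containing byte, then shifts off the bit offset. A self-contained sketch of the same trick, with hypothetical names and a safe byte-copy standing in for the unaligned pointer read:

/// Extract the `num_bits`-wide value starting at bit `addr_in_bits`
/// from a little-endian buffer padded with at least 7 extra bytes.
fn unpack_bits(data: &[u8], addr_in_bits: usize, num_bits: u8) -> u64 {
    let addr = addr_in_bits >> 3;                // containing byte address
    let bit_shift = (addr_in_bits & 7) as u64;   // bit offset within that byte
    assert!(addr + 8 <= data.len(), "buffer must be padded with 7 bytes");
    // Safe stand-in for ptr::read_unaligned: copy 8 bytes, decode LE.
    let mut word = [0u8; 8];
    word.copy_from_slice(&data[addr..addr + 8]);
    let val_unshifted_unmasked = u64::from_le_bytes(word);
    // Works for num_bits + bit_shift <= 64, which the padding guarantees here.
    let mask = if num_bits == 64 { u64::max_value() } else { (1u64 << num_bits) - 1 };
    (val_unshifted_unmasked >> bit_shift) & mask
}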
@@ -34,17 +34,17 @@ impl TinySet {
 }

 /// Returns the complement of the set in `[0, 64[`.
-fn complement(&self) -> TinySet {
+fn complement(self) -> TinySet {
     TinySet(!self.0)
 }

 /// Returns true iff the `TinySet` contains the element `el`.
-pub fn contains(&self, el: u32) -> bool {
+pub fn contains(self, el: u32) -> bool {
     !self.intersect(TinySet::singleton(el)).is_empty()
 }

 /// Returns the intersection of `self` and `other`
-pub fn intersect(&self, other: TinySet) -> TinySet {
+pub fn intersect(self, other: TinySet) -> TinySet {
     TinySet(self.0 & other.0)
 }

@@ -77,7 +77,7 @@ impl TinySet {

 /// Returns true iff the `TinySet` is empty.
 #[inline(always)]
-pub fn is_empty(&self) -> bool {
+pub fn is_empty(self) -> bool {
     self.0 == 0u64
 }

@@ -114,7 +114,7 @@ impl TinySet {
     self.0 = 0u64;
 }

-pub fn len(&self) -> u32 {
+pub fn len(self) -> u32 {
     self.0.count_ones()
 }
 }
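This refactor passes `TinySet` by value: it is a `Copy` newtype over a single `u64`, so taking `self` costs no more than `&self` and avoids an indirection. A minimal sketch of the same bitset-in-a-word idea (standalone, names hypothetical):

#[derive(Clone, Copy)]
struct TinySet(u64);

impl TinySet {
    /// Set containing only `el` (el must be < 64).
    fn singleton(el: u32) -> TinySet { TinySet(1u64 << el) }
    /// Intersection is a single AND on the underlying word.
    fn intersect(self, other: TinySet) -> TinySet { TinySet(self.0 & other.0) }
    fn is_empty(self) -> bool { self.0 == 0 }
    /// Membership test: intersect with the singleton and check emptiness.
    fn contains(self, el: u32) -> bool { !self.intersect(TinySet::singleton(el)).is_empty() }
    /// Cardinality is a popcount.
    fn len(self) -> u32 { self.0.count_ones() }
}

fn main() {
    let s = TinySet(0b1010);
    assert!(s.contains(1) && s.contains(3) && !s.contains(2));
    assert_eq!(s.len(), 2);
}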
@@ -266,14 +266,14 @@ mod tests {

 #[test]
 fn test_bitset_large() {
-    let arr = generate_nonunique_unsorted(1_000_000, 50_000);
+    let arr = generate_nonunique_unsorted(100_000, 5_000);
     let mut btreeset: BTreeSet<u32> = BTreeSet::new();
-    let mut bitset = BitSet::with_max_value(1_000_000);
+    let mut bitset = BitSet::with_max_value(100_000);
     for el in arr {
         btreeset.insert(el);
         bitset.insert(el);
     }
-    for i in 0..1_000_000 {
+    for i in 0..100_000 {
         assert_eq!(btreeset.contains(&i), bitset.contains(i));
     }
     assert_eq!(btreeset.len(), bitset.len());
@@ -4,6 +4,8 @@ use common::VInt;
 use directory::ReadOnlySource;
 use directory::WritePtr;
 use schema::Field;
+use space_usage::PerFieldSpaceUsage;
+use space_usage::FieldUsage;
 use std::collections::HashMap;
 use std::io::Write;
 use std::io::{self, Read};

@@ -72,7 +74,8 @@ impl<W: Write> CompositeWrite<W> {
     let footer_offset = self.write.written_bytes();
     VInt(self.offsets.len() as u64).serialize(&mut self.write)?;

-    let mut offset_fields: Vec<_> = self.offsets
+    let mut offset_fields: Vec<_> = self
+        .offsets
         .iter()
         .map(|(file_addr, offset)| (*offset, *file_addr))
         .collect();

@@ -165,6 +168,16 @@ impl CompositeFile {
         .get(&FileAddr { field, idx })
         .map(|&(from, to)| self.data.slice(from, to))
 }

+pub fn space_usage(&self) -> PerFieldSpaceUsage {
+    let mut fields = HashMap::new();
+    for (&field_addr, &(start, end)) in self.offsets_index.iter() {
+        fields.entry(field_addr.field)
+            .or_insert_with(|| FieldUsage::empty(field_addr.field))
+            .add_field_idx(field_addr.idx, end - start);
+    }
+    PerFieldSpaceUsage::new(fields)
+}
 }

#[cfg(test)]
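The new `space_usage` method aggregates byte counts per field with the `HashMap` entry API: each file slice contributes its length to an accumulator created the first time its field is seen. A minimal sketch of the same accumulation pattern, with plain integers standing in for the space-usage types:

use std::collections::HashMap;

fn main() {
    // One (field_id, byte_count) pair per slice stored in the composite file.
    let slices = [(1u32, 100u64), (2, 50), (1, 25)];
    let mut per_field: HashMap<u32, u64> = HashMap::new();
    for &(field, bytes) in &slices {
        // entry() creates the accumulator on first sight of a field.
        *per_field.entry(field).or_insert(0) += bytes;
    }
    assert_eq!(per_field[&1], 125);
    assert_eq!(per_field[&2], 50);
}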
@@ -10,8 +10,6 @@ pub struct VInt(pub u64);
 const STOP_BIT: u8 = 128;

 impl VInt {
-
-
     pub fn val(&self) -> u64 {
         self.0
     }

@@ -20,14 +18,13 @@ impl VInt {
     VInt::deserialize(reader).map(|vint| vint.0)
 }

-pub fn serialize_into_vec(&self, output: &mut Vec<u8>){
+pub fn serialize_into_vec(&self, output: &mut Vec<u8>) {
     let mut buffer = [0u8; 10];
     let num_bytes = self.serialize_into(&mut buffer);
     output.extend(&buffer[0..num_bytes]);
 }

 fn serialize_into(&self, buffer: &mut [u8; 10]) -> usize {
-
     let mut remaining = self.0;
     for (i, b) in buffer.iter_mut().enumerate() {
         let next_byte: u8 = (remaining % 128u64) as u8;

@@ -74,7 +71,6 @@ impl BinarySerializable for VInt {
     }
 }

-
#[cfg(test)]
mod tests {

@@ -89,10 +85,10 @@ mod tests {
     }
     assert!(num_bytes > 0);
     if num_bytes < 10 {
-        assert!(1u64 << (7*num_bytes) > val);
+        assert!(1u64 << (7 * num_bytes) > val);
     }
     if num_bytes > 1 {
-        assert!(1u64 << (7*(num_bytes-1)) <= val);
+        assert!(1u64 << (7 * (num_bytes - 1)) <= val);
     }
     let serdeser_val = VInt::deserialize(&mut &v[..]).unwrap();
     assert_eq!(val, serdeser_val.0);

@@ -105,11 +101,11 @@ mod tests {
     aux_test_vint(5);
     aux_test_vint(u64::max_value());
     for i in 1..9 {
-        let power_of_128 = 1u64 << (7*i);
+        let power_of_128 = 1u64 << (7 * i);
         aux_test_vint(power_of_128 - 1u64);
-        aux_test_vint(power_of_128 );
+        aux_test_vint(power_of_128);
         aux_test_vint(power_of_128 + 1u64);
     }
     aux_test_vint(10);
     }
 }
}
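`STOP_BIT` drives a classic base-128 varint: each output byte carries 7 payload bits, and the high bit marks the final byte. A self-contained sketch of encode/decode under that scheme (not the crate's exact code, but the same wire format):

const STOP_BIT: u8 = 128;

/// Encode `val` as a base-128 varint, least significant chunk first.
fn vint_encode(mut val: u64, out: &mut Vec<u8>) {
    loop {
        let chunk = (val % 128) as u8; // low 7 bits
        val /= 128;
        if val == 0 {
            out.push(chunk | STOP_BIT); // last byte carries the stop bit
            return;
        }
        out.push(chunk);
    }
}

/// Decode a varint, returning (value, bytes consumed).
fn vint_decode(bytes: &[u8]) -> (u64, usize) {
    let mut val = 0u64;
    for (i, &b) in bytes.iter().enumerate() {
        val |= u64::from(b & 127) << (7 * i);
        if b & STOP_BIT != 0 {
            return (val, i + 1);
        }
    }
    panic!("truncated vint");
}

fn main() {
    let mut buf = Vec::new();
    vint_encode(300, &mut buf);
    assert_eq!(vint_decode(&buf), (300, 2)); // 300 needs two 7-bit chunks
}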
@@ -1,34 +1,36 @@
-use core::SegmentId;
-use error::TantivyError;
-use schema::Schema;
-use serde_json;
-use std::borrow::BorrowMut;
-use std::fmt;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Arc;
-use Result;
-
 use super::pool::LeasedItem;
 use super::pool::Pool;
 use super::segment::create_segment;
 use super::segment::Segment;
 use core::searcher::Searcher;
 use core::IndexMeta;
 use core::SegmentId;
 use core::SegmentMeta;
 use core::SegmentReader;
 use core::META_FILEPATH;
-use directory::ManagedDirectory;
 #[cfg(feature = "mmap")]
 use directory::MmapDirectory;
 use directory::{Directory, RAMDirectory};
+use directory::{DirectoryClone, ManagedDirectory};
 use error::TantivyError;
 use indexer::index_writer::open_index_writer;
 use indexer::index_writer::HEAP_SIZE_MIN;
 use indexer::segment_updater::save_new_metas;
-use indexer::DirectoryLock;
+use indexer::LockType;
 use num_cpus;
+use schema::Field;
+use schema::FieldType;
 use schema::Schema;
 use serde_json;
 use std::borrow::BorrowMut;
 use std::fmt;
 use std::path::Path;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
+use tokenizer::BoxedTokenizer;
 use tokenizer::TokenizerManager;
 use IndexWriter;
 use Result;

 fn load_metas(directory: &Directory) -> Result<IndexMeta> {
     let meta_data = directory.atomic_read(&META_FILEPATH)?;

@@ -47,6 +49,11 @@ pub struct Index {
 }

 impl Index {
+    /// Examines the directory to see if it contains an index
+    pub fn exists<Dir: Directory>(dir: &Dir) -> bool {
+        dir.exists(&META_FILEPATH)
+    }

     /// Creates a new index using the `RAMDirectory`.
     ///
     /// The index will be allocated in anonymous memory.

@@ -63,9 +70,28 @@ impl Index {
 #[cfg(feature = "mmap")]
 pub fn create_in_dir<P: AsRef<Path>>(directory_path: P, schema: Schema) -> Result<Index> {
     let mmap_directory = MmapDirectory::open(directory_path)?;
+    if Index::exists(&mmap_directory) {
+        return Err(TantivyError::IndexAlreadyExists);
+    }

     Index::create(mmap_directory, schema)
 }

+/// Opens or creates a new index in the provided directory
+#[cfg(feature = "mmap")]
+pub fn open_or_create<Dir: Directory>(dir: Dir, schema: Schema) -> Result<Index> {
+    if Index::exists(&dir) {
+        let index = Index::open(dir)?;
+        if index.schema() == schema {
+            Ok(index)
+        } else {
+            Err(TantivyError::SchemaError("An index exists but the schema does not match.".to_string()))
+        }
+    } else {
+        Index::create(dir, schema)
+    }
+}

 /// Creates a new index in a temp directory.
 ///
 /// The index will use the `MMapDirectory` in a newly created directory.

@@ -87,6 +113,8 @@ impl Index {
 }

 /// Create a new index from a directory.
+///
+/// This will overwrite existing meta.json
 fn from_directory(mut directory: ManagedDirectory, schema: Schema) -> Result<Index> {
     save_new_metas(schema.clone(), 0, directory.borrow_mut())?;
     let metas = IndexMeta::with_schema(schema);

@@ -113,6 +141,27 @@ impl Index {
     &self.tokenizers
 }

+/// Helper to access the tokenizer associated to a specific field.
+pub fn tokenizer_for_field(&self, field: Field) -> Result<Box<BoxedTokenizer>> {
+    let field_entry = self.schema.get_field_entry(field);
+    let field_type = field_entry.field_type();
+    let tokenizer_manager: &TokenizerManager = self.tokenizers();
+    let tokenizer_name_opt: Option<Box<BoxedTokenizer>> = match field_type {
+        FieldType::Str(text_options) => text_options
+            .get_indexing_options()
+            .map(|text_indexing_options| text_indexing_options.tokenizer().to_string())
+            .and_then(|tokenizer_name| tokenizer_manager.get(&tokenizer_name)),
+        _ => None,
+    };
+    match tokenizer_name_opt {
+        Some(tokenizer) => Ok(tokenizer),
+        None => Err(TantivyError::SchemaError(format!(
+            "{:?} is not a text field.",
+            field_entry.name()
+        ))),
+    }
+}

 /// Opens a new directory from an index path.
 #[cfg(feature = "mmap")]
 pub fn open_in_dir<P: AsRef<Path>>(directory_path: P) -> Result<Index> {

@@ -156,7 +205,7 @@ impl Index {
     num_threads: usize,
     overall_heap_size_in_bytes: usize,
 ) -> Result<IndexWriter> {
-    let directory_lock = DirectoryLock::lock(self.directory().box_clone())?;
+    let directory_lock = LockType::IndexWriterLock.acquire_lock(&self.directory)?;
     let heap_size_in_bytes_per_thread = overall_heap_size_in_bytes / num_threads;
     open_index_writer(
         self,

@@ -194,7 +243,8 @@ impl Index {

 /// Returns the list of segments that are searchable
 pub fn searchable_segments(&self) -> Result<Vec<Segment>> {
-    Ok(self.searchable_segment_metas()?
+    Ok(self
+        .searchable_segment_metas()?
         .into_iter()
         .map(|segment_meta| self.segment(segment_meta))
         .collect())

@@ -229,7 +279,8 @@ impl Index {

 /// Returns the list of segment ids that are searchable.
 pub fn searchable_segment_ids(&self) -> Result<Vec<SegmentId>> {
-    Ok(self.searchable_segment_metas()?
+    Ok(self
+        .searchable_segment_metas()?
         .iter()
         .map(|segment_meta| segment_meta.id())
         .collect())

@@ -242,13 +293,18 @@ impl Index {
     self.num_searchers.store(num_searchers, Ordering::Release);
 }

-/// Creates a new generation of searchers after
-/// a change of the set of searchable indexes.
+/// Update searchers so that they reflect the state of the last
+/// `.commit()`.
 ///
-/// This needs to be called when a new segment has been
-/// published or after a merge.
+/// If indexing happens in the same process as searching,
+/// you most likely want to call `.load_searchers()` right after each
+/// successful call to `.commit()`.
+///
+/// If indexing and searching happen in different processes, the way to
+/// get the freshest `index` at all times is to watch `meta.json` and
+/// call `load_searchers` whenever a change happens.
 pub fn load_searchers(&self) -> Result<()> {
+    let _meta_lock = LockType::MetaLock.acquire_lock(self.directory())?;
     let searchable_segments = self.searchable_segments()?;
     let segment_readers: Vec<SegmentReader> = searchable_segments
         .iter()

@@ -257,7 +313,7 @@ impl Index {
     let schema = self.schema();
     let num_searchers: usize = self.num_searchers.load(Ordering::Acquire);
     let searchers = (0..num_searchers)
-        .map(|_| Searcher::new(schema.clone(), segment_readers.clone()))
+        .map(|_| Searcher::new(schema.clone(), self.clone(), segment_readers.clone()))
         .collect();
     self.searcher_pool.publish_new_generation(searchers);
     Ok(())

@@ -295,3 +351,73 @@ impl Clone for Index {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use schema::{Schema, SchemaBuilder, INT_INDEXED, TEXT};
+    use Index;
+    use directory::RAMDirectory;
+
+    #[test]
+    fn test_indexer_for_field() {
+        let mut schema_builder = SchemaBuilder::default();
+        let num_likes_field = schema_builder.add_u64_field("num_likes", INT_INDEXED);
+        let body_field = schema_builder.add_text_field("body", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        assert!(index.tokenizer_for_field(body_field).is_ok());
+        assert_eq!(
+            format!("{:?}", index.tokenizer_for_field(num_likes_field).err()),
+            "Some(SchemaError(\"\\\"num_likes\\\" is not a text field.\"))"
+        );
+    }
+
+    #[test]
+    fn test_index_exists() {
+        let directory = RAMDirectory::create();
+        assert!(!Index::exists(&directory));
+        assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
+        assert!(Index::exists(&directory));
+    }
+
+    #[test]
+    fn open_or_create_should_create() {
+        let directory = RAMDirectory::create();
+        assert!(!Index::exists(&directory));
+        assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
+        assert!(Index::exists(&directory));
+    }
+
+    #[test]
+    fn open_or_create_should_open() {
+        let directory = RAMDirectory::create();
+        assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
+        assert!(Index::exists(&directory));
+        assert!(Index::open_or_create(directory, throw_away_schema()).is_ok());
+    }
+
+    #[test]
+    fn create_should_wipeoff_existing() {
+        let directory = RAMDirectory::create();
+        assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
+        assert!(Index::exists(&directory));
+        assert!(Index::create(directory.clone(), SchemaBuilder::default().build()).is_ok());
+    }
+
+    #[test]
+    fn open_or_create_exists_but_schema_does_not_match() {
+        let directory = RAMDirectory::create();
+        assert!(Index::create(directory.clone(), throw_away_schema()).is_ok());
+        assert!(Index::exists(&directory));
+        assert!(Index::open_or_create(directory.clone(), throw_away_schema()).is_ok());
+        let err = Index::open_or_create(directory, SchemaBuilder::default().build());
+        assert_eq!(
+            format!("{:?}", err.unwrap_err()),
+            "SchemaError(\"An index exists but the schema does not match.\")"
+        );
+    }
+
+    fn throw_away_schema() -> Schema {
+        let mut schema_builder = SchemaBuilder::default();
+        let _ = schema_builder.add_u64_field("num_likes", INT_INDEXED);
+        schema_builder.build()
+    }
+}
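Taken together, the new `exists`/`open_or_create` entry points and the commit-then-reload pattern documented on `load_searchers` look roughly like this in application code (a hedged sketch; the schema is a placeholder, and `open_or_create` is shown with a `RAMDirectory` even though the method is gated on the `mmap` feature in this patch):

#[macro_use]
extern crate tantivy;
use tantivy::directory::RAMDirectory;
use tantivy::schema::{SchemaBuilder, TEXT};
use tantivy::{Index, Result};

fn main() -> Result<()> {
    let mut schema_builder = SchemaBuilder::default();
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    // Re-opens the index if one already exists (and the schema matches),
    // creates it otherwise.
    let index = Index::open_or_create(RAMDirectory::create(), schema)?;

    let mut writer = index.writer_with_num_threads(1, 3_000_000)?;
    writer.add_document(doc!(body => "hello world"));
    writer.commit()?;

    // Same-process searching: reload searchers right after each commit.
    index.load_searchers()?;
    assert_eq!(index.searcher().num_docs(), 1);
    Ok(())
}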
@@ -58,7 +58,7 @@ mod tests {
 };
 let index_metas = IndexMeta {
     segments: Vec::new(),
-    schema: schema,
+    schema,
     opstamp: 0u64,
     payload: None,
 };
@@ -1,13 +1,13 @@
 use common::BinarySerializable;
 use directory::ReadOnlySource;
+use owned_read::OwnedRead;
+use positions::PositionReader;
 use postings::TermInfo;
 use postings::{BlockSegmentPostings, SegmentPostings};
 use schema::FieldType;
 use schema::IndexRecordOption;
 use schema::Term;
 use termdict::TermDictionary;
-use owned_read::OwnedRead;
-use positions::PositionReader;

 /// The inverted index reader is in charge of accessing
 /// the inverted index associated to a specific field.

@@ -32,6 +32,10 @@ pub struct InvertedIndexReader {
 }

 impl InvertedIndexReader {
+    #[cfg_attr(
+        feature = "cargo-clippy",
+        allow(clippy::needless_pass_by_value)
+    )] // for symmetry
     pub(crate) fn new(
         termdict: TermDictionary,
         postings_source: ReadOnlySource,

@@ -54,12 +58,12 @@ impl InvertedIndexReader {

 /// Creates an empty `InvertedIndexReader` object, which
 /// contains no terms at all.
-pub fn empty(field_type: FieldType) -> InvertedIndexReader {
+pub fn empty(field_type: &FieldType) -> InvertedIndexReader {
     let record_option = field_type
         .get_index_record_option()
         .unwrap_or(IndexRecordOption::Basic);
     InvertedIndexReader {
-        termdict: TermDictionary::empty(field_type),
+        termdict: TermDictionary::empty(&field_type),
         postings_source: ReadOnlySource::empty(),
         positions_source: ReadOnlySource::empty(),
         positions_idx_source: ReadOnlySource::empty(),

@@ -100,7 +104,6 @@ impl InvertedIndexReader {
     block_postings.reset(term_info.doc_freq, postings_reader);
 }

-
 /// Returns a block postings given a `Term`.
 /// This method is for an advanced usage only.
 ///

@@ -111,7 +114,7 @@ impl InvertedIndexReader {
     option: IndexRecordOption,
 ) -> Option<BlockSegmentPostings> {
     self.get_term_info(term)
-        .map(move|term_info| self.read_block_postings_from_terminfo(&term_info, option))
+        .map(move |term_info| self.read_block_postings_from_terminfo(&term_info, option))
 }

 /// Returns a block postings given a `term_info`.

@@ -147,7 +150,8 @@ impl InvertedIndexReader {
     if option.has_positions() {
         let position_reader = self.positions_source.clone();
         let skip_reader = self.positions_idx_source.clone();
-        let position_reader = PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
+        let position_reader =
+            PositionReader::new(position_reader, skip_reader, term_info.positions_idx);
         Some(position_reader)
     } else {
         None

@@ -33,10 +33,4 @@ lazy_static! {
 /// Removing this file is safe, but will prevent the garbage collection of all of the files that
 /// are currently in the directory
 pub static ref MANAGED_FILEPATH: PathBuf = PathBuf::from(".managed.json");
-
-/// Only one process should be able to write tantivy's index at a time.
-/// This file, when present, is in charge of preventing other processes from opening an IndexWriter.
-///
-/// If the process is killed and this file remains, it is safe to remove it manually.
-pub static ref LOCKFILE_FILEPATH: PathBuf = PathBuf::from(".tantivy-indexer.lock");
 }
@@ -87,7 +87,8 @@ impl<T> Deref for LeasedItem<T> {
 type Target = T;

 fn deref(&self) -> &T {
-    &self.gen_item
+    &self
+        .gen_item
         .as_ref()
         .expect("Unwrapping a leased item should never fail")
         .item // unwrap is safe here

@@ -96,7 +97,8 @@ impl<T> Deref for LeasedItem<T> {

 impl<T> DerefMut for LeasedItem<T> {
     fn deref_mut(&mut self) -> &mut T {
-        &mut self.gen_item
+        &mut self
+            .gen_item
             .as_mut()
             .expect("Unwrapping a mut leased item should never fail")
             .item // unwrap is safe here
@@ -5,10 +5,12 @@ use query::Query;
 use schema::Document;
 use schema::Schema;
 use schema::{Field, Term};
+use space_usage::SearcherSpaceUsage;
 use std::fmt;
 use std::sync::Arc;
 use termdict::TermMerger;
 use DocAddress;
+use Index;
 use Result;

 /// Holds a list of `SegmentReader`s ready for search.

@@ -18,23 +20,35 @@ use Result;
 ///
 pub struct Searcher {
     schema: Schema,
+    index: Index,
     segment_readers: Vec<SegmentReader>,
 }

 impl Searcher {
     /// Creates a new `Searcher`
-    pub(crate) fn new(schema: Schema, segment_readers: Vec<SegmentReader>) -> Searcher {
+    pub(crate) fn new(
+        schema: Schema,
+        index: Index,
+        segment_readers: Vec<SegmentReader>,
+    ) -> Searcher {
         Searcher {
             schema,
+            index,
             segment_readers,
         }
     }

+    /// Returns the `Index` associated to the `Searcher`
+    pub fn index(&self) -> &Index {
+        &self.index
+    }

     /// Fetches a document from tantivy's store given a `DocAddress`.
     ///
     /// The searcher uses the segment ordinal to route
     /// the request to the right `Segment`.
-    pub fn doc(&self, doc_address: &DocAddress) -> Result<Document> {
-        let DocAddress(segment_local_id, doc_id) = *doc_address;
+    pub fn doc(&self, doc_address: DocAddress) -> Result<Document> {
+        let DocAddress(segment_local_id, doc_id) = doc_address;
         let segment_reader = &self.segment_readers[segment_local_id as usize];
         segment_reader.doc(doc_id)
     }

@@ -48,7 +62,7 @@ impl Searcher {
 pub fn num_docs(&self) -> u64 {
     self.segment_readers
         .iter()
-        .map(|segment_reader| segment_reader.num_docs() as u64)
+        .map(|segment_reader| u64::from(segment_reader.num_docs()))
         .sum::<u64>()
 }

@@ -57,8 +71,9 @@ impl Searcher {
 pub fn doc_freq(&self, term: &Term) -> u64 {
     self.segment_readers
         .iter()
-        .map(|segment_reader| segment_reader.inverted_index(term.field()).doc_freq(term) as u64)
-        .sum::<u64>()
+        .map(|segment_reader| {
+            u64::from(segment_reader.inverted_index(term.field()).doc_freq(term))
+        }).sum::<u64>()
 }

 /// Return the list of segment readers

@@ -78,12 +93,22 @@ impl Searcher {

 /// Return the field searcher associated to a `Field`.
 pub fn field(&self, field: Field) -> FieldSearcher {
-    let inv_index_readers = self.segment_readers
+    let inv_index_readers = self
+        .segment_readers
         .iter()
         .map(|segment_reader| segment_reader.inverted_index(field))
         .collect::<Vec<_>>();
     FieldSearcher::new(inv_index_readers)
 }

+/// Summarize total space usage of this searcher.
+pub fn space_usage(&self) -> SearcherSpaceUsage {
+    let mut space_usage = SearcherSpaceUsage::new();
+    for segment_reader in self.segment_readers.iter() {
+        space_usage.add_segment(segment_reader.space_usage());
+    }
+    space_usage
+}
 }

 pub struct FieldSearcher {

@@ -98,7 +123,8 @@ impl FieldSearcher {
 /// Returns a Stream over all of the sorted unique terms
 /// for the given field.
 pub fn terms(&self) -> TermMerger {
-    let term_streamers: Vec<_> = self.inv_index_readers
+    let term_streamers: Vec<_> = self
+        .inv_index_readers
         .iter()
         .map(|inverted_index| inverted_index.terms().stream())
         .collect();

@@ -108,7 +134,8 @@ impl FieldSearcher {

 impl fmt::Debug for Searcher {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let segment_ids = self.segment_readers
+        let segment_ids = self
+            .segment_readers
             .iter()
             .map(|segment_reader| segment_reader.segment_id())
             .collect::<Vec<_>>();
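With `doc` now taking a `DocAddress` by value (it is a small Copy pair), a search-and-fetch round trip looks roughly like this. A hedged sketch; it assumes an index and default field built as in the collector examples above, and that `Document` derives `Debug`:

use tantivy::collector::TopScoreCollector;
use tantivy::query::QueryParser;
use tantivy::schema::Field;
use tantivy::{Index, Result};

/// Fetch and print the stored documents behind the top 3 hits.
fn show_top_docs(index: &Index, default_field: Field, query_str: &str) -> Result<()> {
    let searcher = index.searcher();
    let query = QueryParser::for_index(index, vec![default_field]).parse_query(query_str)?;

    let mut top_collector = TopScoreCollector::with_limit(3);
    searcher.search(&*query, &mut top_collector)?;

    for (_score, doc_address) in top_collector.top_docs() {
        // `DocAddress` is passed by value now; no borrow needed.
        let doc = searcher.doc(doc_address)?;
        println!("{:?}", doc);
    }
    Ok(())
}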
@@ -52,12 +52,12 @@ impl SegmentId {
 /// Picking the first 8 chars is ok to identify
 /// segments in a display message.
 pub fn short_uuid_string(&self) -> String {
-    (&self.0.simple().to_string()[..8]).to_string()
+    (&self.0.to_simple_ref().to_string()[..8]).to_string()
 }

 /// Returns a segment uuid string.
 pub fn uuid_string(&self) -> String {
-    self.0.simple().to_string()
+    self.0.to_simple_ref().to_string()
 }
 }
@@ -50,7 +50,7 @@ impl<'a> serde::Deserialize<'a> for SegmentMeta {
 {
     let inner = InnerSegmentMeta::deserialize(deserializer)?;
     let tracked = INVENTORY.track(inner);
-    Ok(SegmentMeta { tracked: tracked })
+    Ok(SegmentMeta { tracked })
 }
 }
@@ -4,7 +4,6 @@ use core::InvertedIndexReader;
 use core::Segment;
 use core::SegmentComponent;
 use core::SegmentId;
-use core::SegmentMeta;
 use error::TantivyError;
 use fastfield::DeleteBitSet;
 use fastfield::FacetReader;

@@ -17,6 +16,7 @@ use schema::Document;
 use schema::Field;
 use schema::FieldType;
 use schema::Schema;
+use space_usage::SegmentSpaceUsage;
 use std::collections::HashMap;
 use std::fmt;
 use std::sync::Arc;

@@ -44,7 +44,8 @@ pub struct SegmentReader {
 inv_idx_reader_cache: Arc<RwLock<HashMap<Field, Arc<InvertedIndexReader>>>>,

 segment_id: SegmentId,
-segment_meta: SegmentMeta,
+max_doc: DocId,
+num_docs: DocId,

 termdict_composite: CompositeFile,
 postings_composite: CompositeFile,

@@ -64,7 +65,7 @@ impl SegmentReader {
 /// Today, `tantivy` does not handle deletes, so it happens
 /// to also be the number of documents in the index.
 pub fn max_doc(&self) -> DocId {
-    self.segment_meta.max_doc()
+    self.max_doc
 }

 /// Returns the number of documents.

@@ -73,7 +74,7 @@ impl SegmentReader {
 /// Today, `tantivy` does not handle deletes, so max doc and
 /// num_docs are the same.
 pub fn num_docs(&self) -> DocId {
-    self.segment_meta.num_docs()
+    self.num_docs
 }

 /// Returns the schema of the index this segment belongs to.

@@ -153,15 +154,17 @@ impl SegmentReader {
 /// Accessor to the `BytesFastFieldReader` associated to a given `Field`.
 pub fn bytes_fast_field_reader(&self, field: Field) -> fastfield::Result<BytesFastFieldReader> {
     let field_entry = self.schema.get_field_entry(field);
-    match field_entry.field_type() {
-        &FieldType::Bytes => {}
+    match *field_entry.field_type() {
+        FieldType::Bytes => {}
         _ => return Err(FastFieldNotAvailableError::new(field_entry)),
     }
-    let idx_reader = self.fast_fields_composite
+    let idx_reader = self
+        .fast_fields_composite
         .open_read_with_idx(field, 0)
         .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))
         .map(FastFieldReader::open)?;
-    let values = self.fast_fields_composite
+    let values = self
+        .fast_fields_composite
        .open_read_with_idx(field, 1)
        .ok_or_else(|| FastFieldNotAvailableError::new(field_entry))?;
     Ok(BytesFastFieldReader::open(idx_reader, values))

@@ -175,7 +178,7 @@ impl SegmentReader {
         "The field {:?} is not a \
          hierarchical facet.",
         field_entry
-    )).into());
+    )));
 }
 let term_ords_reader = self.multi_fast_field_reader(field)?;
 let termdict_source = self.termdict_composite.open_read(field).ok_or_else(|| {

@@ -186,7 +189,7 @@ impl SegmentReader {
         field_entry.name()
     ))
 })?;
-let termdict = TermDictionary::from_source(termdict_source);
+let termdict = TermDictionary::from_source(&termdict_source);
 let facet_reader = FacetReader::new(term_ords_reader, termdict);
 Ok(facet_reader)
 }

@@ -225,6 +228,8 @@ impl SegmentReader {
 let store_source = segment.open_read(SegmentComponent::STORE)?;
 let store_reader = StoreReader::from_source(store_source);

+fail_point!("SegmentReader::open#middle");

 let postings_source = segment.open_read(SegmentComponent::POSTINGS)?;
 let postings_composite = CompositeFile::open(&postings_source)?;

@@ -260,7 +265,8 @@ impl SegmentReader {
 let schema = segment.schema();
 Ok(SegmentReader {
     inv_idx_reader_cache: Arc::new(RwLock::new(HashMap::new())),
-    segment_meta: segment.meta().clone(),
+    max_doc: segment.meta().max_doc(),
+    num_docs: segment.meta().num_docs(),
     termdict_composite,
     postings_composite,
     fast_fields_composite,

@@ -282,7 +288,8 @@ impl SegmentReader {
 /// term dictionary associated to a specific field,
 /// and opening the posting list associated to any term.
 pub fn inverted_index(&self, field: Field) -> Arc<InvertedIndexReader> {
-    if let Some(inv_idx_reader) = self.inv_idx_reader_cache
+    if let Some(inv_idx_reader) = self
+        .inv_idx_reader_cache
         .read()
         .expect("Lock poisoned. This should never happen")
         .get(&field)

@@ -306,25 +313,28 @@ impl SegmentReader {
 // As a result, no data is associated to the inverted index.
 //
 // Returns an empty inverted index.
-return Arc::new(InvertedIndexReader::empty(field_type.clone()));
+return Arc::new(InvertedIndexReader::empty(field_type));
 }

 let postings_source = postings_source_opt.unwrap();

-let termdict_source = self.termdict_composite
+let termdict_source = self
+    .termdict_composite
     .open_read(field)
     .expect("Failed to open field term dictionary in composite file. Is the field indexed?");

-let positions_source = self.positions_composite
+let positions_source = self
+    .positions_composite
     .open_read(field)
     .expect("Index corrupted. Failed to open field positions in composite file.");

-let positions_idx_source = self.positions_idx_composite
+let positions_idx_source = self
+    .positions_idx_composite
     .open_read(field)
     .expect("Index corrupted. Failed to open field positions in composite file.");

 let inv_idx_reader = Arc::new(InvertedIndexReader::new(
-    TermDictionary::from_source(termdict_source),
+    TermDictionary::from_source(&termdict_source),
     postings_source,
     positions_source,
     positions_idx_source,

@@ -372,6 +382,21 @@ impl SegmentReader {
 pub fn doc_ids_alive(&self) -> SegmentReaderAliveDocsIterator {
     SegmentReaderAliveDocsIterator::new(&self)
 }

+/// Summarize total space usage of this segment.
+pub fn space_usage(&self) -> SegmentSpaceUsage {
+    SegmentSpaceUsage::new(
+        self.num_docs(),
+        self.termdict_composite.space_usage(),
+        self.postings_composite.space_usage(),
+        self.positions_composite.space_usage(),
+        self.positions_idx_composite.space_usage(),
+        self.fast_fields_composite.space_usage(),
+        self.fieldnorms_composite.space_usage(),
+        self.store_reader.space_usage(),
+        self.delete_bitset_opt.as_ref().map(|x| x.space_usage()).unwrap_or(0),
+    )
+}
 }

 impl fmt::Debug for SegmentReader {

@@ -391,7 +416,7 @@ pub struct SegmentReaderAliveDocsIterator<'a> {
 impl<'a> SegmentReaderAliveDocsIterator<'a> {
     pub fn new(reader: &'a SegmentReader) -> SegmentReaderAliveDocsIterator<'a> {
         SegmentReaderAliveDocsIterator {
-            reader: reader,
+            reader,
             max_doc: reader.max_doc(),
             current: 0,
         }
@@ -77,15 +77,15 @@ pub trait Directory: DirectoryClone + fmt::Debug + Send + Sync + 'static {

 /// DirectoryClone
 pub trait DirectoryClone {
     /// Clones the directory and boxes the clone
     fn box_clone(&self) -> Box<Directory>;
 }

 impl<T> DirectoryClone for T
 where
     T: 'static + Directory + Clone,
 {
     fn box_clone(&self) -> Box<Directory> {
         Box::new(self.clone())
     }
 }
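`box_clone` is the standard workaround for cloning behind a trait object: `Clone` itself is not object-safe, so the trait exposes a method that each concrete type implements by boxing a clone of itself. A generic standalone sketch of the pattern (hypothetical types, not tantivy's own `Directory`):

trait Storage {
    fn describe(&self) -> String;
    /// Object-safe substitute for `Clone`: returns a boxed copy.
    fn box_clone(&self) -> Box<dyn Storage>;
}

#[derive(Clone)]
struct RamStorage;

impl Storage for RamStorage {
    fn describe(&self) -> String { "ram".to_string() }
    fn box_clone(&self) -> Box<dyn Storage> { Box::new(self.clone()) }
}

// A boxed trait object can now be cloned through the vtable.
impl Clone for Box<dyn Storage> {
    fn clone(&self) -> Box<dyn Storage> { self.box_clone() }
}

fn main() {
    let a: Box<dyn Storage> = Box::new(RamStorage);
    let b = a.clone(); // goes through box_clone
    assert_eq!(b.describe(), "ram");
}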
@@ -2,6 +2,7 @@ use core::MANAGED_FILEPATH;
 use directory::error::{DeleteError, IOError, OpenReadError, OpenWriteError};
 use directory::{ReadOnlySource, WritePtr};
 use error::TantivyError;
+use indexer::LockType;
 use serde_json;
 use std::collections::HashSet;
 use std::io;

@@ -13,6 +14,17 @@ use std::sync::{Arc, RwLock};
 use Directory;
 use Result;

+/// Returns true iff the file is "managed".
+/// Non-managed files are not subject to garbage collection.
+///
+/// Filenames that start with a "." -typically locks-
+/// are not managed.
+fn is_managed(path: &Path) -> bool {
+    path.to_str()
+        .map(|p_str| !p_str.starts_with('.'))
+        .unwrap_or(true)
+}

 /// Wrapper of directories that keeps track of files created by Tantivy.
 ///
 /// A managed directory is just a wrapper of a directory

@@ -40,7 +52,7 @@ fn save_managed_paths(
     wlock: &RwLockWriteGuard<MetaInformation>,
 ) -> io::Result<()> {
     let mut w = serde_json::to_vec(&wlock.managed_paths)?;
-    write!(&mut w, "\n")?;
+    writeln!(&mut w)?;
     directory.atomic_write(&MANAGED_FILEPATH, &w[..])?;
     Ok(())
 }

@@ -82,25 +94,35 @@ impl ManagedDirectory {
 pub fn garbage_collect<L: FnOnce() -> HashSet<PathBuf>>(&mut self, get_living_files: L) {
     info!("Garbage collect");
     let mut files_to_delete = vec![];

-    // It is crucial to get the living files after acquiring the
-    // read lock of meta informations. That way, we
-    // avoid the following scenario.
-    //
-    // 1) we get the list of living files.
-    // 2) someone creates a new file.
-    // 3) we start garbage collection and remove this file
-    // even though it is a living file.
-    //
-    // releasing the lock as .delete() will use it too.
     {
-        let meta_informations_rlock = self.meta_informations
+        // releasing the lock as .delete() will use it too.
+        let meta_informations_rlock = self
+            .meta_informations
             .read()
             .expect("Managed directory rlock poisoned in garbage collect.");

-        let living_files = get_living_files();
-
-        for managed_path in &meta_informations_rlock.managed_paths {
-            if !living_files.contains(managed_path) {
-                files_to_delete.push(managed_path.clone());
+        // It is crucial to get the living files after acquiring the
+        // read lock of meta informations. That way, we
+        // avoid the following scenario.
+        //
+        // 1) we get the list of living files.
+        // 2) someone creates a new file.
+        // 3) we start garbage collection and remove this file
+        // even though it is a living file.
+        //
+        // The point of this second "file" lock is to enforce the following scenario
+        // 1) process B tries to load a new set of searchers.
+        //    The list of segments is loaded
+        // 2) writer changes meta.json (for instance after a merge or a commit)
+        // 3) gc kicks in.
+        // 4) gc removes a file that was useful for process B, before process B opened it.
+        if let Ok(_meta_lock) = LockType::MetaLock.acquire_lock(self) {
+            let living_files = get_living_files();
+            for managed_path in &meta_informations_rlock.managed_paths {
+                if !living_files.contains(managed_path) {
+                    files_to_delete.push(managed_path.clone());
+                }
             }
         }
     }

@@ -134,7 +156,8 @@ impl ManagedDirectory {
 if !deleted_files.is_empty() {
     // update the list of managed files by removing
     // the files that were removed.
-    let mut meta_informations_wlock = self.meta_informations
+    let mut meta_informations_wlock = self
+        .meta_informations
         .write()
         .expect("Managed directory wlock poisoned (2).");
     {

@@ -156,8 +179,17 @@ impl ManagedDirectory {
 /// registering the filepath and creating the file
 /// will not lead to garbage files that will
 /// never get removed.
+///
+/// Files starting with "." are reserved for locks.
+/// They are not managed and cannot be subjected
+/// to garbage collection.
 fn register_file_as_managed(&mut self, filepath: &Path) -> io::Result<()> {
-    let mut meta_wlock = self.meta_informations
+    // Files starting with "." (e.g. lock files) are not managed.
+    if !is_managed(filepath) {
+        return Ok(());
+    }
+    let mut meta_wlock = self
+        .meta_informations
         .write()
         .expect("Managed file lock poisoned");
     let has_changed = meta_wlock.managed_paths.insert(filepath.to_owned());
@@ -32,7 +32,8 @@ fn open_mmap(full_path: &Path) -> result::Result<Option<MmapReadOnly>, OpenReadError> {
     }
 })?;

-let meta_data = file.metadata()
+let meta_data = file
+    .metadata()
     .map_err(|e| IOError::with_path(full_path.to_owned(), e))?;
 if meta_data.len() == 0 {
     // if the file size is 0, it will not be possible

@@ -309,7 +310,8 @@ impl Directory for MmapDirectory {
 // when the last reference is gone.
 mmap_cache.cache.remove(&full_path);
 match fs::remove_file(&full_path) {
-    Ok(_) => self.sync_directory()
+    Ok(_) => self
+        .sync_directory()
         .map_err(|e| IOError::with_path(path.to_owned(), e).into()),
     Err(e) => {
         if e.kind() == io::ErrorKind::NotFound {

@@ -362,6 +364,11 @@ mod tests {

 use super::*;

+#[test]
+fn test_open_non_existant_path() {
+    assert!(MmapDirectory::open(PathBuf::from("./nowhere")).is_err());
+}

 #[test]
 fn test_open_empty() {
     // empty file is actually an edge case because those
@@ -100,8 +100,7 @@ impl InnerDirectory {
     );
     let io_err = make_io_err(msg);
     OpenReadError::IOError(IOError::with_path(path.to_owned(), io_err))
-})
-    .and_then(|readable_map| {
+}).and_then(|readable_map| {
     readable_map
         .get(path)
         .ok_or_else(|| OpenReadError::FileDoesNotExist(PathBuf::from(path)))

@@ -121,8 +120,7 @@ impl InnerDirectory {
     );
     let io_err = make_io_err(msg);
     DeleteError::IOError(IOError::with_path(path.to_owned(), io_err))
-})
-    .and_then(|mut writable_map| match writable_map.remove(path) {
+}).and_then(|mut writable_map| match writable_map.remove(path) {
     Some(_) => Ok(()),
     None => Err(DeleteError::FileDoesNotExist(PathBuf::from(path))),
 })

@@ -170,10 +168,10 @@ impl Directory for RAMDirectory {
 let path_buf = PathBuf::from(path);
 let vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());

-let exists = self.fs
+let exists = self
+    .fs
     .write(path_buf.clone(), &Vec::new())
     .map_err(|err| IOError::with_path(path.to_owned(), err))?;

 // force the creation of the file to mimic the MMap directory.
 if exists {
     Err(OpenWriteError::FileAlreadyExists(path_buf))

@@ -196,6 +194,10 @@ impl Directory for RAMDirectory {
 }

 fn atomic_write(&mut self, path: &Path, data: &[u8]) -> io::Result<()> {
+    fail_point!("RAMDirectory::atomic_write", |msg| Err(io::Error::new(
+        io::ErrorKind::Other,
+        msg.unwrap_or("Undefined".to_string())
+    )));
     let path_buf = PathBuf::from(path);
     let mut vec_writer = VecWriter::new(path_buf.clone(), self.fs.clone());
     self.fs.write(path_buf, &Vec::new())?;
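`fail_point!` comes from the `fail` crate in the dependency list: the macro is inert by default, and tests can arm it by name to inject errors at that exact spot. A hedged sketch of how such a point might be triggered from a test; the `fail::cfg`/`fail::remove` calls and the action string are assumptions based on the fail 0.2 API, and the test assumes the surrounding module's imports:

#[test]
fn test_atomic_write_fails() {
    // Arm the fail point: make it return an error message.
    fail::cfg("RAMDirectory::atomic_write", "return(simulated io error)").unwrap();
    let mut directory = RAMDirectory::create();
    assert!(directory
        .atomic_write(Path::new("meta.json"), b"{}")
        .is_err());
    // Disarm it so other tests are unaffected.
    fail::remove("RAMDirectory::atomic_write");
}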
@@ -5,7 +5,6 @@ use fst::raw::MmapReadOnly;
 use stable_deref_trait::{CloneStableDeref, StableDeref};
 use std::ops::Deref;

-
 /// Read object that represents files in tantivy.
 ///
 /// These read objects are only in charge of delivering
src/error.rs (40 lines changed)
@@ -4,6 +4,7 @@ use std::io;

 use directory::error::{IOError, OpenDirectoryError, OpenReadError, OpenWriteError};
 use fastfield::FastFieldNotAvailableError;
+use indexer::LockType;
 use query;
 use schema;
 use serde_json;

@@ -19,6 +20,15 @@ pub enum TantivyError {
 /// File already exists, this is a problem when we try to write into a new file.
 #[fail(display = "file already exists: '{:?}'", _0)]
 FileAlreadyExists(PathBuf),
+/// Index already exists in this directory
+#[fail(display = "index already exists")]
+IndexAlreadyExists,
+/// Failed to acquire file lock
+#[fail(
+    display = "Failed to acquire Lockfile: {:?}. Possible causes: another IndexWriter instance or panic during previous lock drop.",
+    _0
+)]
+LockFailure(LockType),
 /// IO Error.
 #[fail(display = "an IO error occurred: '{}'", _0)]
 IOError(#[cause] IOError),

@@ -46,48 +56,46 @@ pub enum TantivyError {

 impl From<FastFieldNotAvailableError> for TantivyError {
     fn from(fastfield_error: FastFieldNotAvailableError) -> TantivyError {
-        TantivyError::FastFieldError(fastfield_error).into()
+        TantivyError::FastFieldError(fastfield_error)
     }
 }

 impl From<IOError> for TantivyError {
     fn from(io_error: IOError) -> TantivyError {
-        TantivyError::IOError(io_error).into()
+        TantivyError::IOError(io_error)
     }
 }

 impl From<io::Error> for TantivyError {
     fn from(io_error: io::Error) -> TantivyError {
-        TantivyError::IOError(io_error.into()).into()
+        TantivyError::IOError(io_error.into())
     }
 }

 impl From<query::QueryParserError> for TantivyError {
     fn from(parsing_error: query::QueryParserError) -> TantivyError {
-        TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error)).into()
+        TantivyError::InvalidArgument(format!("Query is invalid. {:?}", parsing_error))
     }
 }

 impl<Guard> From<PoisonError<Guard>> for TantivyError {
     fn from(_: PoisonError<Guard>) -> TantivyError {
-        TantivyError::Poisoned.into()
+        TantivyError::Poisoned
     }
 }

 impl From<OpenReadError> for TantivyError {
     fn from(error: OpenReadError) -> TantivyError {
         match error {
-            OpenReadError::FileDoesNotExist(filepath) => {
-                TantivyError::PathDoesNotExist(filepath).into()
-            }
-            OpenReadError::IOError(io_error) => TantivyError::IOError(io_error).into(),
+            OpenReadError::FileDoesNotExist(filepath) => TantivyError::PathDoesNotExist(filepath),
+            OpenReadError::IOError(io_error) => TantivyError::IOError(io_error),
         }
     }
 }

 impl From<schema::DocParsingError> for TantivyError {
     fn from(error: schema::DocParsingError) -> TantivyError {
-        TantivyError::InvalidArgument(format!("Failed to parse document {:?}", error)).into()
+        TantivyError::InvalidArgument(format!("Failed to parse document {:?}", error))
     }
 }

@@ -98,7 +106,7 @@ impl From<OpenWriteError> for TantivyError {
             TantivyError::FileAlreadyExists(filepath)
         }
         OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
-    }.into()
+    }
 }
 }

@@ -106,11 +114,11 @@ impl From<OpenDirectoryError> for TantivyError {
 fn from(error: OpenDirectoryError) -> TantivyError {
     match error {
         OpenDirectoryError::DoesNotExist(directory_path) => {
-            TantivyError::PathDoesNotExist(directory_path).into()
+            TantivyError::PathDoesNotExist(directory_path)
         }
-        OpenDirectoryError::NotADirectory(directory_path) => {
-            TantivyError::InvalidArgument(format!("{:?} is not a directory", directory_path))
-        }
+        OpenDirectoryError::NotADirectory(directory_path) => TantivyError::InvalidArgument(
+            format!("{:?} is not a directory", directory_path),
+        ).into(),
     }
 }
 }

@@ -118,6 +126,6 @@ impl From<OpenDirectoryError> for TantivyError {
 impl From<serde_json::Error> for TantivyError {
     fn from(error: serde_json::Error) -> TantivyError {
         let io_err = io::Error::from(error);
-        TantivyError::IOError(io_err.into()).into()
+        TantivyError::IOError(io_err.into())
     }
 }
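The cleanup above works because each `From` impl already returns `TantivyError`; calling `.into()` on a value of the target type is an identity conversion that clippy flags as redundant. A minimal illustration with a hypothetical error type:

#[derive(Debug, PartialEq)]
struct MyError(String);

impl From<std::io::Error> for MyError {
    fn from(e: std::io::Error) -> MyError {
        // Already the target type: no trailing `.into()` needed.
        MyError(e.to_string())
    }
}

fn main() {
    let io_err = std::io::Error::new(std::io::ErrorKind::Other, "boom");
    let err: MyError = io_err.into(); // uses the From impl above
    assert_eq!(err, MyError("boom".to_string()));
}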
@@ -51,7 +51,7 @@ impl BytesFastFieldWriter {
 self.next_doc();
 for field_value in doc.field_values() {
     if field_value.field() == self.field {
-        if let &Value::Bytes(ref bytes) = field_value.value() {
+        if let Value::Bytes(ref bytes) = *field_value.value() {
             self.vals.extend_from_slice(bytes);
         } else {
             panic!(
@@ -2,6 +2,7 @@ use bit_set::BitSet;
use common::HasLen;
use directory::ReadOnlySource;
use directory::WritePtr;
use space_usage::ByteCount;
use std::io;
use std::io::Write;
use DocId;
@@ -41,7 +42,8 @@ pub struct DeleteBitSet {
impl DeleteBitSet {
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data.as_slice()
let num_deleted: usize = data
.as_slice()
.iter()
.map(|b| b.count_ones() as usize)
.sum();
@@ -62,6 +64,11 @@ impl DeleteBitSet {
b & (1u8 << shift) != 0
}
}

/// Summarize total space usage of this bitset.
pub fn space_usage(&self) -> ByteCount {
self.data.len()
}
}

impl HasLen for DeleteBitSet {

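The bookkeeping above is plain bit arithmetic: `num_deleted` is the popcount of the backing bytes, and a document's deletion bit is found by splitting its id into a byte index and a shift, exactly as in the `b & (1u8 << shift) != 0` test. A self-contained sketch (the raw `&[u8]` layout is an assumption for illustration, not tantivy's public API):

    fn is_deleted(data: &[u8], doc: u32) -> bool {
        let byte = data[(doc / 8) as usize]; // byte holding this doc's bit
        let shift = doc % 8; // bit position within that byte
        byte & (1u8 << shift) != 0
    }

    fn num_deleted(data: &[u8]) -> usize {
        data.iter().map(|b| b.count_ones() as usize).sum()
    }

    fn main() {
        let data = [0b0000_0101u8]; // docs 0 and 2 deleted
        assert!(is_deleted(&data, 0) && is_deleted(&data, 2));
        assert_eq!(num_deleted(&data), 2);
    }
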
@@ -56,7 +56,8 @@ impl FacetReader {

/// Given a term ordinal returns the term associated to it.
pub fn facet_from_ord(&self, facet_ord: TermOrdinal, output: &mut Facet) {
let found_term = self.term_dict
let found_term = self
.term_dict
.ord_to_term(facet_ord as u64, output.inner_buffer_mut());
assert!(found_term, "Term ordinal {} not found.", facet_ord);
}

@@ -370,7 +370,7 @@ mod tests {
pub fn generate_permutation() -> Vec<u64> {
let seed: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut rng = XorShiftRng::from_seed(seed);
let mut permutation: Vec<u64> = (0u64..1_000_000u64).collect();
let mut permutation: Vec<u64> = (0u64..100_000u64).collect();
rng.shuffle(&mut permutation);
permutation
}

@@ -132,7 +132,8 @@ impl MultiValueIntFastFieldWriter {
);

let mut doc_vals: Vec<u64> = Vec::with_capacity(100);
for (start, stop) in self.doc_index
for (start, stop) in self
.doc_index
.windows(2)
.map(|interval| (interval[0], interval[1]))
.chain(Some(last_interval).into_iter())
@@ -148,7 +149,6 @@ impl MultiValueIntFastFieldWriter {
value_serializer.add_val(val)?;
}
}

}
None => {
let val_min_max = self.vals.iter().cloned().minmax();

@@ -11,7 +11,6 @@ use schema::SchemaBuilder;
use schema::FAST;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::mem;
use std::path::Path;
use DocId;

@@ -80,7 +79,8 @@ impl<Item: FastValue> FastFieldReader<Item> {
// TODO change start to `u64`.
// For multifastfield, start is an index in a second fastfield, not a `DocId`
pub fn get_range(&self, start: u32, output: &mut [Item]) {
let output_u64: &mut [u64] = unsafe { mem::transmute(output) }; // ok: Item is either `u64` or `i64`
// ok: Item is either `u64` or `i64`
let output_u64: &mut [u64] = unsafe { &mut *(output as *mut [Item] as *mut [u64]) };
self.bit_unpacker.get_range(start, output_u64);
for out in output_u64.iter_mut() {
*out = Item::from_u64(*out + self.min_value_u64).as_u64();

@@ -10,27 +10,28 @@ pub fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
.unwrap_or_else(|idx| idx - 1) as u8
}

#[cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))]
pub const FIELD_NORMS_TABLE: [u32; 256] = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 44, 46, 48, 50, 52, 54, 56, 60,
64, 68, 72, 76, 80, 84, 88, 96, 104, 112, 120, 128, 136, 144, 152, 168, 184, 200, 216, 232,
248, 264, 280, 312, 344, 376, 408, 440, 472, 504, 536, 600, 664, 728, 792, 856, 920, 984, 1048,
1176, 1304, 1432, 1560, 1688, 1816, 1944, 2072, 2328, 2584, 2840, 3096, 3352, 3608, 3864, 4120,
4632, 5144, 5656, 6168, 6680, 7192, 7704, 8216, 9240, 10264, 11288, 12312, 13336, 14360, 15384,
16408, 18456, 20504, 22552, 24600, 26648, 28696, 30744, 32792, 36888, 40984, 45080, 49176,
53272, 57368, 61464, 65560, 73752, 81944, 90136, 98328, 106520, 114712, 122904, 131096, 147480,
163864, 180248, 196632, 213016, 229400, 245784, 262168, 294936, 327704, 360472, 393240, 426008,
458776, 491544, 524312, 589848, 655384, 720920, 786456, 851992, 917528, 983064, 1048600,
1179672, 1310744, 1441816, 1572888, 1703960, 1835032, 1966104, 2097176, 2359320, 2621464,
2883608, 3145752, 3407896, 3670040, 3932184, 4194328, 4718616, 5242904, 5767192, 6291480,
6815768, 7340056, 7864344, 8388632, 9437208, 10485784, 11534360, 12582936, 13631512, 14680088,
15728664, 16777240, 18874392, 20971544, 23068696, 25165848, 27263000, 29360152, 31457304,
33554456, 37748760, 41943064, 46137368, 50331672, 54525976, 58720280, 62914584, 67108888,
75497496, 83886104, 92274712, 100663320, 109051928, 117440536, 125829144, 134217752, 150994968,
167772184, 184549400, 201326616, 218103832, 234881048, 251658264, 268435480, 301989912,
335544344, 369098776, 402653208, 436207640, 469762072, 503316504, 536870936, 603979800,
671088664, 738197528, 805306392, 872415256, 939524120, 1006632984, 1073741848, 1207959576,
1342177304, 1476395032, 1610612760, 1744830488, 1879048216, 2013265944,
248, 264, 280, 312, 344, 376, 408, 440, 472, 504, 536, 600, 664, 728, 792, 856, 920, 984,
1_048, 1176, 1304, 1432, 1560, 1688, 1816, 1944, 2072, 2328, 2584, 2840, 3096, 3352, 3608,
3864, 4120, 4632, 5144, 5656, 6168, 6680, 7192, 7704, 8216, 9240, 10264, 11288, 12312, 13336,
14360, 15384, 16408, 18456, 20504, 22552, 24600, 26648, 28696, 30744, 32792, 36888, 40984,
45080, 49176, 53272, 57368, 61464, 65560, 73752, 81944, 90136, 98328, 106520, 114712, 122904,
131096, 147480, 163864, 180248, 196632, 213016, 229400, 245784, 262168, 294936, 327704, 360472,
393240, 426008, 458776, 491544, 524312, 589848, 655384, 720920, 786456, 851992, 917528, 983064,
1048600, 1179672, 1310744, 1441816, 1572888, 1703960, 1835032, 1966104, 2097176, 2359320,
2621464, 2883608, 3145752, 3407896, 3670040, 3932184, 4194328, 4718616, 5242904, 5767192,
6291480, 6815768, 7340056, 7864344, 8388632, 9437208, 10485784, 11534360, 12582936, 13631512,
14680088, 15728664, 16777240, 18874392, 20971544, 23068696, 25165848, 27263000, 29360152,
31457304, 33554456, 37748760, 41943064, 46137368, 50331672, 54525976, 58720280, 62914584,
67108888, 75497496, 83886104, 92274712, 100663320, 109051928, 117440536, 125829144, 134217752,
150994968, 167772184, 184549400, 201326616, 218103832, 234881048, 251658264, 268435480,
301989912, 335544344, 369098776, 402653208, 436207640, 469762072, 503316504, 536870936,
603979800, 671088664, 738197528, 805306392, 872415256, 939524120, 1006632984, 1073741848,
1207959576, 1342177304, 1476395032, 1610612760, 1744830488, 1879048216, 2013265944,
];

#[cfg(test)]

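The table above decompresses a one-byte fieldnorm id back into an approximate field length, and `fieldnorm_to_id` inverts it with a binary search, rounding down to the nearest representable value. A toy round trip over a four-entry table (the tiny table is illustrative; the real one has 256 entries as shown above):

    const TABLE: [u32; 4] = [0, 1, 2, 4];

    fn fieldnorm_to_id(fieldnorm: u32) -> u8 {
        // index of the largest entry <= fieldnorm
        TABLE
            .binary_search(&fieldnorm)
            .unwrap_or_else(|idx| idx - 1) as u8
    }

    fn main() {
        assert_eq!(fieldnorm_to_id(3), 2); // 3 rounds down to TABLE[2] == 2
        assert_eq!(TABLE[fieldnorm_to_id(4) as usize], 4); // exact values round-trip
    }
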
@@ -1,8 +1,8 @@
use rand::thread_rng;
use std::collections::HashSet;

use rand::Rng;
use rand::distributions::Range;
use rand::Rng;
use schema::*;
use Index;
use Searcher;

@@ -52,7 +52,8 @@ impl DeleteQueue {
//
// Past delete operations are not accessible.
pub fn cursor(&self) -> DeleteCursor {
let last_block = self.inner
let last_block = self
.inner
.read()
.expect("Read lock poisoned when opening delete queue cursor")
.last_block
@@ -92,7 +93,8 @@ impl DeleteQueue {
// be some unflushed operations.
//
fn flush(&self) -> Option<Arc<Block>> {
let mut self_wlock = self.inner
let mut self_wlock = self
.inner
.write()
.expect("Failed to acquire write lock on delete queue writer");

@@ -132,7 +134,8 @@ impl From<DeleteQueue> for NextBlock {
impl NextBlock {
fn next_block(&self) -> Option<Arc<Block>> {
{
let next_read_lock = self.0
let next_read_lock = self
.0
.read()
.expect("Failed to acquire write lock in delete queue");
if let InnerNextBlock::Closed(ref block) = *next_read_lock {
@@ -141,7 +144,8 @@ impl NextBlock {
}
let next_block;
{
let mut next_write_lock = self.0
let mut next_write_lock = self
.0
.write()
.expect("Failed to acquire write lock in delete queue");
match *next_write_lock {
@@ -182,19 +186,21 @@ impl DeleteCursor {
/// `opstamp >= target_opstamp`.
pub fn skip_to(&mut self, target_opstamp: u64) {
// TODO Can be optimized as we work with blocks.
#[cfg_attr(feature = "cargo-clippy", allow(while_let_loop))]
loop {
if let Some(operation) = self.get() {
if operation.opstamp >= target_opstamp {
break;
}
} else {
break;
}
while self.is_behind_opstamp(target_opstamp) {
self.advance();
}
}

#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::wrong_self_convention)
)]
fn is_behind_opstamp(&mut self, target_opstamp: u64) -> bool {
self.get()
.map(|operation| operation.opstamp < target_opstamp)
.unwrap_or(false)
}

/// If the current block has been entirely
/// consumed, try to load the next one.
///

@@ -1,26 +1,130 @@
use core::LOCKFILE_FILEPATH;
use directory::error::OpenWriteError;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::thread;
use std::time::Duration;
use Directory;
use TantivyError;

/// The directory lock is a mechanism used to
/// prevent the creation of two [`IndexWriter`](struct.IndexWriter.html)
///
/// Only one lock can exist at a time for a given directory.
/// The lock is released automatically on `Drop`.
pub struct DirectoryLock {
directory: Box<Directory>,
#[derive(Debug, Clone, Copy)]
pub enum LockType {
/// Only one process should be able to write tantivy's index at a time.
/// This lock file, when present, is in charge of preventing other processes from opening an IndexWriter.
///
/// If the process is killed and this file remains, it is safe to remove it manually.
///
/// Failing to acquire this lock usually means a misuse of tantivy's API,
/// (creating more than one instance of the `IndexWriter`), or a spurious
/// lock file remaining after a crash. In the latter case, removing the file after
/// checking that no process using tantivy is running is safe.
IndexWriterLock,
/// The meta lock file is here to protect the segment files being opened by
/// `.load_searchers()` from being garbage collected.
/// It makes it possible for another process to safely consume
/// our index in-writing. Ideally, we may have preferred `RWLock` semantics
/// here, but it is difficult to achieve on Windows.
///
/// Opening segment readers is a very fast process.
/// Right now if the lock cannot be acquired on the first attempt, the logic
/// is very simplistic. We retry after `100ms` until we effectively
/// acquire the lock.
/// This lock should not have much contention in normal usage.
MetaLock,
}

impl DirectoryLock {
pub fn lock(mut directory: Box<Directory>) -> Result<DirectoryLock, OpenWriteError> {
directory.open_write(&*LOCKFILE_FILEPATH)?;
Ok(DirectoryLock { directory })
/// The retry logic for acquiring locks is pretty simple.
/// We just retry `n` times after a given `duration`, both
/// depending on the type of lock.
struct RetryPolicy {
num_retries: usize,
wait_in_ms: u64,
}

impl RetryPolicy {
fn no_retry() -> RetryPolicy {
RetryPolicy {
num_retries: 0,
wait_in_ms: 0,
}
}

fn wait_and_retry(&mut self) -> bool {
if self.num_retries == 0 {
false
} else {
self.num_retries -= 1;
let wait_duration = Duration::from_millis(self.wait_in_ms);
thread::sleep(wait_duration);
true
}
}
}

impl LockType {
fn retry_policy(self) -> RetryPolicy {
match self {
LockType::IndexWriterLock => RetryPolicy::no_retry(),
LockType::MetaLock => RetryPolicy {
num_retries: 100,
wait_in_ms: 100,
},
}
}

fn try_acquire_lock(self, directory: &mut Directory) -> Result<DirectoryLock, TantivyError> {
let path = self.filename();
let mut write = directory.open_write(path).map_err(|e| match e {
OpenWriteError::FileAlreadyExists(_) => TantivyError::LockFailure(self),
OpenWriteError::IOError(io_error) => TantivyError::IOError(io_error),
})?;
write.flush()?;
Ok(DirectoryLock {
directory: directory.box_clone(),
path: path.to_owned(),
})
}

/// Acquire a lock in the given directory.
pub fn acquire_lock(self, directory: &Directory) -> Result<DirectoryLock, TantivyError> {
let mut box_directory = directory.box_clone();
let mut retry_policy = self.retry_policy();
loop {
let lock_result = self.try_acquire_lock(&mut *box_directory);
match lock_result {
Ok(result) => {
return Ok(result);
}
Err(TantivyError::LockFailure(ref filepath)) => {
if !retry_policy.wait_and_retry() {
return Err(TantivyError::LockFailure(filepath.to_owned()));
}
}
Err(_) => {}
}
}
}

fn filename(&self) -> &Path {
match *self {
LockType::MetaLock => Path::new(".tantivy-meta.lock"),
LockType::IndexWriterLock => Path::new(".tantivy-indexer.lock"),
}
}
}

/// The `DirectoryLock` is an object that represents a file lock.
/// See [`LockType`](struct.LockType.html)
///
/// It is transparently associated to a lock file, that gets deleted
/// on `Drop`. The lock is released automatically on `Drop`.
pub struct DirectoryLock {
directory: Box<Directory>,
path: PathBuf,
}

impl Drop for DirectoryLock {
fn drop(&mut self) {
if let Err(e) = self.directory.delete(&*LOCKFILE_FILEPATH) {
if let Err(e) = self.directory.delete(&*self.path) {
error!("Failed to remove the lock file. {:?}", e);
}
}

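In short, acquiring a lock means creating the lock file; if it already exists, `IndexWriterLock` fails fast while `MetaLock` sleeps 100ms and retries up to 100 times. A minimal sketch of that loop against a plain filesystem path (the helper is illustrative, not tantivy's API):

    use std::fs::OpenOptions;
    use std::path::Path;
    use std::thread;
    use std::time::Duration;

    fn try_acquire(path: &Path, mut retries: usize, wait_ms: u64) -> bool {
        loop {
            // `create_new` fails if the file already exists,
            // playing the role of `open_write` above.
            match OpenOptions::new().write(true).create_new(true).open(path) {
                Ok(_) => return true,
                Err(_) if retries > 0 => {
                    retries -= 1;
                    thread::sleep(Duration::from_millis(wait_ms));
                }
                Err(_) => return false,
            }
        }
    }

    fn main() {
        let path = Path::new(".example.lock");
        assert!(try_acquire(path, 0, 0)); // IndexWriterLock-style: no retry
        assert!(!try_acquire(path, 2, 10)); // still held: retries, then gives up
        std::fs::remove_file(path).unwrap();
    }
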
@@ -54,14 +54,14 @@ type DocumentReceiver = channel::Receiver<AddOperation>;
fn initial_table_size(per_thread_memory_budget: usize) -> usize {
let table_size_limit: usize = per_thread_memory_budget / 3;
(1..)
.into_iter()
.take_while(|num_bits: &usize| compute_table_size(*num_bits) < table_size_limit)
.last()
.expect(&format!(
"Per thread memory is too small: {}",
per_thread_memory_budget
))
.min(19) // we cap it at 512K
.unwrap_or_else(|| {
panic!(
"Per thread memory is too small: {}",
per_thread_memory_budget
)
}).min(19) // we cap it at 512K
}

/// `IndexWriter` is the user entry-point to add documents to an index.
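Both versions compute the same thing: the largest number of hash bits whose table footprint stays under a third of the per-thread budget, capped at 19 bits (512K slots). A runnable sketch, with an assumed `compute_table_size` of 16 bytes per slot (the real cost function may differ):

    fn compute_table_size(num_bits: usize) -> usize {
        // assumption for illustration: 16 bytes per hash-table slot
        (1usize << num_bits) * 16
    }

    fn initial_table_size(per_thread_memory_budget: usize) -> usize {
        let table_size_limit = per_thread_memory_budget / 3;
        (1usize..32)
            .take_while(|&num_bits| compute_table_size(num_bits) < table_size_limit)
            .last()
            .unwrap_or_else(|| {
                panic!("Per thread memory is too small: {}", per_thread_memory_budget)
            })
            .min(19) // we cap it at 512K
    }

    fn main() {
        assert_eq!(initial_table_size(1_000_000_000), 19);
        println!("30MB budget -> {} bits", initial_table_size(30_000_000));
    }
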
@@ -177,7 +177,7 @@ pub fn compute_deleted_bitset(
) -> Result<bool> {
let mut might_have_changed = false;

#[cfg_attr(feature = "cargo-clippy", allow(while_let_loop))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::while_let_loop))]
loop {
if let Some(delete_op) = delete_cursor.get() {
if delete_op.opstamp > target_opstamp {
@@ -301,25 +301,29 @@ fn index_documents(

let last_docstamp: u64 = *(doc_opstamps.last().unwrap());

let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let segment_reader = SegmentReader::open(segment)?;
let mut deleted_bitset = BitSet::with_capacity(num_docs as usize);
let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset,
&segment_reader,
&mut delete_cursor,
&doc_to_opstamps,
last_docstamp,
)?;

let segment_entry = SegmentEntry::new(segment_meta, delete_cursor, {
if may_have_deletes {
Some(deleted_bitset)
} else {
None
}
});

let segment_entry: SegmentEntry = if delete_cursor.get().is_some() {
let doc_to_opstamps = DocToOpstampMapping::from(doc_opstamps);
let segment_reader = SegmentReader::open(segment)?;
let mut deleted_bitset = BitSet::with_capacity(num_docs as usize);
let may_have_deletes = compute_deleted_bitset(
&mut deleted_bitset,
&segment_reader,
&mut delete_cursor,
&doc_to_opstamps,
last_docstamp,
)?;
SegmentEntry::new(segment_meta, delete_cursor, {
if may_have_deletes {
Some(deleted_bitset)
} else {
None
}
})
} else {
// if there are no delete operations in the queue, no need
// to even open the segment.
SegmentEntry::new(segment_meta, delete_cursor, None)
};
Ok(segment_updater.add_segment(generation, segment_entry))
}

@@ -341,7 +345,8 @@ impl IndexWriter {
}
drop(self.workers_join_handle);

let result = self.segment_updater
let result = self
.segment_updater
.wait_merging_thread()
.map_err(|_| TantivyError::ErrorInThread("Failed to join merging thread.".into()));

@@ -385,11 +390,9 @@ impl IndexWriter {
.name(format!(
"indexing thread {} for gen {}",
self.worker_id, generation
))
.spawn(move || {
)).spawn(move || {
loop {
let mut document_iterator =
document_receiver_clone.clone().into_iter().peekable();
let mut document_iterator = document_receiver_clone.clone().peekable();

// the peeking here is to avoid
// creating a new segment's files
@@ -488,7 +491,8 @@ impl IndexWriter {
let document_receiver = self.document_receiver.clone();

// take the directory lock to create a new index_writer.
let directory_lock = self._directory_lock
let directory_lock = self
._directory_lock
.take()
.expect("The IndexWriter does not have any lock. This is a bug, please report.");

@@ -657,11 +661,26 @@ mod tests {
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer(40_000_000).unwrap();
match index.writer(40_000_000) {
Err(TantivyError::FileAlreadyExists(_)) => {}
Err(TantivyError::LockFailure(_)) => {}
_ => panic!("Expected FileAlreadyExists error"),
}
}

#[test]
fn test_lockfile_already_exists_error_msg() {
let schema_builder = schema::SchemaBuilder::default();
let index = Index::create_in_ram(schema_builder.build());
let _index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
match index.writer_with_num_threads(1, 3_000_000) {
Err(err) => {
let err_msg = err.to_string();
assert!(err_msg.contains("Lockfile"));
assert!(err_msg.contains("Possible causes:"))
}
_ => panic!("Expected LockfileAlreadyExists error"),
}
}

#[test]
fn test_set_merge_policy() {
let schema_builder = schema::SchemaBuilder::default();
@@ -843,4 +862,32 @@ mod tests {
assert_eq!(initial_table_size(1_000_000_000), 19);
}

#[cfg(not(feature = "no_fail"))]
#[test]
fn test_write_commit_fails() {
use fail;
let mut schema_builder = schema::SchemaBuilder::default();
let text_field = schema_builder.add_text_field("text", schema::TEXT);
let index = Index::create_in_ram(schema_builder.build());

let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap();
for _ in 0..100 {
index_writer.add_document(doc!(text_field => "a"));
}
index_writer.commit().unwrap();
fail::cfg("RAMDirectory::atomic_write", "return(error_write_failed)").unwrap();
for _ in 0..100 {
index_writer.add_document(doc!(text_field => "b"));
}
assert!(index_writer.commit().is_err());
index.load_searchers().unwrap();
let num_docs_containing = |s: &str| {
let searcher = index.searcher();
let term_a = Term::from_field_text(text_field, s);
searcher.doc_freq(&term_a)
};
assert_eq!(num_docs_containing("a"), 100);
assert_eq!(num_docs_containing("b"), 0);
fail::cfg("RAMDirectory::atomic_write", "off").unwrap();
}
}

@@ -21,17 +21,17 @@ pub trait MergePolicy: MergePolicyClone + marker::Send + marker::Sync + Debug {

/// MergePolicyClone
pub trait MergePolicyClone {
/// Returns a boxed clone of the MergePolicy.
fn box_clone(&self) -> Box<MergePolicy>;
/// Returns a boxed clone of the MergePolicy.
fn box_clone(&self) -> Box<MergePolicy>;
}

impl<T> MergePolicyClone for T
where
T: 'static + MergePolicy + Clone,
T: 'static + MergePolicy + Clone,
{
fn box_clone(&self) -> Box<MergePolicy> {
Box::new(self.clone())
}
fn box_clone(&self) -> Box<MergePolicy> {
Box::new(self.clone())
}
}

/// Never merge segments.

@@ -40,15 +40,13 @@ fn compute_total_num_tokens(readers: &[SegmentReader], field: Field) -> u64 {
total_tokens += reader.inverted_index(field).total_num_tokens();
}
}
total_tokens
+ count
.iter()
.cloned()
.enumerate()
.map(|(fieldnorm_ord, count)| {
count as u64 * FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8) as u64
})
.sum::<u64>()
total_tokens + count
.iter()
.cloned()
.enumerate()
.map(|(fieldnorm_ord, count)| {
count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
}).sum::<u64>()
}

pub struct IndexMerger {
@@ -111,7 +109,7 @@ impl TermOrdinalMapping {
.iter()
.flat_map(|term_ordinals| term_ordinals.iter().cloned().max())
.max()
.unwrap_or(TermOrdinal::default())
.unwrap_or_else(TermOrdinal::default)
}
}

@@ -190,7 +188,7 @@ impl IndexMerger {
`term_ordinal_mapping`.");
self.write_hierarchical_facet_field(
field,
term_ordinal_mapping,
&term_ordinal_mapping,
fast_field_serializer,
)?;
}
@@ -314,7 +312,7 @@ impl IndexMerger {
fn write_hierarchical_facet_field(
&self,
field: Field,
term_ordinal_mappings: TermOrdinalMapping,
term_ordinal_mappings: &TermOrdinalMapping,
fast_field_serializer: &mut FastFieldSerializer,
) -> Result<()> {
// Multifastfield consists of 2 fastfields.
@@ -393,8 +391,8 @@ impl IndexMerger {

// We can now initialize our serializer, and push the different values to it
{
let mut serialize_vals =
fast_field_serializer.new_u64_fast_field_with_idx(field, min_value, max_value, 1)?;
let mut serialize_vals = fast_field_serializer
.new_u64_fast_field_with_idx(field, min_value, max_value, 1)?;
for reader in &self.readers {
let ff_reader: MultiValueIntFastFieldReader<u64> =
reader.multi_fast_field_reader(field)?;
@@ -440,7 +438,8 @@ impl IndexMerger {
) -> Result<Option<TermOrdinalMapping>> {
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();
let field_readers = self.readers
let field_readers = self
.readers
.iter()
.map(|reader| reader.inverted_index(indexed_field))
.collect::<Vec<_>>();
@@ -524,8 +523,7 @@ impl IndexMerger {
}
}
None
})
.collect();
}).collect();

// At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term.
@@ -666,8 +664,7 @@ mod tests {
TextFieldIndexing::default()
.set_tokenizer("default")
.set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
).set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
@@ -769,24 +766,24 @@ mod tests {
);
}
{
let doc = searcher.doc(&DocAddress(0, 0)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), "af b");
let doc = searcher.doc(DocAddress(0, 0)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
}
{
let doc = searcher.doc(&DocAddress(0, 1)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), "a b c");
let doc = searcher.doc(DocAddress(0, 1)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c"));
}
{
let doc = searcher.doc(&DocAddress(0, 2)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), "a b c d");
let doc = searcher.doc(DocAddress(0, 2)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c d"));
}
{
let doc = searcher.doc(&DocAddress(0, 3)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), "af b");
let doc = searcher.doc(DocAddress(0, 3)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("af b"));
}
{
let doc = searcher.doc(&DocAddress(0, 4)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), "a b c g");
let doc = searcher.doc(DocAddress(0, 4)).unwrap();
assert_eq!(doc.get_first(text_field).unwrap().text(), Some("a b c g"));
}
{
let get_fast_vals = |terms: Vec<Term>| {
@@ -821,8 +818,7 @@ mod tests {
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
).set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::IntOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype);

@@ -16,6 +16,8 @@ mod segment_writer;
mod stamper;

pub(crate) use self::directory_lock::DirectoryLock;
pub use self::directory_lock::LockType;

pub use self::index_writer::IndexWriter;
pub use self::log_merge_policy::LogMergePolicy;
pub use self::merge_policy::{MergeCandidate, MergePolicy, NoMergePolicy};

@@ -11,8 +11,8 @@ pub enum SegmentState {
}

impl SegmentState {
pub fn letter_code(&self) -> char {
match *self {
pub fn letter_code(self) -> char {
match self {
SegmentState::InMerge => 'M',
SegmentState::Ready => 'R',
}

@@ -1,7 +1,7 @@
use super::segment_register::SegmentRegister;
use core::SegmentId;
use core::SegmentMeta;
use core::{LOCKFILE_FILEPATH, META_FILEPATH};
use core::META_FILEPATH;
use error::TantivyError;
use indexer::delete_queue::DeleteCursor;
use indexer::SegmentEntry;
@@ -78,10 +78,13 @@ impl SegmentManager {
registers_lock.committed.len() + registers_lock.uncommitted.len()
}

/// List the files that are useful to the index.
///
/// This does not include lock files, or files that are obsolete
/// but have not yet been deleted by the garbage collector.
pub fn list_files(&self) -> HashSet<PathBuf> {
let mut files = HashSet::new();
files.insert(META_FILEPATH.clone());
files.insert(LOCKFILE_FILEPATH.clone());
for segment_meta in SegmentMeta::all() {
files.extend(segment_meta.list_files());
}

@@ -51,7 +51,8 @@ impl SegmentRegister {
}

pub fn segment_metas(&self) -> Vec<SegmentMeta> {
let mut segment_ids: Vec<SegmentMeta> = self.segment_states
let mut segment_ids: Vec<SegmentMeta> = self
.segment_states
.values()
.map(|segment_entry| segment_entry.meta().clone())
.collect();

@@ -72,7 +72,7 @@ pub fn save_metas(
payload,
};
let mut buffer = serde_json::to_vec_pretty(&metas)?;
write!(&mut buffer, "\n")?;
writeln!(&mut buffer)?;
directory.atomic_write(&META_FILEPATH, &buffer[..])?;
debug!("Saved metas {:?}", serde_json::to_string_pretty(&metas));
Ok(())
@@ -336,8 +336,7 @@ impl SegmentUpdater {
.unwrap()
.remove(&merging_thread_id);
Ok(())
})
.expect("Failed to spawn a thread.");
}).expect("Failed to spawn a thread.");
self.0
.merging_threads
.write()

@@ -49,20 +49,20 @@ impl SegmentWriter {
) -> Result<SegmentWriter> {
let segment_serializer = SegmentSerializer::for_segment(&mut segment)?;
let multifield_postings = MultiFieldPostingsWriter::new(schema, table_bits);
let tokenizers = schema
.fields()
.iter()
.map(|field_entry| field_entry.field_type())
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options.get_indexing_options().and_then(
|text_index_option| {
let tokenizer_name = &text_index_option.tokenizer();
segment.index().tokenizers().get(tokenizer_name)
},
),
_ => None,
})
.collect();
let tokenizers =
schema
.fields()
.iter()
.map(|field_entry| field_entry.field_type())
.map(|field_type| match *field_type {
FieldType::Str(ref text_options) => text_options
.get_indexing_options()
.and_then(|text_index_option| {
let tokenizer_name = &text_index_option.tokenizer();
segment.index().tokenizers().get(tokenizer_name)
}),
_ => None,
}).collect();
Ok(SegmentWriter {
max_doc: 0,
multifield_postings,
@@ -117,8 +117,7 @@ impl SegmentWriter {
_ => {
panic!("Expected hierarchical facet");
}
})
.collect();
}).collect();
let mut term = Term::for_field(field); // we set the Term
for facet_bytes in facets {
let mut unordered_term_id_opt = None;
@@ -146,8 +145,7 @@ impl SegmentWriter {
.flat_map(|field_value| match *field_value.value() {
Value::Str(ref text) => Some(text.as_str()),
_ => None,
})
.collect();
}).collect();
if texts.is_empty() {
0
} else {

41
src/lib.rs
Normal file → Executable file
@@ -1,11 +1,8 @@
#![doc(html_logo_url = "http://fulmicoton.com/tantivy-logo/tantivy-logo.png")]
#![cfg_attr(feature = "cargo-clippy", allow(module_inception))]
#![cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#![cfg_attr(feature = "cargo-clippy", feature(tool_lints))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::module_inception))]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![allow(unknown_lints)]
#![allow(new_without_default)]
#![allow(decimal_literal_representation)]
#![warn(missing_docs)]
#![recursion_limit = "80"]

@@ -96,7 +93,7 @@
//! // most relevant doc ids...
//! let doc_addresses = top_collector.docs();
//! for doc_address in doc_addresses {
//! let retrieved_doc = searcher.doc(&doc_address)?;
//! let retrieved_doc = searcher.doc(doc_address)?;
//! println!("{}", schema.to_json(&retrieved_doc));
//! }
//!
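The doc example now passes `doc_address` by value: `DocAddress` is a small two-field struct that is cheaper to copy than to borrow. A hedged sketch of why the `&` can be dropped (the struct definition here mirrors the `DocAddress(segment_ord, doc_id)` tuple used below, but is written out as a standalone example):

    #[derive(Clone, Copy, Debug)]
    struct DocAddress(u32, u32); // (segment ordinal, segment-local doc id)

    fn segment_ord(addr: DocAddress) -> u32 {
        addr.0 // taken by value: two u32s, no reference needed
    }

    fn main() {
        let addr = DocAddress(0, 42);
        println!("segment {}, doc {}", segment_ord(addr), addr.1);
        println!("still usable after the call: {:?}", addr); // Copy semantics
    }
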
@@ -133,7 +130,6 @@ extern crate bit_set;
extern crate bitpacking;
extern crate byteorder;

#[macro_use]
extern crate combine;

extern crate crossbeam;
@@ -143,6 +139,7 @@ extern crate fst;
extern crate fst_regex;
extern crate futures;
extern crate futures_cpupool;
extern crate htmlescape;
extern crate itertools;
extern crate levenshtein_automata;
extern crate num_cpus;
@@ -155,6 +152,8 @@ extern crate tempdir;
extern crate tempfile;
extern crate uuid;


#[cfg(test)]
#[macro_use]
extern crate matches;
@@ -165,14 +164,19 @@ extern crate winapi;
#[cfg(test)]
extern crate rand;

#[cfg(test)]
#[macro_use]
extern crate maplit;

#[cfg(all(test, feature = "unstable"))]
extern crate test;

extern crate tinysegmenter;

#[macro_use]
extern crate downcast;

#[macro_use]
extern crate fail;

#[cfg(test)]
mod functional_test;

@@ -181,7 +185,10 @@ mod macros;

pub use error::TantivyError;

#[deprecated(since="0.7.0", note="please use `tantivy::TantivyError` instead")]
#[deprecated(
since = "0.7.0",
note = "please use `tantivy::TantivyError` instead"
)]
pub use error::TantivyError as Error;

extern crate census;
@@ -206,9 +213,13 @@ pub(crate) mod positions;
pub mod postings;
pub mod query;
pub mod schema;
pub mod space_usage;
pub mod store;
pub mod termdict;

mod snippet;
pub use self::snippet::SnippetGenerator;

mod docset;
pub use self::docset::{DocSet, SkipResult};

@@ -261,12 +272,12 @@ impl DocAddress {
/// The segment ordinal is an id identifying the segment
/// hosting the document. It is only meaningful, in the context
/// of a searcher.
pub fn segment_ord(&self) -> SegmentLocalId {
pub fn segment_ord(self) -> SegmentLocalId {
self.0
}

/// Return the segment local `DocId`
pub fn doc(&self) -> DocId {
pub fn doc(self) -> DocId {
self.1
}
}
@@ -892,11 +903,11 @@ mod tests {
assert_eq!(document.len(), 3);
let values = document.get_all(text_field);
assert_eq!(values.len(), 2);
assert_eq!(values[0].text(), "tantivy");
assert_eq!(values[1].text(), "some other value");
assert_eq!(values[0].text(), Some("tantivy"));
assert_eq!(values[1].text(), Some("some other value"));
let values = document.get_all(other_text_field);
assert_eq!(values.len(), 1);
assert_eq!(values[0].text(), "short");
assert_eq!(values[0].text(), Some("short"));
}

#[test]

@@ -1,4 +1,3 @@

/// Positions are stored in three parts and over two files.
//
/// The `SegmentComponent::POSITIONS` file contains all of the bitpacked positions delta,
@@ -24,13 +23,12 @@
/// The long skip structure makes it possible to skip rapidly to a checkpoint close to this
/// value, and then skip normally.
///

mod reader;
mod serializer;

pub use self::reader::PositionReader;
pub use self::serializer::PositionSerializer;
use bitpacking::{BitPacker4x, BitPacker};
use bitpacking::{BitPacker, BitPacker4x};

const COMPRESSION_BLOCK_SIZE: usize = BitPacker4x::BLOCK_LEN;
const LONG_SKIP_IN_BLOCKS: usize = 1_024;
@@ -43,10 +41,10 @@ lazy_static! {
#[cfg(test)]
pub mod tests {

use std::iter;
use super::{PositionSerializer, PositionReader};
use super::{PositionReader, PositionSerializer};
use directory::ReadOnlySource;
use positions::COMPRESSION_BLOCK_SIZE;
use std::iter;

fn create_stream_buffer(vals: &[u32]) -> (ReadOnlySource, ReadOnlySource) {
let mut skip_buffer = vec![];
@@ -59,7 +57,10 @@ pub mod tests {
}
serializer.close().unwrap();
}
(ReadOnlySource::from(stream_buffer), ReadOnlySource::from(skip_buffer))
(
ReadOnlySource::from(stream_buffer),
ReadOnlySource::from(skip_buffer),
)
}

#[test]
@@ -103,7 +104,7 @@ pub mod tests {
assert_eq!(skip.len(), 12);
assert_eq!(stream.len(), 1168);

let mut position_reader = PositionReader::new(stream,skip, 0u64);
let mut position_reader = PositionReader::new(stream, skip, 0u64);
let mut buf = [0u32; 7];
let mut c = 0;
for _ in 0..100 {
@@ -125,7 +126,7 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 1_000_000);
let mut position_reader = PositionReader::new(stream,skip, 128 * 1024);
let mut position_reader = PositionReader::new(stream, skip, 128 * 1024);
let mut buf = [0u32; 1];
position_reader.read(&mut buf);
assert_eq!(buf[0], CONST_VAL);
@@ -137,12 +138,17 @@ pub mod tests {
let (stream, skip) = create_stream_buffer(&v[..]);
assert_eq!(skip.len(), 15_749);
assert_eq!(stream.len(), 4_987_872);
for &offset in &[10, 128 * 1024, 128 * 1024 - 1, 128 * 1024 + 7, 128 * 10 * 1024 + 10] {
let mut position_reader = PositionReader::new(stream.clone(),skip.clone(), offset);
for &offset in &[
10,
128 * 1024,
128 * 1024 - 1,
128 * 1024 + 7,
128 * 10 * 1024 + 10,
] {
let mut position_reader = PositionReader::new(stream.clone(), skip.clone(), offset);
let mut buf = [0u32; 1];
position_reader.read(&mut buf);
assert_eq!(buf[0], offset as u32);
}
}
}


@@ -1,12 +1,12 @@
use bitpacking::{BitPacker4x, BitPacker};
use owned_read::OwnedRead;
use common::{BinarySerializable, FixedSize};
use postings::compression::compressed_block_size;
use directory::ReadOnlySource;
use positions::COMPRESSION_BLOCK_SIZE;
use positions::LONG_SKIP_IN_BLOCKS;
use positions::LONG_SKIP_INTERVAL;
use super::BIT_PACKER;
use bitpacking::{BitPacker, BitPacker4x};
use common::{BinarySerializable, FixedSize};
use directory::ReadOnlySource;
use owned_read::OwnedRead;
use positions::COMPRESSION_BLOCK_SIZE;
use positions::LONG_SKIP_INTERVAL;
use positions::LONG_SKIP_IN_BLOCKS;
use postings::compression::compressed_block_size;

pub struct PositionReader {
skip_read: OwnedRead,
@@ -18,7 +18,6 @@ pub struct PositionReader {
// of the block of the next int to read.
}


// `ahead` represents the offset of the block currently loaded
// compared to the cursor of the actual stream.
//
@@ -32,7 +31,8 @@ fn read_impl(
buffer: &mut [u32; 128],
mut inner_offset: usize,
num_bits: &[u8],
output: &mut [u32]) -> usize {
output: &mut [u32],
) -> usize {
let mut output_start = 0;
let mut output_len = output.len();
let mut ahead = 0;
@@ -47,8 +47,7 @@ fn read_impl(
output_start += available_len;
inner_offset = 0;
let num_bits = num_bits[ahead];
BitPacker4x::new()
.decompress(position, &mut buffer[..], num_bits);
BitPacker4x::new().decompress(position, &mut buffer[..], num_bits);
let block_len = compressed_block_size(num_bits);
position = &position[block_len..];
ahead += 1;
@@ -56,11 +55,12 @@ fn read_impl(
}
}


impl PositionReader {
pub fn new(position_source: ReadOnlySource,
skip_source: ReadOnlySource,
offset: u64) -> PositionReader {
pub fn new(
position_source: ReadOnlySource,
skip_source: ReadOnlySource,
offset: u64,
) -> PositionReader {
let skip_len = skip_source.len();
let (body, footer) = skip_source.split(skip_len - u32::SIZE_IN_BYTES);
let num_long_skips = u32::deserialize(&mut footer.as_slice()).expect("Index corrupted");
@@ -70,7 +70,8 @@ impl PositionReader {
let small_skip = (offset - (long_skip_id as u64) * (LONG_SKIP_INTERVAL as u64)) as usize;
let offset_num_bytes: u64 = {
if long_skip_id > 0 {
let mut long_skip_blocks: &[u8] = &long_skips.as_slice()[(long_skip_id - 1) * 8..][..8];
let mut long_skip_blocks: &[u8] =
&long_skips.as_slice()[(long_skip_id - 1) * 8..][..8];
u64::deserialize(&mut long_skip_blocks).expect("Index corrupted") * 16
} else {
0
@@ -79,13 +80,13 @@ impl PositionReader {
let mut position_read = OwnedRead::new(position_source);
position_read.advance(offset_num_bytes as usize);
let mut skip_read = OwnedRead::new(skip_body);
skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
skip_read.advance(long_skip_id * LONG_SKIP_IN_BLOCKS);
let mut position_reader = PositionReader {
skip_read,
position_read,
inner_offset: 0,
buffer: Box::new([0u32; 128]),
ahead: None
ahead: None,
};
position_reader.skip(small_skip);
position_reader
@@ -108,7 +109,8 @@ impl PositionReader {
self.buffer.as_mut(),
self.inner_offset,
&skip_data[1..],
output));
output,
));
}

/// Skip the next `skip_len` integers.
@@ -118,27 +120,25 @@ impl PositionReader {
///
/// May panic if the end of the stream is reached.
pub fn skip(&mut self, skip_len: usize) {

let skip_len_plus_inner_offset = skip_len + self.inner_offset;

let num_blocks_to_advance = skip_len_plus_inner_offset / COMPRESSION_BLOCK_SIZE;
self.inner_offset = skip_len_plus_inner_offset % COMPRESSION_BLOCK_SIZE;

self.ahead = self.ahead
.and_then(|num_blocks| {
if num_blocks >= num_blocks_to_advance {
Some(num_blocks_to_advance - num_blocks_to_advance)
} else {
None
}
});
self.ahead = self.ahead.and_then(|num_blocks| {
if num_blocks >= num_blocks_to_advance {
Some(num_blocks - num_blocks_to_advance)
} else {
None
}
});

let skip_len = self.skip_read
.as_ref()[..num_blocks_to_advance]
let skip_len = self.skip_read.as_ref()[..num_blocks_to_advance]
.iter()
.cloned()
.map(|num_bit| num_bit as usize)
.sum::<usize>() * (COMPRESSION_BLOCK_SIZE / 8);
.sum::<usize>()
* (COMPRESSION_BLOCK_SIZE / 8);

self.skip_read.advance(num_blocks_to_advance);
self.position_read.advance(skip_len);

@@ -1,8 +1,8 @@
use std::io;
use bitpacking::BitPacker;
use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
use common::BinarySerializable;
use super::BIT_PACKER;
use bitpacking::BitPacker;
use common::BinarySerializable;
use positions::{COMPRESSION_BLOCK_SIZE, LONG_SKIP_INTERVAL};
use std::io;

pub struct PositionSerializer<W: io::Write> {
write_stream: W,
@@ -23,7 +23,7 @@ impl<W: io::Write> PositionSerializer<W> {
buffer: vec![0u8; 128 * 4],
num_ints: 0u64,
long_skips: Vec::new(),
cumulated_num_bits: 0u64
cumulated_num_bits: 0u64,
}
}

@@ -31,7 +31,6 @@ impl<W: io::Write> PositionSerializer<W> {
self.num_ints
}


fn remaining_block_len(&self) -> usize {
COMPRESSION_BLOCK_SIZE - self.block.len()
}
@@ -52,8 +51,8 @@ impl<W: io::Write> PositionSerializer<W> {

fn flush_block(&mut self) -> io::Result<()> {
let num_bits = BIT_PACKER.num_bits(&self.block[..]);
self.cumulated_num_bits += num_bits as u64;
self.write_skiplist.write(&[num_bits])?;
self.cumulated_num_bits += u64::from(num_bits);
self.write_skiplist.write_all(&[num_bits])?;
let written_len = BIT_PACKER.compress(&self.block[..], &mut self.buffer, num_bits);
self.write_stream.write_all(&self.buffer[..written_len])?;
self.block.clear();

@@ -28,14 +28,16 @@ impl BlockEncoder {

pub fn compress_block_sorted(&mut self, block: &[u32], offset: u32) -> (u8, &[u8]) {
let num_bits = self.bitpacker.num_bits_sorted(offset, block);
let written_size = self.bitpacker
.compress_sorted(offset, block, &mut self.output[..], num_bits);
let written_size =
self.bitpacker
.compress_sorted(offset, block, &mut self.output[..], num_bits);
(num_bits, &self.output[..written_size])
}

pub fn compress_block_unsorted(&mut self, block: &[u32]) -> (u8, &[u8]) {
let num_bits = self.bitpacker.num_bits(block);
let written_size = self.bitpacker
let written_size = self
.bitpacker
.compress(block, &mut self.output[..], num_bits);
(num_bits, &self.output[..written_size])
}
@@ -62,19 +64,21 @@ impl BlockDecoder {
}
}

pub fn uncompress_block_sorted(&mut self, compressed_data: &[u8], offset: u32, num_bits: u8) -> usize {
pub fn uncompress_block_sorted(
&mut self,
compressed_data: &[u8],
offset: u32,
num_bits: u8,
) -> usize {
self.output_len = COMPRESSION_BLOCK_SIZE;
self.bitpacker.decompress_sorted(
offset,
&compressed_data,
&mut self.output,
num_bits,
)
self.bitpacker
.decompress_sorted(offset, &compressed_data, &mut self.output, num_bits)
}

pub fn uncompress_block_unsorted(&mut self, compressed_data: &[u8], num_bits: u8) -> usize {
self.output_len = COMPRESSION_BLOCK_SIZE;
self.bitpacker.decompress(&compressed_data, &mut self.output, num_bits)
self.bitpacker
.decompress(&compressed_data, &mut self.output, num_bits)
}

#[inline]
@@ -88,7 +92,6 @@ impl BlockDecoder {
}
}


pub trait VIntEncoder {
/// Compresses an array of `u32` integers,
/// using [delta-encoding](https://en.wikipedia.org/wiki/Delta_encoding)

@@ -1,9 +1,5 @@
#[inline(always)]
pub fn compress_sorted<'a>(
input: &[u32],
output: &'a mut [u8],
mut offset: u32,
) -> &'a [u8] {
pub fn compress_sorted<'a>(input: &[u32], output: &'a mut [u8], mut offset: u32) -> &'a [u8] {
let mut byte_written = 0;
for &v in input {
let mut to_encode: u32 = v - offset;
@@ -46,47 +42,41 @@ pub(crate) fn compress_unsorted<'a>(input: &[u32], output: &'a mut [u8]) -> &'a
}

#[inline(always)]
pub fn uncompress_sorted<'a>(
compressed_data: &'a [u8],
output: &mut [u32],
offset: u32,
) -> usize {
pub fn uncompress_sorted<'a>(compressed_data: &'a [u8], output: &mut [u32], offset: u32) -> usize {
let mut read_byte = 0;
let mut result = offset;
let num_els = output.len();
for i in 0..num_els {
for output_mut in output.iter_mut() {
let mut shift = 0u32;
loop {
let cur_byte = compressed_data[read_byte];
read_byte += 1;
result += ((cur_byte % 128u8) as u32) << shift;
result += u32::from(cur_byte % 128u8) << shift;
if cur_byte & 128u8 != 0u8 {
break;
}
shift += 7;
}
output[i] = result;
*output_mut = result;
}
read_byte
}

#[inline(always)]
pub(crate) fn uncompress_unsorted<'a>(compressed_data: &'a [u8], output: &mut [u32]) -> usize {
pub(crate) fn uncompress_unsorted(compressed_data: &[u8], output_arr: &mut [u32]) -> usize {
let mut read_byte = 0;
let num_els = output.len();
for i in 0..num_els {
for output_mut in output_arr.iter_mut() {
let mut result = 0u32;
let mut shift = 0u32;
loop {
let cur_byte = compressed_data[read_byte];
read_byte += 1;
result += ((cur_byte % 128u8) as u32) << shift;
result += u32::from(cur_byte % 128u8) << shift;
if cur_byte & 128u8 != 0u8 {
break;
}
shift += 7;
}
output[i] = result;
*output_mut = result;
}
read_byte
}

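The vint coders above write each `u32` in 7-bit groups, least significant first, and mark the final byte by setting its high bit (note this flags the last byte, the reverse of the more common continuation-bit convention). A self-contained round trip under that convention:

    fn vint_encode(mut v: u32, out: &mut Vec<u8>) {
        while v >= 128 {
            out.push((v % 128) as u8); // 7 payload bits, high bit clear
            v /= 128;
        }
        out.push((v as u8) | 128u8); // high bit set marks the last byte
    }

    fn vint_decode(bytes: &[u8]) -> (u32, usize) {
        let (mut result, mut shift, mut read) = (0u32, 0u32, 0usize);
        loop {
            let b = bytes[read];
            read += 1;
            result += u32::from(b % 128) << shift;
            if b & 128 != 0 {
                return (result, read);
            }
            shift += 7;
        }
    }

    fn main() {
        let mut buf = Vec::new();
        vint_encode(300, &mut buf);
        assert_eq!(vint_decode(&buf), (300, 2)); // 300 takes two bytes
    }
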
@@ -2,6 +2,7 @@
|
||||
Postings module (also called inverted index)
|
||||
*/
|
||||
|
||||
pub(crate) mod compression;
|
||||
/// Postings module
|
||||
///
|
||||
/// Postings, also called inverted lists, is the key datastructure
|
||||
@@ -11,18 +12,17 @@ mod postings_writer;
|
||||
mod recorder;
|
||||
mod segment_postings;
|
||||
mod serializer;
|
||||
pub(crate) mod compression;
|
||||
mod skip;
|
||||
mod stacker;
|
||||
mod term_info;
|
||||
mod skip;
|
||||
|
||||
pub(crate) use self::postings_writer::MultiFieldPostingsWriter;
|
||||
pub use self::serializer::{FieldSerializer, InvertedIndexSerializer};
|
||||
|
||||
use self::compression::COMPRESSION_BLOCK_SIZE;
|
||||
pub use self::postings::Postings;
|
||||
pub use self::term_info::TermInfo;
|
||||
pub(crate) use self::skip::SkipReader;
|
||||
use self::compression::{COMPRESSION_BLOCK_SIZE};
|
||||
pub use self::term_info::TermInfo;
|
||||
|
||||
pub use self::segment_postings::{BlockSegmentPostings, SegmentPostings};
|
||||
|
||||
@@ -34,7 +34,7 @@ pub(crate) const USE_SKIP_INFO_LIMIT: u32 = COMPRESSION_BLOCK_SIZE as u32;
|
||||
|
||||
pub(crate) type UnorderedTermId = u64;
|
||||
|
||||
#[allow(enum_variant_names)]
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::enum_variant_names))]
|
||||
#[derive(Debug, PartialEq, Clone, Copy, Eq)]
|
||||
pub(crate) enum FreqReadingOption {
|
||||
NoFreq,
|
||||
@@ -71,8 +71,7 @@ pub mod tests {
|
||||
let mut segment = index.new_segment();
|
||||
let mut posting_serializer = InvertedIndexSerializer::open(&mut segment).unwrap();
|
||||
{
|
||||
let mut field_serializer = posting_serializer
|
||||
.new_field(text_field, 120 * 4).unwrap();
|
||||
let mut field_serializer = posting_serializer.new_field(text_field, 120 * 4).unwrap();
|
||||
field_serializer.new_term("abc".as_bytes()).unwrap();
|
||||
for doc_id in 0u32..120u32 {
|
||||
let delta_positions = vec![1, 2, 3, 2];
|
||||
@@ -512,13 +511,13 @@ pub mod tests {
|
||||
let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
|
||||
for _ in 0..posting_list_size {
|
||||
let mut doc = Document::default();
|
||||
if rng.gen_bool(1f64/ 15f64) {
|
||||
if rng.gen_bool(1f64 / 15f64) {
|
||||
doc.add_text(text_field, "a");
|
||||
}
|
||||
if rng.gen_bool(1f64/ 10f64) {
|
||||
if rng.gen_bool(1f64 / 10f64) {
|
||||
doc.add_text(text_field, "b");
|
||||
}
|
||||
if rng.gen_bool(1f64/ 5f64) {
|
||||
if rng.gen_bool(1f64 / 5f64) {
|
||||
doc.add_text(text_field, "c");
|
||||
}
|
||||
doc.add_text(text_field, "d");
|
||||
|
||||
@@ -15,7 +15,7 @@ use tokenizer::TokenStream;
|
||||
use DocId;
|
||||
use Result;
|
||||
|
||||
fn posting_from_field_entry<'a>(field_entry: &FieldEntry) -> Box<PostingsWriter> {
|
||||
fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<PostingsWriter> {
|
||||
match *field_entry.field_type() {
|
||||
FieldType::Str(ref text_options) => text_options
|
||||
.get_indexing_options()
|
||||
@@ -29,8 +29,7 @@ fn posting_from_field_entry<'a>(field_entry: &FieldEntry) -> Box<PostingsWriter>
|
||||
IndexRecordOption::WithFreqsAndPositions => {
|
||||
SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed()
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()),
|
||||
}).unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()),
|
||||
FieldType::U64(_) | FieldType::I64(_) | FieldType::HierarchicalFacet => {
|
||||
SpecializedPostingsWriter::<NothingRecorder>::new_boxed()
|
||||
}
|
||||
@@ -94,11 +93,12 @@ impl MultiFieldPostingsWriter {
|
||||
&self,
|
||||
serializer: &mut InvertedIndexSerializer,
|
||||
) -> Result<HashMap<Field, HashMap<UnorderedTermId, TermOrdinal>>> {
|
||||
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = self.term_index
|
||||
let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = self
|
||||
.term_index
|
||||
.iter()
|
||||
.map(|(term_bytes, addr, bucket_id)| (term_bytes, addr, bucket_id as UnorderedTermId))
|
||||
.collect();
|
||||
term_offsets.sort_by_key(|&(k, _, _)| k);
|
||||
term_offsets.sort_unstable_by_key(|&(k, _, _)| k);
|
||||
|
||||
let mut offsets: Vec<(Field, usize)> = vec![];
|
||||
let term_offsets_it = term_offsets
|
||||
@@ -127,8 +127,8 @@ impl MultiFieldPostingsWriter {
|
||||
|
||||
let field_entry = self.schema.get_field_entry(field);
|
||||
|
||||
match field_entry.field_type() {
|
||||
&FieldType::Str(_) | &FieldType::HierarchicalFacet => {
|
||||
match *field_entry.field_type() {
|
||||
FieldType::Str(_) | FieldType::HierarchicalFacet => {
|
||||
// populating the (unordered term ord) -> (ordered term ord) mapping
|
||||
// for the field.
|
||||
let mut unordered_term_ids = term_offsets[start..stop]
|
||||
@@ -138,12 +138,11 @@ impl MultiFieldPostingsWriter {
|
||||
.enumerate()
|
||||
.map(|(term_ord, unord_term_id)| {
|
||||
(unord_term_id as UnorderedTermId, term_ord as TermOrdinal)
|
||||
})
|
||||
.collect();
|
||||
}).collect();
|
||||
unordered_term_mappings.insert(field, mapping);
|
||||
}
|
||||
&FieldType::U64(_) | &FieldType::I64(_) => {}
|
||||
&FieldType::Bytes => {}
|
||||
FieldType::U64(_) | FieldType::I64(_) => {}
|
||||
FieldType::Bytes => {}
|
||||
}
|
||||
|
||||
let postings_writer = &self.per_field_postings_writers[field.0 as usize];
|
||||
@@ -202,14 +201,11 @@ pub trait PostingsWriter {
|
||||
heap: &mut MemoryArena,
|
||||
) -> u32 {
|
||||
let mut term = Term::for_field(field);
|
||||
let num_tokens = {
|
||||
let mut sink = |token: &Token| {
|
||||
term.set_text(token.text.as_str());
|
||||
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
|
||||
};
|
||||
token_stream.process(&mut sink)
|
||||
let mut sink = |token: &Token| {
|
||||
term.set_text(token.text.as_str());
|
||||
self.subscribe(term_index, doc_id, token.position as u32, &term, heap);
|
||||
};
|
||||
num_tokens
|
||||
token_stream.process(&mut sink)
|
||||
}
|
||||
|
||||
fn total_num_tokens(&self) -> u64;
|
||||
|
||||
@@ -107,7 +107,8 @@ impl Recorder for TermFrequencyRecorder {
|
||||
fn serialize(&self, serializer: &mut FieldSerializer, heap: &MemoryArena) -> io::Result<()> {
|
||||
// the last document has not been closed...
|
||||
// its term freq is self.current_tf.
|
||||
let mut doc_iter = self.stack
|
||||
let mut doc_iter = self
|
||||
.stack
|
||||
.iter(heap)
|
||||
.chain(Some(self.current_tf).into_iter());
|
||||
|
||||
|
||||
@@ -1,20 +1,20 @@
-use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
-use DocId;
 use common::BitSet;
 use common::HasLen;
-use postings::compression::compressed_block_size;
-use common::{VInt, BinarySerializable};
+use common::{BinarySerializable, VInt};
 use docset::{DocSet, SkipResult};
 use fst::Streamer;
+use owned_read::OwnedRead;
+use positions::PositionReader;
+use postings::compression::compressed_block_size;
+use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
 use postings::serializer::PostingsSerializer;
 use postings::FreqReadingOption;
 use postings::Postings;
-use owned_read::OwnedRead;
-use postings::USE_SKIP_INFO_LIMIT;
 use postings::SkipReader;
+use postings::USE_SKIP_INFO_LIMIT;
 use schema::IndexRecordOption;
-use positions::PositionReader;
 use std::cmp::Ordering;
+use DocId;

 const EMPTY_ARR: [u8; 0] = [];

@@ -98,7 +98,7 @@ impl SegmentPostings {
             docs.len() as u32,
             OwnedRead::new(buffer),
-            IndexRecordOption::Basic
+            IndexRecordOption::Basic,
         );
         SegmentPostings::from_block_postings(block_segment_postings, None)
     }
@@ -126,7 +126,6 @@ impl SegmentPostings {
 fn exponential_search(target: u32, arr: &[u32]) -> (usize, usize) {
     let mut start = 0;
     let end = arr.len();
     debug_assert!(target >= arr[start]);
     debug_assert!(target <= arr[end - 1]);
     let mut jump = 1;
     loop {
@@ -151,7 +150,11 @@ fn exponential_search(target: u32, arr: &[u32]) -> (usize, usize) {
 /// The target is assumed smaller or equal to the last element.
 fn search_within_block(block_docs: &[u32], target: u32) -> usize {
     let (start, end) = exponential_search(target, block_docs);
-    start.wrapping_add(block_docs[start..end].binary_search(&target).unwrap_or_else(|e| e))
+    start.wrapping_add(
+        block_docs[start..end]
+            .binary_search(&target)
+            .unwrap_or_else(|e| e),
+    )
 }
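The `search_within_block` helper above pairs an exponential (galloping) probe with a binary search: doubling jumps first bracket the target inside the sorted block, then the final window is resolved with `binary_search`. A minimal self-contained sketch of the same idea, independent of tantivy's types (the function name `gallop_search` is illustrative, not the crate's API):

```rust
/// Returns the index of the first element >= target, assuming a sorted,
/// non-empty slice whose last element is >= target (as the code above assumes).
fn gallop_search(sorted: &[u32], target: u32) -> usize {
    // Exponential phase: double the jump until we overshoot the target.
    let mut prev = 0usize;
    let mut jump = 1usize;
    while prev + jump < sorted.len() && sorted[prev + jump] < target {
        prev += jump;
        jump *= 2;
    }
    let end = (prev + jump + 1).min(sorted.len());
    // Binary phase: Err(i) is the insertion point, i.e. the first value >= target.
    prev + sorted[prev..end].binary_search(&target).unwrap_or_else(|i| i)
}

fn main() {
    let block: Vec<u32> = (0..64).map(|i| i * 2).collect(); // 0, 2, 4, ..., 126
    assert_eq!(gallop_search(&block, 0), 0);
    assert_eq!(gallop_search(&block, 7), 4); // first doc >= 7 is 8, at index 4
    assert_eq!(gallop_search(&block, 126), 63);
    println!("gallop_search ok");
}
```

Galloping is preferred over a plain binary search here because skip targets are usually close to the current cursor, so the answer is often found after a couple of jumps.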
 impl DocSet for SegmentPostings {
@@ -179,21 +182,20 @@ impl DocSet for SegmentPostings {
         // check if we need to go to the next block
         let need_positions = self.position_computer.is_some();
         let mut sum_freqs_skipped: u32 = 0;
-        if !self.block_cursor
-            .docs()
-            .last()
-            .map(|doc| *doc >= target)
-            .unwrap_or(false) // there should always be at least a document in the block
-                              // since advance returned.
+        if !self
+            .block_cursor
+            .docs()
+            .last()
+            .map(|doc| *doc >= target)
+            .unwrap_or(false)
+        // there should always be at least a document in the block
+        // since advance returned.
         {
             // we are not in the right block.
             //
             // First compute all of the freqs skipped from the current block.
             if need_positions {
-                sum_freqs_skipped = self.block_cursor
-                    .freqs()[self.cur..]
-                    .iter()
-                    .sum();
+                sum_freqs_skipped = self.block_cursor.freqs()[self.cur..].iter().sum();
                 match self.block_cursor.skip_to(target) {
                     BlockSegmentPostingsSkipResult::Success(block_skip_freqs) => {
                         sum_freqs_skipped += block_skip_freqs;
@@ -202,22 +204,25 @@ impl DocSet for SegmentPostings {
                         return SkipResult::End;
                     }
                 }
-            } else {
+            } else if self.block_cursor.skip_to(target)
+                == BlockSegmentPostingsSkipResult::Terminated
+            {
                 // no positions needed. no need to sum freqs.
-                if self.block_cursor.skip_to(target) == BlockSegmentPostingsSkipResult::Terminated {
-                    return SkipResult::End;
-                }
+                return SkipResult::End;
             }
             self.cur = 0;
         }

         // we're in the right block now, start with an exponential search
         let block_docs = self.block_cursor.docs();
+        let new_cur = self
+            .cur
+            .wrapping_add(search_within_block(&block_docs[self.cur..], target));

         debug_assert!(target >= self.doc());
-        let new_cur = self.cur.wrapping_add(search_within_block(&block_docs[self.cur..], target));
         if need_positions {
-            sum_freqs_skipped += self.block_cursor.freqs()[self.cur..new_cur].iter().sum::<u32>();
+            sum_freqs_skipped += self.block_cursor.freqs()[self.cur..new_cur]
+                .iter()
+                .sum::<u32>();
             self.position_computer
                 .as_mut()
                 .unwrap()
@@ -229,9 +234,9 @@ impl DocSet for SegmentPostings {
         let doc = block_docs[new_cur];
         debug_assert!(doc >= target);
         if doc == target {
-            return SkipResult::Reached;
+            SkipResult::Reached
         } else {
-            return SkipResult::OverStep;
+            SkipResult::OverStep
         }
     }
@@ -330,7 +335,10 @@ pub struct BlockSegmentPostings {
     skip_reader: SkipReader,
 }

-fn split_into_skips_and_postings(doc_freq: u32, mut data: OwnedRead) -> (Option<OwnedRead>, OwnedRead) {
+fn split_into_skips_and_postings(
+    doc_freq: u32,
+    mut data: OwnedRead,
+) -> (Option<OwnedRead>, OwnedRead) {
     if doc_freq >= USE_SKIP_INFO_LIMIT {
         let skip_len = VInt::deserialize(&mut data).expect("Data corrupted").0 as usize;
         let mut postings_data = data.clone();
@@ -345,7 +353,7 @@ fn split_into_skips_and_postings(doc_freq: u32, mut data: OwnedRead) -> (Option<
 #[derive(Debug, Eq, PartialEq)]
 pub enum BlockSegmentPostingsSkipResult {
     Terminated,
-    Success(u32) //< number of term freqs to skip
+    Success(u32), //< number of term freqs to skip
 }

 impl BlockSegmentPostings {
@@ -353,7 +361,7 @@ impl BlockSegmentPostings {
         doc_freq: u32,
         data: OwnedRead,
         record_option: IndexRecordOption,
-        requested_option: IndexRecordOption
+        requested_option: IndexRecordOption,
     ) -> BlockSegmentPostings {
         let freq_reading_option = match (record_option, requested_option) {
             (IndexRecordOption::Basic, _) => FreqReadingOption::NoFreq,
@@ -362,11 +370,10 @@ impl BlockSegmentPostings {
         };

         let (skip_data_opt, postings_data) = split_into_skips_and_postings(doc_freq, data);
-        let skip_reader =
-            match skip_data_opt {
-                Some(skip_data) => SkipReader::new(skip_data, record_option),
-                None => SkipReader::new(OwnedRead::new(&EMPTY_ARR[..]), record_option)
-            };
+        let skip_reader = match skip_data_opt {
+            Some(skip_data) => SkipReader::new(skip_data, record_option),
+            None => SkipReader::new(OwnedRead::new(&EMPTY_ARR[..]), record_option),
+        };
         let doc_freq = doc_freq as usize;
         let num_vint_docs = doc_freq % COMPRESSION_BLOCK_SIZE;
         BlockSegmentPostings {
@@ -450,7 +457,6 @@ impl BlockSegmentPostings {
         self.doc_decoder.output_len
     }

-
     /// position on a block that may contains `doc_id`.
     /// Always advance the current block.
     ///
@@ -461,9 +467,7 @@ impl BlockSegmentPostings {
     /// Returns false iff all of the document remaining are smaller than
     /// `doc_id`. In that case, all of these document are consumed.
     ///
-    pub fn skip_to(&mut self,
-                   target_doc: DocId) -> BlockSegmentPostingsSkipResult {
-
+    pub fn skip_to(&mut self, target_doc: DocId) -> BlockSegmentPostingsSkipResult {
         let mut skip_freqs = 0u32;
         while self.skip_reader.advance() {
             if self.skip_reader.doc() >= target_doc {
@@ -472,11 +476,11 @@ impl BlockSegmentPostings {
                 //
                 // We found our block!
                 let num_bits = self.skip_reader.doc_num_bits();
-                let num_consumed_bytes = self.doc_decoder
-                    .uncompress_block_sorted(
-                        self.remaining_data.as_ref(),
-                        self.doc_offset,
-                        num_bits);
+                let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted(
+                    self.remaining_data.as_ref(),
+                    self.doc_offset,
+                    num_bits,
+                );
                 self.remaining_data.advance(num_consumed_bytes);
                 let tf_num_bits = self.skip_reader.tf_num_bits();
                 match self.freq_reading_option {
@@ -486,9 +490,9 @@ impl BlockSegmentPostings {
                         self.remaining_data.advance(num_bytes_to_skip);
                     }
                     FreqReadingOption::ReadFreq => {
-                        let num_consumed_bytes = self.freq_decoder
-                            .uncompress_block_unsorted(self.remaining_data.as_ref(),
-                                                       tf_num_bits);
+                        let num_consumed_bytes = self
+                            .freq_decoder
+                            .uncompress_block_unsorted(self.remaining_data.as_ref(), tf_num_bits);
                         self.remaining_data.advance(num_consumed_bytes);
                     }
                 }
@@ -518,7 +522,8 @@ impl BlockSegmentPostings {
             }
         }
         self.num_vint_docs = 0;
-        return self.docs()
+        return self
+            .docs()
             .last()
             .map(|last_doc| {
                 if *last_doc >= target_doc {
@@ -526,8 +531,7 @@ impl BlockSegmentPostings {
                 } else {
                     BlockSegmentPostingsSkipResult::Terminated
                 }
-            })
-            .unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
+            }).unwrap_or(BlockSegmentPostingsSkipResult::Terminated);
         }
         BlockSegmentPostingsSkipResult::Terminated
     }
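`split_into_skips_and_postings` relies on a simple framing: once a term's doc frequency reaches `USE_SKIP_INFO_LIMIT`, its serialized data starts with the skip section's byte length as a `VInt`, followed by the skip bytes and then the postings bytes. A hedged sketch of that framing over a plain byte slice (the varint codec below is a stand-in for illustration, not tantivy's `VInt` implementation):

```rust
/// Decodes a little-endian base-128 varint, returning (value, bytes_read).
fn read_varint(data: &[u8]) -> (u64, usize) {
    let (mut value, mut shift, mut read) = (0u64, 0u32, 0usize);
    for &byte in data {
        value |= u64::from(byte & 0x7f) << shift;
        read += 1;
        if byte & 0x80 == 0 {
            break;
        }
        shift += 7;
    }
    (value, read)
}

/// Splits `data` into (skip_section, postings_section) following the
/// "varint length prefix" framing described above.
fn split_skips_and_postings(data: &[u8]) -> (&[u8], &[u8]) {
    let (skip_len, header) = read_varint(data);
    let body = &data[header..];
    body.split_at(skip_len as usize)
}

fn main() {
    // A 3-byte skip section [1, 2, 3], then postings bytes [9, 9].
    let framed = [0x03, 1, 2, 3, 9, 9];
    let (skips, postings) = split_skips_and_postings(&framed);
    assert_eq!(skips, &[1, 2, 3]);
    assert_eq!(postings, &[9, 9]);
}
```

Keeping both sections in one stream means small terms pay no skip overhead at all, while large terms get a self-describing skip index in front of their postings.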
@@ -538,11 +542,11 @@ impl BlockSegmentPostings {
     pub fn advance(&mut self) -> bool {
         if self.skip_reader.advance() {
             let num_bits = self.skip_reader.doc_num_bits();
-            let num_consumed_bytes = self.doc_decoder
-                .uncompress_block_sorted(
-                    self.remaining_data.as_ref(),
-                    self.doc_offset,
-                    num_bits);
+            let num_consumed_bytes = self.doc_decoder.uncompress_block_sorted(
+                self.remaining_data.as_ref(),
+                self.doc_offset,
+                num_bits,
+            );
             self.remaining_data.advance(num_consumed_bytes);
             let tf_num_bits = self.skip_reader.tf_num_bits();
             match self.freq_reading_option {
@@ -552,9 +556,9 @@ impl BlockSegmentPostings {
                     self.remaining_data.advance(num_bytes_to_skip);
                 }
                 FreqReadingOption::ReadFreq => {
-                    let num_consumed_bytes = self.freq_decoder
-                        .uncompress_block_unsorted(self.remaining_data.as_ref(),
-                                                   tf_num_bits);
+                    let num_consumed_bytes = self
+                        .freq_decoder
+                        .uncompress_block_unsorted(self.remaining_data.as_ref(), tf_num_bits);
                     self.remaining_data.advance(num_consumed_bytes);
                 }
             }
@@ -594,7 +598,6 @@ impl BlockSegmentPostings {
             doc_offset: 0,
             doc_freq: 0,
-
             remaining_data: OwnedRead::new(vec![]),
             skip_reader: SkipReader::new(OwnedRead::new(vec![]), IndexRecordOption::Basic),
         }
@@ -616,7 +619,9 @@ impl<'b> Streamer<'b> for BlockSegmentPostings {
 #[cfg(test)]
 mod tests {

+    use super::search_within_block;
     use super::BlockSegmentPostings;
+    use super::BlockSegmentPostingsSkipResult;
     use super::SegmentPostings;
     use common::HasLen;
     use core::Index;
@@ -625,10 +630,10 @@ mod tests {
     use schema::IndexRecordOption;
     use schema::SchemaBuilder;
     use schema::Term;
     use super::exponential_search;
     use schema::INT_INDEXED;
-    use super::BlockSegmentPostingsSkipResult;
     use DocId;
-    use super::search_within_block;
     use SkipResult;

     #[test]
     fn test_empty_segment_postings() {
@@ -645,7 +650,6 @@ mod tests {
         assert_eq!(postings.doc_freq(), 0);
     }

     fn search_within_block_trivial_but_slow(block: &[u32], target: u32) -> usize {
         block
             .iter()
@@ -653,11 +657,22 @@ mod tests {
             .enumerate()
             .filter(|&(_, ref val)| *val >= target)
             .next()
-            .unwrap().0
+            .unwrap()
+            .0
     }

     #[test]
     fn test_exponentiel_search() {
         assert_eq!(exponential_search(0, &[1,2]), (0, 1));
         assert_eq!(exponential_search(1, &[1,2]), (0, 1));
         assert_eq!(exponential_search(7, &[1,2,3,4,5,6,7,8,9,10,11]), (3,7));
     }

     fn util_test_search_within_block(block: &[u32], target: u32) {
-        assert_eq!(search_within_block(block, target), search_within_block_trivial_but_slow(block, target));
+        assert_eq!(
+            search_within_block(block, target),
+            search_within_block_trivial_but_slow(block, target)
+        );
     }

     fn util_test_search_within_block_all(block: &[u32]) {
@@ -677,14 +692,14 @@ mod tests {
     #[test]
     fn test_search_within_block() {
         for len in 1u32..128u32 {
-            let v: Vec<u32> = (0..len).map(|i| i*2).collect();
+            let v: Vec<u32> = (0..len).map(|i| i * 2).collect();
             util_test_search_within_block_all(&v[..]);
         }
     }

     #[test]
     fn test_block_segment_postings() {
-        let mut block_segments = build_block_postings((0..100_000).collect::<Vec<u32>>());
+        let mut block_segments = build_block_postings(&(0..100_000).collect::<Vec<u32>>());
         let mut offset: u32 = 0u32;
         // checking that the block before calling advance is empty
         assert!(block_segments.docs().is_empty());
@@ -698,14 +713,45 @@ mod tests {
         }
     }

-    fn build_block_postings(docs: Vec<DocId>) -> BlockSegmentPostings {
+    #[test]
+    fn test_skip_right_at_new_block() {
+        let mut doc_ids = (0..128).collect::<Vec<u32>>();
+        doc_ids.push(129);
+        doc_ids.push(130);
+        {
+            let block_segments = build_block_postings(&doc_ids);
+            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
+            assert_eq!(docset.skip_next(128), SkipResult::OverStep);
+            assert_eq!(docset.doc(), 129);
+            assert!(docset.advance());
+            assert_eq!(docset.doc(), 130);
+            assert!(!docset.advance());
+        }
+        {
+            let block_segments = build_block_postings(&doc_ids);
+            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
+            assert_eq!(docset.skip_next(129), SkipResult::Reached);
+            assert_eq!(docset.doc(), 129);
+            assert!(docset.advance());
+            assert_eq!(docset.doc(), 130);
+            assert!(!docset.advance());
+        }
+        {
+            let block_segments = build_block_postings(&doc_ids);
+            let mut docset = SegmentPostings::from_block_postings(block_segments, None);
+            assert_eq!(docset.skip_next(131), SkipResult::End);
+        }
+    }
+
+    fn build_block_postings(docs: &[DocId]) -> BlockSegmentPostings {
         let mut schema_builder = SchemaBuilder::default();
         let int_field = schema_builder.add_u64_field("id", INT_INDEXED);
         let schema = schema_builder.build();
         let index = Index::create_in_ram(schema);
         let mut index_writer = index.writer_with_num_threads(1, 40_000_000).unwrap();
         let mut last_doc = 0u32;
-        for doc in docs {
+        for &doc in docs {
             for _ in last_doc..doc {
                 index_writer.add_document(doc!(int_field=>1u64));
             }
@@ -725,30 +771,47 @@ mod tests {
     #[test]
     fn test_block_segment_postings_skip() {
         for i in 0..4 {
-            let mut block_postings = build_block_postings(vec![3]);
-            assert_eq!(block_postings.skip_to(i), BlockSegmentPostingsSkipResult::Success(0u32));
-            assert_eq!(block_postings.skip_to(i), BlockSegmentPostingsSkipResult::Terminated);
+            let mut block_postings = build_block_postings(&[3]);
+            assert_eq!(
+                block_postings.skip_to(i),
+                BlockSegmentPostingsSkipResult::Success(0u32)
+            );
+            assert_eq!(
+                block_postings.skip_to(i),
+                BlockSegmentPostingsSkipResult::Terminated
+            );
         }
-        let mut block_postings = build_block_postings(vec![3]);
-        assert_eq!(block_postings.skip_to(4u32), BlockSegmentPostingsSkipResult::Terminated);
+        let mut block_postings = build_block_postings(&[3]);
+        assert_eq!(
+            block_postings.skip_to(4u32),
+            BlockSegmentPostingsSkipResult::Terminated
+        );
     }

     #[test]
     fn test_block_segment_postings_skip2() {
         let mut docs = vec![0];
         for i in 0..1300 {
             docs.push((i * i / 100) + i);
         }
-        let mut block_postings = build_block_postings(docs.clone());
+        let mut block_postings = build_block_postings(&docs[..]);
         for i in vec![0, 424, 10000] {
-            assert_eq!(block_postings.skip_to(i), BlockSegmentPostingsSkipResult::Success(0u32));
+            assert_eq!(
+                block_postings.skip_to(i),
+                BlockSegmentPostingsSkipResult::Success(0u32)
+            );
             let docs = block_postings.docs();
             assert!(docs[0] <= i);
             assert!(docs.last().cloned().unwrap_or(0u32) >= i);
         }
-        assert_eq!(block_postings.skip_to(100_000), BlockSegmentPostingsSkipResult::Terminated);
-        assert_eq!(block_postings.skip_to(101_000), BlockSegmentPostingsSkipResult::Terminated);
+        assert_eq!(
+            block_postings.skip_to(100_000),
+            BlockSegmentPostingsSkipResult::Terminated
+        );
+        assert_eq!(
+            block_postings.skip_to(101_000),
+            BlockSegmentPostingsSkipResult::Terminated
+        );
     }

     #[test]
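The `test_skip_right_at_new_block` cases above pin down the `skip_next` contract at a block boundary: `Reached` when the exact target doc exists, `OverStep` when the cursor stops on the first doc past a missing target, and `End` when the postings are exhausted. A hedged sketch of that same three-way contract on a plain sorted `Vec` (the `SkipResult` enum mirrors tantivy's, `VecDocSet` is a made-up name for the example):

```rust
#[derive(Debug, PartialEq)]
enum SkipResult {
    Reached,  // the exact target doc was found
    OverStep, // the cursor stopped on the first doc greater than the target
    End,      // no doc >= target remains
}

struct VecDocSet {
    docs: Vec<u32>,
    cursor: usize,
}

impl VecDocSet {
    fn skip_next(&mut self, target: u32) -> SkipResult {
        while self.cursor < self.docs.len() {
            let doc = self.docs[self.cursor];
            if doc >= target {
                return if doc == target {
                    SkipResult::Reached
                } else {
                    SkipResult::OverStep
                };
            }
            self.cursor += 1;
        }
        SkipResult::End
    }
}

fn main() {
    let mut docs: Vec<u32> = (0..128).collect(); // 0..=127
    docs.push(129);
    docs.push(130);
    let mut docset = VecDocSet { docs: docs.clone(), cursor: 0 };
    assert_eq!(docset.skip_next(128), SkipResult::OverStep); // 128 is absent
    let mut docset = VecDocSet { docs, cursor: 0 };
    assert_eq!(docset.skip_next(131), SkipResult::End);
}
```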
@@ -1,18 +1,18 @@
 use super::TermInfo;
-use common::{VInt, BinarySerializable};
+use common::{BinarySerializable, VInt};
 use common::{CompositeWrite, CountingWriter};
-use postings::compression::{VIntEncoder, BlockEncoder, COMPRESSION_BLOCK_SIZE};
 use core::Segment;
 use directory::WritePtr;
+use positions::PositionSerializer;
+use postings::compression::{BlockEncoder, VIntEncoder, COMPRESSION_BLOCK_SIZE};
+use postings::skip::SkipSerializer;
+use postings::USE_SKIP_INFO_LIMIT;
 use schema::Schema;
 use schema::{Field, FieldEntry, FieldType};
 use std::io::{self, Write};
 use termdict::{TermDictionaryBuilder, TermOrdinal};
 use DocId;
 use Result;
-use postings::USE_SKIP_INFO_LIMIT;
-use postings::skip::SkipSerializer;
-use positions::PositionSerializer;

 /// `PostingsSerializer` is in charge of serializing
 /// postings on disk, in the
@@ -100,11 +100,11 @@ impl InvertedIndexSerializer {
         let positionsidx_write = self.positionsidx_write.for_field(field);
         let field_type: FieldType = (*field_entry.field_type()).clone();
         FieldSerializer::new(
-            field_type,
+            &field_type,
             term_dictionary_write,
             postings_write,
             positions_write,
-            positionsidx_write
+            positionsidx_write,
         )
     }

@@ -131,11 +131,11 @@ pub struct FieldSerializer<'a> {

 impl<'a> FieldSerializer<'a> {
     fn new(
-        field_type: FieldType,
+        field_type: &FieldType,
         term_dictionary_write: &'a mut CountingWriter<WritePtr>,
         postings_write: &'a mut CountingWriter<WritePtr>,
         positions_write: &'a mut CountingWriter<WritePtr>,
-        positionsidx_write: &'a mut CountingWriter<WritePtr>
+        positionsidx_write: &'a mut CountingWriter<WritePtr>,
     ) -> io::Result<FieldSerializer<'a>> {
         let (term_freq_enabled, position_enabled): (bool, bool) = match field_type {
             FieldType::Str(ref text_options) => {
@@ -152,8 +152,9 @@ impl<'a> FieldSerializer<'a> {
             _ => (false, false),
         };
         let term_dictionary_builder =
-            TermDictionaryBuilder::new(term_dictionary_write, field_type)?;
-        let postings_serializer = PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
+            TermDictionaryBuilder::new(term_dictionary_write, &field_type)?;
+        let postings_serializer =
+            PostingsSerializer::new(postings_write, term_freq_enabled, position_enabled);
         let positions_serializer_opt = if position_enabled {
             Some(PositionSerializer::new(positions_write, positionsidx_write))
         } else {
@@ -171,14 +172,15 @@ impl<'a> FieldSerializer<'a> {
     }

     fn current_term_info(&self) -> TermInfo {
-        let positions_idx = self.positions_serializer_opt
+        let positions_idx = self
+            .positions_serializer_opt
             .as_ref()
             .map(|positions_serializer| positions_serializer.positions_idx())
             .unwrap_or(0u64);
         TermInfo {
             doc_freq: 0,
             postings_offset: self.postings_serializer.addr(),
-            positions_idx
+            positions_idx,
         }
     }

@@ -253,7 +255,7 @@ impl<'a> FieldSerializer<'a> {
 struct Block {
     doc_ids: [DocId; COMPRESSION_BLOCK_SIZE],
     term_freqs: [u32; COMPRESSION_BLOCK_SIZE],
-    len: usize
+    len: usize,
 }

 impl Block {
@@ -261,7 +263,7 @@ impl Block {
         Block {
             doc_ids: [0u32; COMPRESSION_BLOCK_SIZE],
             term_freqs: [0u32; COMPRESSION_BLOCK_SIZE],
-            len: 0
+            len: 0,
         }
     }

@@ -312,9 +314,12 @@ pub struct PostingsSerializer<W: Write> {
     termfreq_sum_enabled: bool,
 }

 impl<W: Write> PostingsSerializer<W> {
-    pub fn new(write: W, termfreq_enabled: bool, termfreq_sum_enabled: bool) -> PostingsSerializer<W> {
+    pub fn new(
+        write: W,
+        termfreq_enabled: bool,
+        termfreq_sum_enabled: bool,
+    ) -> PostingsSerializer<W> {
         PostingsSerializer {
             output_write: CountingWriter::wrap(write),

@@ -337,14 +342,16 @@ impl<W: Write> PostingsSerializer<W> {
                 .block_encoder
                 .compress_block_sorted(&self.block.doc_ids(), self.last_doc_id_encoded);
             self.last_doc_id_encoded = self.block.last_doc();
-            self.skip_write.write_doc(self.last_doc_id_encoded, num_bits);
+            self.skip_write
+                .write_doc(self.last_doc_id_encoded, num_bits);
             // last el block 0, offset block 1,
             self.postings_write.extend(block_encoded);
         }
         if self.termfreq_enabled {
             // encode the term_freqs
-            let (num_bits, block_encoded): (u8, &[u8]) =
-                self.block_encoder.compress_block_unsorted(&self.block.term_freqs());
+            let (num_bits, block_encoded): (u8, &[u8]) = self
+                .block_encoder
+                .compress_block_unsorted(&self.block.term_freqs());
             self.postings_write.extend(block_encoded);
             self.skip_write.write_term_freq(num_bits);
             if self.termfreq_sum_enabled {
@@ -375,13 +382,15 @@ impl<W: Write> PostingsSerializer<W> {
         // In that case, the remaining part is encoded
         // using variable int encoding.
         {
-            let block_encoded = self.block_encoder
+            let block_encoded = self
+                .block_encoder
                 .compress_vint_sorted(&self.block.doc_ids(), self.last_doc_id_encoded);
             self.postings_write.write_all(block_encoded)?;
         }
         // ... Idem for term frequencies
         if self.termfreq_enabled {
-            let block_encoded = self.block_encoder
+            let block_encoded = self
+                .block_encoder
                 .compress_vint_unsorted(self.block.term_freqs());
             self.postings_write.write_all(block_encoded)?;
         }
@@ -392,7 +401,6 @@ impl<W: Write> PostingsSerializer<W> {
             VInt(skip_data.len() as u64).serialize(&mut self.output_write)?;
             self.output_write.write_all(skip_data)?;
             self.output_write.write_all(&self.postings_write[..])?;
-
         } else {
             self.output_write.write_all(&self.postings_write[..])?;
         }
@@ -1,8 +1,8 @@
-use DocId;
 use common::BinarySerializable;
 use owned_read::OwnedRead;
 use postings::compression::COMPRESSION_BLOCK_SIZE;
 use schema::IndexRecordOption;
+use DocId;

 pub struct SkipSerializer {
     buffer: Vec<u8>,
@@ -18,8 +18,11 @@ impl SkipSerializer {
     }

     pub fn write_doc(&mut self, last_doc: DocId, doc_num_bits: u8) {
-        assert!(last_doc > self.prev_doc, "write_doc(...) called with non-increasing doc ids. \
-                Did you forget to call clear maybe?");
+        assert!(
+            last_doc > self.prev_doc,
+            "write_doc(...) called with non-increasing doc ids. \
+             Did you forget to call clear maybe?"
+        );
         let delta_doc = last_doc - self.prev_doc;
         self.prev_doc = last_doc;
         delta_doc.serialize(&mut self.buffer).unwrap();
@@ -30,9 +33,10 @@ impl SkipSerializer {
         self.buffer.push(tf_num_bits);
     }

-
     pub fn write_total_term_freq(&mut self, tf_sum: u32) {
-        tf_sum.serialize(&mut self.buffer).expect("Should never fail");
+        tf_sum
+            .serialize(&mut self.buffer)
+            .expect("Should never fail");
     }

     pub fn data(&self) -> &[u8] {
@@ -103,33 +107,32 @@ impl SkipReader {
         } else {
             let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
             self.doc += doc_delta as DocId;
             self.doc_num_bits = self.owned_read.get(0);
             match self.skip_info {
                 IndexRecordOption::Basic => {
                     self.owned_read.advance(1);
                 }
-                IndexRecordOption::WithFreqs=> {
+                IndexRecordOption::WithFreqs => {
                     self.tf_num_bits = self.owned_read.get(1);
                     self.owned_read.advance(2);
                 }
                 IndexRecordOption::WithFreqsAndPositions => {
                     self.tf_num_bits = self.owned_read.get(1);
                     self.owned_read.advance(2);
-                    self.tf_sum = u32::deserialize(&mut self.owned_read)
-                        .expect("Failed reading tf_sum");
+                    self.tf_sum =
+                        u32::deserialize(&mut self.owned_read).expect("Failed reading tf_sum");
                 }
             }
             true
         }
     }
 }

 #[cfg(test)]
 mod tests {

+    use super::IndexRecordOption;
     use super::{SkipReader, SkipSerializer};
     use owned_read::OwnedRead;

     #[test]
@@ -171,4 +174,4 @@ mod tests {
         assert_eq!(skip_reader.doc_num_bits(), 5u8);
         assert!(!skip_reader.advance());
     }
 }
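Each skip entry above stores the block's last doc id as a delta against the previous entry (`delta_doc = last_doc - self.prev_doc`), alongside the bit widths needed to decode the block. A small self-contained sketch of just that delta layer, using plain `u32`s instead of tantivy's serializers:

```rust
/// Encodes a strictly increasing sequence of last-doc-ids as deltas,
/// mirroring SkipSerializer::write_doc above.
fn encode_deltas(last_docs: &[u32]) -> Vec<u32> {
    let mut prev = 0u32;
    last_docs
        .iter()
        .map(|&doc| {
            assert!(doc > prev, "doc ids must be strictly increasing");
            let delta = doc - prev;
            prev = doc;
            delta
        })
        .collect()
}

/// Rebuilds the absolute doc ids with a running sum, as SkipReader::advance does.
fn decode_deltas(deltas: &[u32]) -> Vec<u32> {
    let mut doc = 0u32;
    deltas
        .iter()
        .map(|&delta| {
            doc += delta;
            doc
        })
        .collect()
}

fn main() {
    let last_docs = [127, 255, 300, 1024];
    let deltas = encode_deltas(&last_docs);
    assert_eq!(deltas, vec![127, 128, 45, 724]);
    assert_eq!(decode_deltas(&deltas), last_docs);
}
```

Deltas stay small even when absolute doc ids grow large, which is what makes the subsequent bit-packing and varint encoding effective.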
@@ -47,7 +47,7 @@ impl Addr {
     }

     /// Returns the `Addr` object for `addr + offset`
-    pub fn offset(&self, offset: u32) -> Addr {
+    pub fn offset(self, offset: u32) -> Addr {
         Addr(self.0.wrapping_add(offset))
     }

@@ -55,16 +55,16 @@ impl Addr {
         Addr((page_id << NUM_BITS_PAGE_ADDR | local_addr) as u32)
     }

-    fn page_id(&self) -> usize {
+    fn page_id(self) -> usize {
         (self.0 as usize) >> NUM_BITS_PAGE_ADDR
     }

-    fn page_local_addr(&self) -> usize {
+    fn page_local_addr(self) -> usize {
         (self.0 as usize) & (PAGE_SIZE - 1)
     }

     /// Returns true if and only if the `Addr` is null.
-    pub fn is_null(&self) -> bool {
+    pub fn is_null(self) -> bool {
         self.0 == u32::max_value()
     }
 }
@@ -233,12 +233,12 @@ impl Page {

     #[inline(always)]
     pub(crate) unsafe fn get_ptr(&self, addr: usize) -> *const u8 {
-        self.data.as_ptr().offset(addr as isize)
+        self.data.as_ptr().add(addr)
     }

     #[inline(always)]
     pub(crate) unsafe fn get_mut_ptr(&mut self, addr: usize) -> *mut u8 {
-        self.data.as_mut_ptr().offset(addr as isize)
+        self.data.as_mut_ptr().add(addr)
     }
 }
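An `Addr` packs a page id and a page-local offset into a single `u32`: the page id lives in the high bits (shifted by `NUM_BITS_PAGE_ADDR`) and the local offset in the low bits (masked with `PAGE_SIZE - 1`). A hedged sketch of that packing with an assumed 12-bit page size, i.e. 4 KiB pages (tantivy's actual constants may differ):

```rust
// Assumed values for illustration: 12 address bits => 4 KiB pages.
const NUM_BITS_PAGE_ADDR: usize = 12;
const PAGE_SIZE: usize = 1 << NUM_BITS_PAGE_ADDR;

#[derive(Clone, Copy, Debug)]
struct Addr(u32);

impl Addr {
    // Pack: high bits carry the page id, low bits the in-page offset.
    fn new(page_id: usize, local_addr: usize) -> Addr {
        Addr((page_id << NUM_BITS_PAGE_ADDR | local_addr) as u32)
    }
    fn page_id(self) -> usize {
        (self.0 as usize) >> NUM_BITS_PAGE_ADDR
    }
    fn page_local_addr(self) -> usize {
        (self.0 as usize) & (PAGE_SIZE - 1)
    }
}

fn main() {
    let addr = Addr::new(3, 0x0ab);
    assert_eq!(addr.page_id(), 3);
    assert_eq!(addr.page_local_addr(), 0x0ab);
    // The round-trip holds as long as local_addr < PAGE_SIZE.
    println!("{:?}", addr);
}
```

Passing such a `Copy` handle by value, as the diff above switches to, avoids an indirection for what is really just a 4-byte integer.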
@@ -4,6 +4,7 @@ const M: u32 = 0x5bd1_e995;

 #[inline(always)]
 pub fn murmurhash2(key: &[u8]) -> u32 {
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
     let mut key_ptr: *const u32 = key.as_ptr() as *const u32;
     let len = key.len() as u32;
     let mut h: u32 = SEED ^ len;
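The `cast_ptr_alignment` allow above silences clippy for the `*const u8 -> *const u32` cast in the murmurhash kernel, which reads the key 4 bytes at a time from an arbitrarily aligned byte slice. For illustration, a hedged sketch of the alignment-safe way to do the same 4-byte reads, which sidesteps the lint entirely (the actual hash rounds are omitted):

```rust
/// Iterates over a byte slice in little-endian u32 chunks without any
/// unaligned pointer casts; `chunks_exact` leaves the 0..3 tail bytes over,
/// which murmurhash2 folds in separately.
fn u32_chunks(key: &[u8]) -> (impl Iterator<Item = u32> + '_, &[u8]) {
    let chunks = key.chunks_exact(4);
    let remainder = chunks.remainder();
    (
        chunks.map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])),
        remainder,
    )
}

fn main() {
    let key = b"abcdefg";
    let (words, tail) = u32_chunks(key);
    let words: Vec<u32> = words.collect();
    assert_eq!(words.len(), 1); // "abcd"
    assert_eq!(tail, b"efg");
    assert_eq!(words[0], u32::from_le_bytes(*b"abcd"));
}
```

The raw pointer cast is presumably kept in the crate for speed on platforms where unaligned u32 loads are cheap; the sketch trades that for portability.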
@@ -61,7 +61,7 @@ impl Default for KeyValue {
 }

 impl KeyValue {
-    fn is_empty(&self) -> bool {
+    fn is_empty(self) -> bool {
         self.key_value_addr.is_null()
     }
 }
@@ -59,10 +59,10 @@ impl DocSet for AllScorer {
             }
         }
         if self.doc < self.max_doc {
-            return true;
+            true
         } else {
             self.state = State::Finished;
-            return false;
+            false
         }
     }
@@ -17,9 +17,9 @@ fn cached_tf_component(fieldnorm: u32, average_fieldnorm: f32) -> f32 {

 fn compute_tf_cache(average_fieldnorm: f32) -> [f32; 256] {
     let mut cache = [0f32; 256];
-    for fieldnorm_id in 0..256 {
+    for (fieldnorm_id, cache_mut) in cache.iter_mut().enumerate() {
         let fieldnorm = FieldNormReader::id_to_fieldnorm(fieldnorm_id as u8);
-        cache[fieldnorm_id] = cached_tf_component(fieldnorm, average_fieldnorm);
+        *cache_mut = cached_tf_component(fieldnorm, average_fieldnorm);
     }
     cache
 }
@@ -54,7 +54,7 @@ impl BM25Weight {
         for segment_reader in searcher.segment_readers() {
             let inverted_index = segment_reader.inverted_index(field);
             total_num_tokens += inverted_index.total_num_tokens();
-            total_num_docs += segment_reader.max_doc() as u64;
+            total_num_docs += u64::from(segment_reader.max_doc());
         }
         let average_fieldnorm = total_num_tokens as f32 / total_num_docs as f32;

@@ -63,8 +63,7 @@ impl BM25Weight {
             .map(|term| {
                 let term_doc_freq = searcher.doc_freq(term);
                 idf(term_doc_freq, total_num_docs)
-            })
-            .sum::<f32>();
+            }).sum::<f32>();
         BM25Weight::new(idf, average_fieldnorm)
     }
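The weight above sums `idf(term_doc_freq, total_num_docs)` over the query terms and derives the average field norm as total tokens over total docs. For context, a hedged sketch of the standard Lucene-style BM25 idf; this formula is an assumption for illustration, since the `idf` helper itself is not shown in this diff:

```rust
/// Standard BM25 inverse document frequency (assumed formula):
/// idf(df, n) = ln(1 + (n - df + 0.5) / (df + 0.5))
fn idf(doc_freq: u64, num_docs: u64) -> f32 {
    let x = (num_docs as f32 - doc_freq as f32 + 0.5) / (doc_freq as f32 + 0.5);
    (1f32 + x).ln()
}

fn main() {
    // Rare terms score high, common terms score low.
    assert!(idf(1, 1_000) > idf(500, 1_000));
    // Summing idf over all query terms mirrors the `.map(...).sum::<f32>()`
    // fold in the diff above.
    let query_doc_freqs = [3u64, 120, 47];
    let total: f32 = query_doc_freqs.iter().map(|&df| idf(df, 1_000)).sum();
    println!("summed idf = {}", total);
}
```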
@@ -5,6 +5,7 @@ use query::TermQuery;
 use query::Weight;
 use schema::IndexRecordOption;
 use schema::Term;
+use std::collections::BTreeSet;
 use Result;
 use Searcher;

@@ -27,7 +28,7 @@ impl Clone for BooleanQuery {
     fn clone(&self) -> Self {
         self.subqueries
             .iter()
-            .map(|(x, y)| (x.clone(), y.box_clone()))
+            .map(|(occur, subquery)| (*occur, subquery.box_clone()))
             .collect::<Vec<_>>()
             .into()
     }
@@ -41,14 +42,20 @@ impl From<Vec<(Occur, Box<Query>)>> for BooleanQuery {

 impl Query for BooleanQuery {
     fn weight(&self, searcher: &Searcher, scoring_enabled: bool) -> Result<Box<Weight>> {
-        let sub_weights = self.subqueries
+        let sub_weights = self
+            .subqueries
             .iter()
             .map(|&(ref occur, ref subquery)| {
                 Ok((*occur, subquery.weight(searcher, scoring_enabled)?))
-            })
-            .collect::<Result<_>>()?;
+            }).collect::<Result<_>>()?;
         Ok(Box::new(BooleanWeight::new(sub_weights, scoring_enabled)))
     }

+    fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
+        for (_occur, subquery) in &self.subqueries {
+            subquery.query_terms(term_set);
+        }
+    }
 }

 impl BooleanQuery {
@@ -61,8 +68,7 @@ impl BooleanQuery {
                 let term_query: Box<Query> =
                     Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs));
                 (Occur::Should, term_query)
-            })
-            .collect();
+            }).collect();
         BooleanQuery::from(occur_term_queries)
     }
@@ -39,7 +39,7 @@
     }

     let scorer: Box<Scorer> = Box::new(Union::<_, TScoreCombiner>::from(scorers));
-    return scorer;
+    scorer
 }

 pub struct BooleanWeight {

@@ -69,7 +69,7 @@ mod tests {
         let query_parser = QueryParser::for_index(&index, vec![text_field]);
         let query = query_parser.parse_query("+a").unwrap();
         let searcher = index.searcher();
-        let weight = query.weight(&*searcher, true).unwrap();
+        let weight = query.weight(&searcher, true).unwrap();
         let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
         assert!(Downcast::<TermScorer>::is_type(&*scorer));
     }
@@ -81,13 +81,13 @@ mod tests {
         let searcher = index.searcher();
         {
             let query = query_parser.parse_query("+a +b +c").unwrap();
-            let weight = query.weight(&*searcher, true).unwrap();
+            let weight = query.weight(&searcher, true).unwrap();
             let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
             assert!(Downcast::<Intersection<TermScorer>>::is_type(&*scorer));
         }
         {
             let query = query_parser.parse_query("+a +(b c)").unwrap();
-            let weight = query.weight(&*searcher, true).unwrap();
+            let weight = query.weight(&searcher, true).unwrap();
             let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
             assert!(Downcast::<Intersection<Box<Scorer>>>::is_type(&*scorer));
         }
@@ -100,7 +100,7 @@ mod tests {
         let searcher = index.searcher();
         {
             let query = query_parser.parse_query("+a b").unwrap();
-            let weight = query.weight(&*searcher, true).unwrap();
+            let weight = query.weight(&searcher, true).unwrap();
             let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
             assert!(Downcast::<
                 RequiredOptionalScorer<Box<Scorer>, Box<Scorer>, SumWithCoordsCombiner>,
@@ -108,7 +108,7 @@ mod tests {
         }
         {
             let query = query_parser.parse_query("+a b").unwrap();
-            let weight = query.weight(&*searcher, false).unwrap();
+            let weight = query.weight(&searcher, false).unwrap();
             let scorer = weight.scorer(searcher.segment_reader(0u32)).unwrap();
             println!("{:?}", scorer.type_name());
             assert!(Downcast::<TermScorer>::is_type(&*scorer));
@@ -1,11 +1,11 @@
 use super::Scorer;
-use DocSet;
-use Score;
-use DocId;
 use query::Query;
-use Result;
-use Searcher;
 use query::Weight;
+use DocId;
+use DocSet;
+use Result;
+use Score;
+use Searcher;
 use SegmentReader;

 /// `EmptyQuery` is a dummy `Query` in which no document matches.
@@ -1,96 +0,0 @@
-use super::FastFieldFilterWeight;
-use query::Query;
-use query::Weight;
-use Result;
-use Searcher;
-use schema::Field;
-use super::RangeU64;
-use std::collections::Bound;
-use common::i64_to_u64;
-use schema::Schema;
-use schema::FieldEntry;
-use TantivyError;
-use schema::Type;
-
-#[derive(Debug, Copy, Clone)]
-enum TypeInt {
-    U64, I64
-}
-
-impl TypeInt {
-    fn value_type(self) -> Type {
-        match self {
-            TypeInt::I64 => Type::I64,
-            TypeInt::U64 => Type::U64
-        }
-    }
-}
-
-//< TODO i64 range Debug string will not look good in the
-// current implementation. Defer conversion to the scorer, or
-// back convert values for Debug.
-#[derive(Debug, Clone)]
-pub struct FastFieldFilterQuery {
-    field: Field,
-    range: RangeU64,
-    int_type: TypeInt, //< just here to check the schema at runtime, as we call `.weight`
-}
-
-fn convert_bound_to_u64(bound: Bound<i64>) -> Bound<u64> {
-    match bound {
-        Bound::Included(val) => Bound::Excluded(i64_to_u64(val)),
-        Bound::Excluded(val) => Bound::Excluded(i64_to_u64(val)),
-        Bound::Unbounded => Bound::Unbounded
-    }
-}
-
-impl FastFieldFilterQuery {
-
-    pub fn new_u64(field: Field, low: Bound<u64>, high: Bound<u64>) -> FastFieldFilterQuery {
-        FastFieldFilterQuery {
-            field: field,
-            range: RangeU64 { low, high },
-            int_type: TypeInt::U64
-        }
-    }
-
-    pub fn new_i64(field: Field, low: Bound<i64>, high: Bound<i64>) -> FastFieldFilterQuery {
-        FastFieldFilterQuery {
-            field: field,
-            range: RangeU64 {
-                low: convert_bound_to_u64(low),
-                high: convert_bound_to_u64(high)
-            },
-            int_type: TypeInt::I64
-        }
-    }
-
-    fn validate_schema(&self, schema: &Schema) -> Result<()> {
-        let field_entry: &FieldEntry = schema.get_field_entry(self.field);
-        if !field_entry.is_int_fast() {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is not an int fast field",
-                field_entry.name()
-            )));
-        }
-        let expected_value_type = self.int_type.value_type();
-        if field_entry.field_type().value_type() != self.int_type.value_type() {
-            return Err(TantivyError::SchemaError(format!(
-                "Field {:?} is not a {:?}",
-                field_entry.name(),
-                expected_value_type
-            )));
-        }
-        Ok(())
-    }
-}
-
-impl Query for FastFieldFilterQuery {
-    fn weight(&self, searcher: &Searcher, _scoring_enabled: bool) -> Result<Box<Weight>> {
-        self.validate_schema(searcher.schema())?;
-        Ok(Box::new(FastFieldFilterWeight::new(self.field, self.range.clone())))
-    }
-}
@@ -1,58 +0,0 @@
-use query::Scorer;
-use fastfield::FastFieldReader;
-use DocId;
-use DocSet;
-use query::fastfield_filter::RangeU64;
-
-pub(crate) struct FastFieldFilterScorer {
-    fastfield_reader: FastFieldReader<u64>,
-    range: RangeU64,
-    max_doc: DocId,
-    doc: DocId,
-}
-
-impl FastFieldFilterScorer {
-    pub fn new(fastfield_reader: FastFieldReader<u64>,
-               range: RangeU64,
-               max_doc: DocId) -> FastFieldFilterScorer {
-        FastFieldFilterScorer {
-            fastfield_reader,
-            range,
-            max_doc,
-            doc: 0u32,
-        }
-    }
-
-    fn within_range(&self, doc: DocId) -> bool {
-        let val = self.fastfield_reader.get(doc);
-        self.range.contains(val)
-    }
-}
-
-impl DocSet for FastFieldFilterScorer {
-    fn advance(&mut self) -> bool {
-        for doc in (self.doc + 1)..self.max_doc {
-            if self.within_range(doc) {
-                self.doc = doc;
-                return true;
-            }
-        }
-        self.doc = self.max_doc;
-        return false;
-    }
-
-    fn doc(&self) -> u32 {
-        self.doc
-    }
-
-    fn size_hint(&self) -> u32 {
-        self.max_doc
-    }
-}
-
-impl Scorer for FastFieldFilterScorer {
-    fn score(&mut self) -> f32 {
-        1f32
-    }
-}
@@ -1,29 +0,0 @@
-use query::Weight;
-use schema::Field;
-use query::fastfield_filter::RangeU64;
-use query::fastfield_filter::FastFieldFilterScorer;
-use SegmentReader;
-use query::Scorer;
-use TantivyError;
-use fastfield::FastFieldReader;
-
-pub struct FastFieldFilterWeight {
-    field: Field,
-    range: RangeU64,
-}
-
-impl FastFieldFilterWeight {
-    pub(crate) fn new(field: Field, range: RangeU64) -> FastFieldFilterWeight {
-        FastFieldFilterWeight {
-            field,
-            range
-        }
-    }
-}
-
-impl Weight for FastFieldFilterWeight {
-    fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>, TantivyError> {
-        let fastfield_reader: FastFieldReader<u64> = reader.fast_field_reader(self.field)?;
-        Ok(Box::new(FastFieldFilterScorer::new(fastfield_reader, self.range.clone(), reader.max_doc())))
-    }
-}
@@ -1,47 +0,0 @@
-use std::collections::Bound;
-
-mod fastfield_filter_query;
-mod fastfield_filter_weight;
-mod fastfield_filter_scorer;
-
-pub use self::fastfield_filter_query::FastFieldFilterQuery;
-use self::fastfield_filter_weight::FastFieldFilterWeight;
-use self::fastfield_filter_scorer::FastFieldFilterScorer;
-
-#[derive(Debug, Clone)]
-pub(crate) struct RangeU64 {
-    pub low: Bound<u64>,
-    pub high: Bound<u64>,
-}
-
-impl RangeU64 {
-
-    fn match_high(&self, val: u64) -> bool {
-        match self.high {
-            Bound::Excluded(bound) => val < bound,
-            Bound::Included(bound) => val <= bound,
-            Bound::Unbounded => true
-        }
-    }
-
-    fn match_low(&self, val: u64) -> bool {
-        match self.high {
-            Bound::Excluded(bound) => bound < val,
-            Bound::Included(bound) => bound <= val,
-            Bound::Unbounded => true
-        }
-    }
-
-    pub fn contains(&self, val: u64) -> bool {
-        self.match_low(val) && self.match_high(val)
-    }
-}
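Worth noting in the removed module above: `match_low` matches on `self.high` rather than `self.low`, and `convert_bound_to_u64` in the deleted query file maps `Bound::Included` to `Bound::Excluded`. Both look like latent bugs in the code this change deletes. A hedged sketch of the bound check as the names suggest it was intended, checking `low` and `high` separately:

```rust
use std::ops::Bound;

struct RangeU64 {
    low: Bound<u64>,
    high: Bound<u64>,
}

impl RangeU64 {
    // Checks the lower bound against `low` and the upper bound against `high`.
    fn contains(&self, val: u64) -> bool {
        let low_ok = match self.low {
            Bound::Included(bound) => bound <= val,
            Bound::Excluded(bound) => bound < val,
            Bound::Unbounded => true,
        };
        let high_ok = match self.high {
            Bound::Included(bound) => val <= bound,
            Bound::Excluded(bound) => val < bound,
            Bound::Unbounded => true,
        };
        low_ok && high_ok
    }
}

fn main() {
    let range = RangeU64 {
        low: Bound::Included(10),
        high: Bound::Excluded(20),
    };
    assert!(range.contains(10));
    assert!(range.contains(19));
    assert!(!range.contains(20));
    assert!(!range.contains(9));
}
```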
@@ -10,7 +10,7 @@ lazy_static! {
     let mut lev_builder_cache = HashMap::new();
     // TODO make population lazy on a `(distance, val)` basis
     for distance in 0..3 {
-        for &transposition in [false, true].iter() {
+        for &transposition in &[false, true] {
             let lev_automaton_builder = LevenshteinAutomatonBuilder::new(distance, transposition);
             lev_builder_cache.insert((distance, transposition), lev_automaton_builder);
         }
@@ -153,7 +153,7 @@ mod test {

     let fuzzy_query = FuzzyTermQuery::new(term, 1, true);
     searcher.search(&fuzzy_query, &mut collector).unwrap();
-    let scored_docs = collector.score_docs();
+    let scored_docs = collector.top_docs();
     assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
     let (score, _) = scored_docs[0];
     assert_nearly_equals(1f32, score);
@@ -26,10 +26,11 @@ pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> {
         (Some(single_docset), None) => single_docset,
         (Some(left), Some(right)) => {
             {
-                if [&left, &right].into_iter().all(|scorer| {
+                let all_term_scorers = [&left, &right].into_iter().all(|scorer| {
                     let scorer_ref: &Scorer = (*scorer).borrow();
                     Downcast::<TermScorer>::is_type(scorer_ref)
-                }) {
+                });
+                if all_term_scorers {
                     let left = *Downcast::<TermScorer>::downcast(left).unwrap();
                     let right = *Downcast::<TermScorer>::downcast(right).unwrap();
                     return Box::new(Intersection {
@@ -40,12 +41,12 @@ pub fn intersect_scorers(mut scorers: Vec<Box<Scorer>>) -> Box<Scorer> {
                     });
                 }
             }
-            return Box::new(Intersection {
+            Box::new(Intersection {
                 left,
                 right,
                 others: scorers,
                 num_docsets,
-            });
+            })
         }
         _ => {
             unreachable!();
@@ -99,7 +100,7 @@ impl<TDocSet: DocSet, TOtherDocSet: DocSet> Intersection<TDocSet, TOtherDocSet>
 }

 impl<TDocSet: DocSet, TOtherDocSet: DocSet> DocSet for Intersection<TDocSet, TOtherDocSet> {
-    #[allow(never_loop)]
+    #[cfg_attr(feature = "cargo-clippy", allow(clippy::never_loop))]
     fn advance(&mut self) -> bool {
         let (left, right) = (&mut self.left, &mut self.right);
@@ -3,11 +3,11 @@
 Query
 */

 mod all_query;
-mod empty_query;
 mod automaton_weight;
 mod bitset;
 mod bm25;
 mod boolean_query;
+mod empty_query;
 mod exclude;
 mod fuzzy_query;
 mod intersection;
@@ -22,13 +22,11 @@ mod scorer;
 mod term_query;
 mod union;
 mod weight;
-mod fastfield_filter;

 #[cfg(test)]
 mod vec_docset;

 pub(crate) mod score_combiner;

 pub use self::intersection::Intersection;
 pub use self::union::Union;

@@ -36,10 +34,10 @@ pub use self::union::Union;
 pub use self::vec_docset::VecDocSet;

 pub use self::all_query::{AllQuery, AllScorer, AllWeight};
-pub use self::empty_query::{EmptyQuery, EmptyWeight, EmptyScorer};
 pub use self::automaton_weight::AutomatonWeight;
 pub use self::bitset::BitSetDocSet;
 pub use self::boolean_query::BooleanQuery;
+pub use self::empty_query::{EmptyQuery, EmptyScorer, EmptyWeight};
 pub use self::exclude::Exclude;
 pub use self::fuzzy_query::FuzzyTermQuery;
 pub use self::intersection::intersect_scorers;
@@ -55,4 +53,53 @@ pub use self::scorer::ConstScorer;
 pub use self::scorer::Scorer;
 pub use self::term_query::TermQuery;
 pub use self::weight::Weight;
-pub use self::fastfield_filter::FastFieldFilterQuery;
+
+#[cfg(test)]
+mod tests {
+    use Index;
+    use schema::{SchemaBuilder, TEXT};
+    use query::QueryParser;
+    use Term;
+    use std::collections::BTreeSet;
+
+    #[test]
+    fn test_query_terms() {
+        let mut schema_builder = SchemaBuilder::default();
+        let text_field = schema_builder.add_text_field("text", TEXT);
+        let schema = schema_builder.build();
+        let index = Index::create_in_ram(schema);
+        let query_parser = QueryParser::for_index(&index, vec![text_field]);
+        let term_a = Term::from_field_text(text_field, "a");
+        let term_b = Term::from_field_text(text_field, "b");
+        {
+            let mut terms_set: BTreeSet<Term> = BTreeSet::new();
+            query_parser.parse_query("a").unwrap().query_terms(&mut terms_set);
+            let terms: Vec<&Term> = terms_set.iter().collect();
+            assert_eq!(vec![&term_a], terms);
+        }
+        {
+            let mut terms_set: BTreeSet<Term> = BTreeSet::new();
+            query_parser.parse_query("a b").unwrap().query_terms(&mut terms_set);
+            let terms: Vec<&Term> = terms_set.iter().collect();
+            assert_eq!(vec![&term_a, &term_b], terms);
+        }
+        {
+            let mut terms_set: BTreeSet<Term> = BTreeSet::new();
+            query_parser.parse_query("\"a b\"").unwrap().query_terms(&mut terms_set);
+            let terms: Vec<&Term> = terms_set.iter().collect();
+            assert_eq!(vec![&term_a, &term_b], terms);
+        }
+        {
+            let mut terms_set: BTreeSet<Term> = BTreeSet::new();
+            query_parser.parse_query("a a a a a").unwrap().query_terms(&mut terms_set);
+            let terms: Vec<&Term> = terms_set.iter().collect();
+            assert_eq!(vec![&term_a], terms);
+        }
+        {
+            let mut terms_set: BTreeSet<Term> = BTreeSet::new();
+            query_parser.parse_query("a -b").unwrap().query_terms(&mut terms_set);
+            let terms: Vec<&Term> = terms_set.iter().collect();
+            assert_eq!(vec![&term_a, &term_b], terms);
+        }
+    }
+}
@@ -12,3 +12,38 @@ pub enum Occur {
     /// search.
     MustNot,
 }

+impl Occur {
+    /// Returns the one-char prefix symbol for this `Occur`.
+    /// - `Should` => '?',
+    /// - `Must` => '+'
+    /// - `Not` => '-'
+    pub fn to_char(self) -> char {
+        match self {
+            Occur::Should => '?',
+            Occur::Must => '+',
+            Occur::MustNot => '-',
+        }
+    }
+}
+
+/// Compose two occur values.
+pub fn compose_occur(left: Occur, right: Occur) -> Occur {
+    match left {
+        Occur::Should => right,
+        Occur::Must => {
+            if right == Occur::MustNot {
+                Occur::MustNot
+            } else {
+                Occur::Must
+            }
+        }
+        Occur::MustNot => {
+            if right == Occur::MustNot {
+                Occur::Must
+            } else {
+                Occur::MustNot
+            }
+        }
+    }
+}
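`compose_occur` resolves nested prefixes such as `-(-a)` or `+(-a)`: `Should` acts as the identity, `Must` absorbs everything except `MustNot`, and two negations cancel into `Must`. A small standalone check of that table, mirroring the code above:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Occur {
    Should,
    Must,
    MustNot,
}

// Same composition rule as `compose_occur` in the diff above.
fn compose(left: Occur, right: Occur) -> Occur {
    match (left, right) {
        (Occur::Should, right) => right,
        (Occur::Must, Occur::MustNot) => Occur::MustNot,
        (Occur::Must, _) => Occur::Must,
        (Occur::MustNot, Occur::MustNot) => Occur::Must, // double negation
        (Occur::MustNot, _) => Occur::MustNot,
    }
}

fn main() {
    // `-(-a)` is required, `+(-a)` stays excluded, `-(+a)` stays excluded.
    assert_eq!(compose(Occur::MustNot, Occur::MustNot), Occur::Must);
    assert_eq!(compose(Occur::Must, Occur::MustNot), Occur::MustNot);
    assert_eq!(compose(Occur::MustNot, Occur::Must), Occur::MustNot);
    assert_eq!(compose(Occur::Should, Occur::Must), Occur::Must);
}
```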
@@ -5,6 +5,7 @@ use query::bm25::BM25Weight;
 use query::Query;
 use query::Weight;
 use schema::{Field, Term};
+use std::collections::BTreeSet;
 use Result;

 /// `PhraseQuery` matches a specific sequence of words.
@@ -107,4 +108,10 @@ impl Query for PhraseQuery {
         )))
     }
+
+    fn query_terms(&self, term_set: &mut BTreeSet<Term>) {
+        for (_, query_term) in &self.phrase_terms {
+            term_set.insert(query_term.clone());
+        }
+    }
 }
@@ -124,7 +124,8 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
         fieldnorm_reader: FieldNormReader,
         score_needed: bool,
     ) -> PhraseScorer<TPostings> {
-        let max_offset = term_postings.iter()
+        let max_offset = term_postings
+            .iter()
             .map(|&(offset, _)| offset)
             .max()
             .unwrap_or(0);
@@ -133,8 +134,7 @@ impl<TPostings: Postings> PhraseScorer<TPostings> {
             .into_iter()
             .map(|(offset, postings)| {
                 PostingsWithOffset::new(postings, (max_offset - offset) as u32)
-            })
-            .collect::<Vec<_>>();
+            }).collect::<Vec<_>>();
         PhraseScorer {
             intersection_docset: Intersection::new(postings_with_offsets),
             num_docsets,
@@ -2,9 +2,11 @@ use super::Weight;
 use collector::Collector;
 use core::searcher::Searcher;
 use downcast;
+use std::collections::BTreeSet;
 use std::fmt;
 use Result;
 use SegmentLocalId;
+use Term;

 /// The `Query` trait defines a set of documents and a scoring method
 /// for those documents.
@@ -58,6 +60,10 @@ pub trait Query: QueryClone + downcast::Any + fmt::Debug {
         Ok(result)
     }

+    /// Extract all of the terms associated to the query and insert them in the
+    /// term set given in arguments.
+    fn query_terms(&self, _term_set: &mut BTreeSet<Term>) {}
+
     /// Search works as follows :
     ///
     /// First the weight object associated to the query is created.
@@ -1,6 +1,12 @@
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::toplevel_ref_arg))]

 use super::user_input_ast::*;
 use combine::char::*;
+use combine::error::StreamError;
+use combine::stream::StreamErrorFor;
 use combine::*;
+use query::occur::Occur;
+use query::query_parser::user_input_ast::UserInputBound;

 parser! {
@@ -17,18 +23,25 @@ parser! {
     fn word[I]()(I) -> String
     where [I: Stream<Item = char>] {
         many1(satisfy(|c: char| c.is_alphanumeric()))
+            .and_then(|s: String| {
+                match s.as_str() {
+                    "OR" => Err(StreamErrorFor::<I>::unexpected_static_message("OR")),
+                    "AND" => Err(StreamErrorFor::<I>::unexpected_static_message("AND")),
+                    "NOT" => Err(StreamErrorFor::<I>::unexpected_static_message("NOT")),
+                    _ => Ok(s)
+                }
+            })
     }
 }

 parser! {
-    fn literal[I]()(I) -> UserInputAST
+    fn literal[I]()(I) -> UserInputLeaf
     where [I: Stream<Item = char>]
     {
         let term_val = || {
             let phrase = (char('"'), many1(satisfy(|c| c != '"')), char('"')).map(|(_, s, _)| s);
             phrase.or(word())
         };

         let term_val_with_field = negative_number().or(term_val());
         let term_query =
             (field(), char(':'), term_val_with_field).map(|(field_name, _, phrase)| UserInputLiteral {
@@ -41,7 +54,7 @@ parser! {
         });
         try(term_query)
             .or(term_default_field)
-            .map(UserInputAST::from)
+            .map(UserInputLeaf::from)
     }
 }

@@ -55,7 +68,14 @@ parser! {
 }

 parser! {
-    fn range[I]()(I) -> UserInputAST
+    fn spaces1[I]()(I) -> ()
+    where [I: Stream<Item = char>] {
+        skip_many1(space())
+    }
+}
+
+parser! {
+    fn range[I]()(I) -> UserInputLeaf
     where [I: Stream<Item = char>] {
         let term_val = || {
             word().or(negative_number()).or(char('*').map(|_| "*".to_string()))
@@ -77,7 +97,7 @@ parser! {
             string("TO"),
             spaces(),
             upper_bound,
-        ).map(|(field, lower, _, _, _, upper)| UserInputAST::Range {
+        ).map(|(field, lower, _, _, _, upper)| UserInputLeaf::Range {
             field,
             lower,
             upper
@@ -88,13 +108,50 @@ parser! {
 parser! {
     fn leaf[I]()(I) -> UserInputAST
     where [I: Stream<Item = char>] {
-        (char('-'), leaf())
-            .map(|(_, expr)| UserInputAST::Not(Box::new(expr)))
-            .or((char('+'), leaf()).map(|(_, expr)| UserInputAST::Must(Box::new(expr))))
+        (char('-'), leaf()).map(|(_, expr)| expr.unary(Occur::MustNot) )
+            .or((char('+'), leaf()).map(|(_, expr)| expr.unary(Occur::Must) ))
             .or((char('('), parse_to_ast(), char(')')).map(|(_, expr, _)| expr))
-            .or(char('*').map(|_| UserInputAST::All))
-            .or(try(range()))
-            .or(literal())
+            .or(char('*').map(|_| UserInputAST::from(UserInputLeaf::All) ))
+            .or(try(
+                (string("NOT"), spaces1(), leaf()).map(|(_, _, expr)| expr.unary(Occur::MustNot))
+            ))
+            .or(try(
+                range().map(UserInputAST::from)
+            ))
+            .or(literal().map(|leaf| UserInputAST::Leaf(Box::new(leaf))))
     }
 }

+enum BinaryOperand {
+    Or,
+    And,
+}
+
+parser! {
+    fn binary_operand[I]()(I) -> BinaryOperand
+    where [I: Stream<Item = char>] {
+        (spaces1(),
+         (
+             string("AND").map(|_| BinaryOperand::And)
+                 .or(string("OR").map(|_| BinaryOperand::Or))
+         ),
+         spaces1()).map(|(_, op, _)| op)
+    }
+}
+
+enum Element {
+    SingleEl(UserInputAST),
+    NormalDisjunctive(Vec<Vec<UserInputAST>>),
+}
+
+impl Element {
+    pub fn into_dnf(self) -> Vec<Vec<UserInputAST>> {
+        match self {
+            Element::NormalDisjunctive(conjunctions) => conjunctions,
+            Element::SingleEl(el) => vec![vec![el]],
+        }
+    }
+}

@@ -102,14 +159,56 @@ parser! {
     pub fn parse_to_ast[I]()(I) -> UserInputAST
     where [I: Stream<Item = char>]
     {
-        sep_by(leaf(), spaces())
-            .map(|subqueries: Vec<UserInputAST>| {
-                if subqueries.len() == 1 {
-                    subqueries.into_iter().next().unwrap()
-                } else {
-                    UserInputAST::Clause(subqueries.into_iter().map(Box::new).collect())
-                }
-            })
+        (
+            try(
+                chainl1(
+                    leaf().map(Element::SingleEl),
+                    binary_operand().map(|op: BinaryOperand|
+                        move |left: Element, right: Element| {
+                            let mut dnf = left.into_dnf();
+                            if let Element::SingleEl(el) = right {
+                                match op {
+                                    BinaryOperand::And => {
+                                        if let Some(last) = dnf.last_mut() {
+                                            last.push(el);
+                                        }
+                                    }
+                                    BinaryOperand::Or => {
+                                        dnf.push(vec!(el));
+                                    }
+                                }
+                            } else {
+                                unreachable!("Please report.")
+                            }
+                            Element::NormalDisjunctive(dnf)
+                        }
+                    )
+                )
+                .map(|el| el.into_dnf())
+                .map(|fnd| {
+                    if fnd.len() == 1 {
+                        UserInputAST::and(fnd.into_iter().next().unwrap()) //< safe
+                    } else {
+                        let conjunctions = fnd
+                            .into_iter()
+                            .map(UserInputAST::and)
+                            .collect();
+                        UserInputAST::or(conjunctions)
+                    }
+                })
+            )
+            .or(
+                sep_by(leaf(), spaces())
+                    .map(|subqueries: Vec<UserInputAST>| {
+                        if subqueries.len() == 1 {
+                            subqueries.into_iter().next().unwrap()
+                        } else {
+                            UserInputAST::Clause(subqueries.into_iter().collect())
+                        }
+                    })
+            )
+        )
     }
 }
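The new `parse_to_ast` folds `AND`/`OR` chains into disjunctive normal form with `chainl1`: `AND` appends the right-hand leaf to the conjunction currently being built, while `OR` opens a fresh one. That is exactly why the tests below expect `a OR b AND c` to parse as `(a) OR (b AND c)`. A hedged sketch of the same fold over plain strings, with no combine parser involved:

```rust
enum Op {
    And,
    Or,
}

/// Folds a left-to-right chain of (op, leaf) pairs into DNF:
/// a vector of conjunctions, each conjunction a vector of leaves.
fn fold_dnf(first: &str, rest: &[(Op, &str)]) -> Vec<Vec<String>> {
    let mut dnf: Vec<Vec<String>> = vec![vec![first.to_string()]];
    for (op, leaf) in rest {
        match op {
            // AND extends the conjunction currently being built.
            Op::And => dnf.last_mut().unwrap().push(leaf.to_string()),
            // OR starts a fresh conjunction.
            Op::Or => dnf.push(vec![leaf.to_string()]),
        }
    }
    dnf
}

fn main() {
    // "a OR b AND c" => (a) OR (b AND c), matching the parser tests below.
    let dnf = fold_dnf("a", &[(Op::Or, "b"), (Op::And, "c")]);
    assert_eq!(
        dnf,
        vec![vec!["a".to_string()], vec!["b".to_string(), "c".to_string()]]
    );
}
```

Because `chainl1` is left-associative and the fold only ever touches the last conjunction, `AND` binds tighter than `OR` without any explicit precedence table.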
@@ -128,6 +227,40 @@ mod test {
         assert!(parse_to_ast().parse(query).is_err());
     }

+    #[test]
+    fn test_parse_query_to_ast_not_op() {
+        assert_eq!(
+            format!("{:?}", parse_to_ast().parse("NOT")),
+            "Err(UnexpectedParse)"
+        );
+        test_parse_query_to_ast_helper("NOTa", "\"NOTa\"");
+        test_parse_query_to_ast_helper("NOT a", "-(\"a\")");
+    }
+
+    #[test]
+    fn test_parse_query_to_ast_binary_op() {
+        test_parse_query_to_ast_helper("a AND b", "(+(\"a\") +(\"b\"))");
+        test_parse_query_to_ast_helper("a OR b", "(?(\"a\") ?(\"b\"))");
+        test_parse_query_to_ast_helper("a OR b AND c", "(?(\"a\") ?((+(\"b\") +(\"c\"))))");
+        test_parse_query_to_ast_helper("a AND b AND c", "(+(\"a\") +(\"b\") +(\"c\"))");
+        assert_eq!(
+            format!("{:?}", parse_to_ast().parse("a OR b aaa")),
+            "Err(UnexpectedParse)"
+        );
+        assert_eq!(
+            format!("{:?}", parse_to_ast().parse("a AND b aaa")),
+            "Err(UnexpectedParse)"
+        );
+        assert_eq!(
+            format!("{:?}", parse_to_ast().parse("aaa a OR b ")),
+            "Err(UnexpectedParse)"
+        );
+        assert_eq!(
+            format!("{:?}", parse_to_ast().parse("aaa ccc a OR b ")),
+            "Err(UnexpectedParse)"
+        );
+    }
+
     #[test]
     fn test_parse_query_to_ast() {
         test_parse_query_to_ast_helper("+(a b) +d", "(+((\"a\" \"b\")) +(\"d\"))");
@@ -1,9 +1,13 @@
use super::logical_ast::*;
use super::query_grammar::parse_to_ast;
use super::user_input_ast::*;
use combine::Parser;
use core::Index;
use query::occur::compose_occur;
use query::query_parser::logical_ast::LogicalAST;
use query::AllQuery;
use query::BooleanQuery;
use query::EmptyQuery;
use query::Occur;
use query::PhraseQuery;
use query::Query;
@@ -17,9 +21,6 @@ use std::num::ParseIntError;
use std::ops::Bound;
use std::str::FromStr;
use tokenizer::TokenizerManager;
use combine::Parser;
use query::EmptyQuery;

/// Possible error that may happen when parsing a query.
#[derive(Debug, PartialEq, Eq)]
@@ -57,6 +58,27 @@ impl From<ParseIntError> for QueryParserError {
    }
}

/// Recursively remove empty clauses from the AST.
///
/// Returns `None` iff the `logical_ast` ended up being empty.
fn trim_ast(logical_ast: LogicalAST) -> Option<LogicalAST> {
    match logical_ast {
        LogicalAST::Clause(children) => {
            let trimmed_children = children
                .into_iter()
                .flat_map(|(occur, child)| {
                    trim_ast(child).map(|trimmed_child| (occur, trimmed_child))
                }).collect::<Vec<_>>();
            if trimmed_children.is_empty() {
                None
            } else {
                Some(LogicalAST::Clause(trimmed_children))
            }
        }
        _ => Some(logical_ast),
    }
}
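A standalone sketch of the trimming rule, with miniature types mirroring `LogicalAST` (illustrative only, not the crate's types): a clause whose children all trim away becomes `None`, which the caller can then map to an `EmptyQuery`.

```rust
enum Ast {
    Clause(Vec<Ast>),
    Leaf(&'static str),
}

fn trim(ast: Ast) -> Option<Ast> {
    match ast {
        Ast::Clause(children) => {
            // Trim the children first; drop the ones that vanish.
            let kept: Vec<Ast> = children.into_iter().filter_map(trim).collect();
            if kept.is_empty() {
                None
            } else {
                Some(Ast::Clause(kept))
            }
        }
        leaf => Some(leaf),
    }
}

fn main() {
    // A clause containing only an empty clause trims away entirely.
    assert!(trim(Ast::Clause(vec![Ast::Clause(vec![])])).is_none());
    // A leaf always survives.
    assert!(trim(Ast::Leaf("token")).is_some());
}
```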
/// Tantivy's Query parser
///
/// The language covered by the current parser is extremely simple.
@@ -79,12 +101,22 @@ impl From<ParseIntError> for QueryParserError {
///
/// Switching to a default of `AND` can be done by calling `.set_conjunction_by_default()`.
///
///
/// * boolean operators `AND`, `OR`. `AND` takes precedence over `OR`, so that `a AND b OR c` is interpreted
/// as `(a AND b) OR c`.
///
/// * In addition to the boolean operators, the `-`, `+` operators can be used. These operators
/// are sufficient to express all queries using boolean operators. For instance `x AND y OR z` can
/// be written (`(+x +y) z`). In addition, these operators can help define "required optional"
/// queries. `(+x y)` matches the same document set as simply `x`, but `y` will help refine the score.
///
/// * negative terms: By prepending a term with a `-`, a term can be excluded
/// from the search. This is useful for disambiguating a query.
/// e.g. `apple -fruit`
///
/// * must terms: By prepending a term with a `+`, a term can be made required for the search.
///
///
/// * phrase terms: Quoted terms become phrase searches on fields that have positions indexed.
/// e.g., `title:"Barack Obama"` will only find documents that have "barack" immediately followed
/// by "obama".
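As a quick illustration of the precedence rules documented above, here is a minimal sketch using the same `QueryParser::new` constructor the tests below use (schema reduced to two default text fields; assumes tantivy 0.7's public API):

```rust
use tantivy::query::QueryParser;
use tantivy::schema::{SchemaBuilder, TEXT};
use tantivy::tokenizer::TokenizerManager;

fn main() {
    let mut schema_builder = SchemaBuilder::default();
    let title = schema_builder.add_text_field("title", TEXT);
    let body = schema_builder.add_text_field("body", TEXT);
    let schema = schema_builder.build();

    let parser = QueryParser::new(schema, vec![title, body], TokenizerManager::default());
    // AND binds tighter than OR, so this parses as (a AND b) OR c.
    let _q1 = parser.parse_query("a AND b OR c").expect("valid query");
    // The +/- prefixes express the same thing without keywords: (+a +b) c.
    let _q2 = parser.parse_query("(+a +b) c").expect("valid query");
}
```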
@@ -155,8 +187,9 @@ impl QueryParser {

    /// Parse the user query into an AST.
    fn parse_query_to_logical_ast(&self, query: &str) -> Result<LogicalAST, QueryParserError> {
        let (user_input_ast, _remaining) =
            parse_to_ast().parse(query).map_err(|_| QueryParserError::SyntaxError)?;
        let (user_input_ast, _remaining) = parse_to_ast()
            .parse(query)
            .map_err(|_| QueryParserError::SyntaxError)?;
        self.compute_logical_ast(user_input_ast)
    }

@@ -201,14 +234,15 @@ impl QueryParser {
            }
            FieldType::Str(ref str_options) => {
                if let Some(option) = str_options.get_indexing_options() {
                    let mut tokenizer = self.tokenizer_manager.get(option.tokenizer()).ok_or_else(
                        || {
                            QueryParserError::UnknownTokenizer(
                                field_entry.name().to_string(),
                                option.tokenizer().to_string(),
                            )
                        },
                    )?;
                    let mut tokenizer =
                        self.tokenizer_manager
                            .get(option.tokenizer())
                            .ok_or_else(|| {
                                QueryParserError::UnknownTokenizer(
                                    field_entry.name().to_string(),
                                    option.tokenizer().to_string(),
                                )
                            })?;
                    let mut terms: Vec<(usize, Term)> = Vec::new();
                    let mut token_stream = tokenizer.token_stream(phrase);
                    token_stream.process(&mut |token| {
@@ -258,12 +292,9 @@ impl QueryParser {
    ) -> Result<Option<LogicalLiteral>, QueryParserError> {
        let terms = self.compute_terms_for_string(field, phrase)?;
        match &terms[..] {
            [] =>
                Ok(None),
            [(_, term)] =>
                Ok(Some(LogicalLiteral::Term(term.clone()))),
            _ =>
                Ok(Some(LogicalLiteral::Phrase(terms.clone()))),
            [] => Ok(None),
            [(_, term)] => Ok(Some(LogicalLiteral::Term(term.clone()))),
            _ => Ok(Some(LogicalLiteral::Phrase(terms.clone()))),
        }
    }

@@ -275,7 +306,11 @@ impl QueryParser {
        }
    }

    fn resolve_bound(&self, field: Field, bound: &UserInputBound) -> Result<Bound<Term>, QueryParserError> {
    fn resolve_bound(
        &self,
        field: Field,
        bound: &UserInputBound,
    ) -> Result<Bound<Term>, QueryParserError> {
        if bound.term_str() == "*" {
            return Ok(Bound::Unbounded);
        }
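The `*` wildcard handling above is the special case of bound resolution: a range endpoint written as `*` resolves to `Bound::Unbounded`. A tiny standalone sketch of that rule (illustrative only; the real method also converts the term text into a typed `Term`):

```rust
use std::ops::Bound;

fn resolve_bound_sketch(term_str: &str) -> Bound<String> {
    if term_str == "*" {
        // A `*` endpoint leaves the range open on that side.
        Bound::Unbounded
    } else {
        // Inclusive vs. exclusive actually depends on the bound's
        // syntax in the query; Included is just a placeholder here.
        Bound::Included(term_str.to_string())
    }
}

fn main() {
    assert!(match resolve_bound_sketch("*") {
        Bound::Unbounded => true,
        _ => false,
    });
}
```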
@@ -315,56 +350,30 @@ impl QueryParser {
        let default_occur = self.default_occur();
        let mut logical_sub_queries: Vec<(Occur, LogicalAST)> = Vec::new();
        for sub_query in sub_queries {
            let (occur, sub_ast) = self.compute_logical_ast_with_occur(*sub_query)?;
            let (occur, sub_ast) = self.compute_logical_ast_with_occur(sub_query)?;
            let new_occur = compose_occur(default_occur, occur);
            logical_sub_queries.push((new_occur, sub_ast));
        }
        Ok((Occur::Should, LogicalAST::Clause(logical_sub_queries)))
    }
    UserInputAST::Not(subquery) => {
        let (occur, logical_sub_queries) = self.compute_logical_ast_with_occur(*subquery)?;
        Ok((compose_occur(Occur::MustNot, occur), logical_sub_queries))
    UserInputAST::Unary(left_occur, subquery) => {
        let (right_occur, logical_sub_queries) =
            self.compute_logical_ast_with_occur(*subquery)?;
        Ok((compose_occur(left_occur, right_occur), logical_sub_queries))
    }
    UserInputAST::Must(subquery) => {
        let (occur, logical_sub_queries) = self.compute_logical_ast_with_occur(*subquery)?;
        Ok((compose_occur(Occur::Must, occur), logical_sub_queries))
    }
    UserInputAST::Range {
        field,
        lower,
        upper,
    } => {
        let fields = self.resolved_fields(&field)?;
        let mut clauses = fields
            .iter()
            .map(|&field| {
                let field_entry = self.schema.get_field_entry(field);
                let value_type = field_entry.field_type().value_type();
                Ok(LogicalAST::Leaf(Box::new(LogicalLiteral::Range {
                    field,
                    value_type,
                    lower: self.resolve_bound(field, &lower)?,
                    upper: self.resolve_bound(field, &upper)?,
                })))
            })
            .collect::<Result<Vec<_>, QueryParserError>>()?;
        let result_ast = if clauses.len() == 1 {
            clauses.pop().unwrap()
        } else {
            LogicalAST::Clause(
                clauses
                    .into_iter()
                    .map(|clause| (Occur::Should, clause))
                    .collect(),
            )
        };
    UserInputAST::Leaf(leaf) => {
        let result_ast = self.compute_logical_ast_from_leaf(*leaf)?;
        Ok((Occur::Should, result_ast))
    }
    UserInputAST::All => Ok((
        Occur::Should,
        LogicalAST::Leaf(Box::new(LogicalLiteral::All)),
    )),
    UserInputAST::Leaf(literal) => {
        }
    }

    fn compute_logical_ast_from_leaf(
        &self,
        leaf: UserInputLeaf,
    ) -> Result<LogicalAST, QueryParserError> {
        match leaf {
            UserInputLeaf::Literal(literal) => {
                let term_phrases: Vec<(Field, String)> = match literal.field_name {
                    Some(ref field_name) => {
                        let field = self.resolve_field_name(field_name)?;
@@ -387,36 +396,43 @@ impl QueryParser {
                        asts.push(LogicalAST::Leaf(Box::new(ast)));
                    }
                }
                let result_ast = if asts.is_empty() {
                    // this should never happen
                    return Err(QueryParserError::SyntaxError);
                } else if asts.len() == 1 {
                    asts[0].clone()
                let result_ast: LogicalAST = if asts.len() == 1 {
                    asts.into_iter().next().unwrap()
                } else {
                    LogicalAST::Clause(asts.into_iter().map(|ast| (Occur::Should, ast)).collect())
                };
                Ok((Occur::Should, result_ast))
                Ok(result_ast)
            }
        }
    }
}
}

/// Compose two occur values.
fn compose_occur(left: Occur, right: Occur) -> Occur {
    match left {
        Occur::Should => right,
        Occur::Must => {
            if right == Occur::MustNot {
                Occur::MustNot
            } else {
                Occur::Must
            }
        }
        Occur::MustNot => {
            if right == Occur::MustNot {
                Occur::Must
            } else {
                Occur::MustNot
            UserInputLeaf::All => Ok(LogicalAST::Leaf(Box::new(LogicalLiteral::All))),
            UserInputLeaf::Range {
                field,
                lower,
                upper,
            } => {
                let fields = self.resolved_fields(&field)?;
                let mut clauses = fields
                    .iter()
                    .map(|&field| {
                        let field_entry = self.schema.get_field_entry(field);
                        let value_type = field_entry.field_type().value_type();
                        Ok(LogicalAST::Leaf(Box::new(LogicalLiteral::Range {
                            field,
                            value_type,
                            lower: self.resolve_bound(field, &lower)?,
                            upper: self.resolve_bound(field, &upper)?,
                        })))
                    }).collect::<Result<Vec<_>, QueryParserError>>()?;
                let result_ast = if clauses.len() == 1 {
                    clauses.pop().unwrap()
                } else {
                    LogicalAST::Clause(
                        clauses
                            .into_iter()
                            .map(|clause| (Occur::Should, clause))
                            .collect(),
                    )
                };
                Ok(result_ast)
            }
        }
    }
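`compose_occur` (now imported from `query::occur`, per the new `use` line at the top of this file) resolves nested unary operators by sign: `Should` is neutral, and two `MustNot`s cancel out into a `Must`. A standalone copy of the rules shown above, for illustration:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Occur {
    Should,
    Must,
    MustNot,
}

fn compose_occur(left: Occur, right: Occur) -> Occur {
    match left {
        // Should is the neutral element.
        Occur::Should => right,
        Occur::Must => {
            if right == Occur::MustNot {
                Occur::MustNot
            } else {
                Occur::Must
            }
        }
        Occur::MustNot => {
            if right == Occur::MustNot {
                Occur::Must
            } else {
                Occur::MustNot
            }
        }
    }
}

fn main() {
    // -(-a) behaves like +a: the two negations cancel.
    assert_eq!(compose_occur(Occur::MustNot, Occur::MustNot), Occur::Must);
    // +(-a) stays negative.
    assert_eq!(compose_occur(Occur::Must, Occur::MustNot), Occur::MustNot);
}
```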
@@ -425,31 +441,38 @@ fn compose_occur(left: Occur, right: Occur) -> Occur {
fn convert_literal_to_query(logical_literal: LogicalLiteral) -> Box<Query> {
    match logical_literal {
        LogicalLiteral::Term(term) => Box::new(TermQuery::new(term, IndexRecordOption::WithFreqs)),
        LogicalLiteral::Phrase(term_with_offsets) => Box::new(PhraseQuery::new_with_offset(term_with_offsets)),
        LogicalLiteral::Phrase(term_with_offsets) => {
            Box::new(PhraseQuery::new_with_offset(term_with_offsets))
        }
        LogicalLiteral::Range {
            field,
            value_type,
            lower,
            upper,
        } => Box::new(RangeQuery::new_term_bounds(field, value_type, lower, upper)),
        } => Box::new(RangeQuery::new_term_bounds(
            field, value_type, &lower, &upper,
        )),
        LogicalLiteral::All => Box::new(AllQuery),
    }
}

fn convert_to_query(logical_ast: LogicalAST) -> Box<Query> {
    match logical_ast {
        LogicalAST::Clause(clause) => {
            if clause.is_empty() {
                Box::new(EmptyQuery)
            } else {
                let occur_subqueries = clause
                    .into_iter()
                    .map(|(occur, subquery)| (occur, convert_to_query(subquery)))
                    .collect::<Vec<_>>();
                Box::new(BooleanQuery::from(occur_subqueries))
            }
    match trim_ast(logical_ast) {
        Some(LogicalAST::Clause(trimmed_clause)) => {
            let occur_subqueries = trimmed_clause
                .into_iter()
                .map(|(occur, subquery)| (occur, convert_to_query(subquery)))
                .collect::<Vec<_>>();
            assert!(
                !occur_subqueries.is_empty(),
                "Should not be empty after trimming"
            );
            Box::new(BooleanQuery::from(occur_subqueries))
        }
        LogicalAST::Leaf(logical_literal) => convert_literal_to_query(*logical_literal),
        Some(LogicalAST::Leaf(trimmed_logical_literal)) => {
            convert_literal_to_query(*trimmed_logical_literal)
        }
        None => Box::new(EmptyQuery),
    }
}

@@ -462,12 +485,17 @@ mod test {
    use schema::Field;
    use schema::{IndexRecordOption, TextFieldIndexing, TextOptions};
    use schema::{SchemaBuilder, Term, INT_INDEXED, STORED, STRING, TEXT};
    use tokenizer::SimpleTokenizer;
    use tokenizer::TokenizerManager;
    use tokenizer::{LowerCaser, SimpleTokenizer, StopWordFilter, Tokenizer, TokenizerManager};
    use Index;

    fn make_query_parser() -> QueryParser {
        let mut schema_builder = SchemaBuilder::default();
        let text_field_indexing = TextFieldIndexing::default()
            .set_tokenizer("en_with_stop_words")
            .set_index_option(IndexRecordOption::WithFreqsAndPositions);
        let text_options = TextOptions::default()
            .set_indexing_options(text_field_indexing)
            .set_stored();
        let title = schema_builder.add_text_field("title", TEXT);
        let text = schema_builder.add_text_field("text", TEXT);
        schema_builder.add_i64_field("signed", INT_INDEXED);
@@ -476,9 +504,16 @@ mod test {
        schema_builder.add_text_field("notindexed_u64", STORED);
        schema_builder.add_text_field("notindexed_i64", STORED);
        schema_builder.add_text_field("nottokenized", STRING);
        schema_builder.add_text_field("with_stop_words", text_options);
        let schema = schema_builder.build();
        let default_fields = vec![title, text];
        let tokenizer_manager = TokenizerManager::default();
        tokenizer_manager.register(
            "en_with_stop_words",
            SimpleTokenizer
                .filter(LowerCaser)
                .filter(StopWordFilter::remove(vec!["the".to_string()])),
        );
        QueryParser::new(schema, default_fields, tokenizer_manager)
    }

@@ -548,16 +583,8 @@ mod test {

    #[test]
    pub fn test_parse_query_empty() {
        test_parse_query_to_logical_ast_helper(
            "",
            "<emptyclause>",
            false,
        );
        test_parse_query_to_logical_ast_helper(
            " ",
            "<emptyclause>",
            false,
        );
        test_parse_query_to_logical_ast_helper("", "<emptyclause>", false);
        test_parse_query_to_logical_ast_helper(" ", "<emptyclause>", false);
        let query_parser = make_query_parser();
        let query_result = query_parser.parse_query("");
        let query = query_result.unwrap();
@@ -670,11 +697,7 @@ mod test {
            "(Excluded(Term([0, 0, 0, 0, 116, 105, 116, 105])) TO Unbounded)",
            false,
        );
        test_parse_query_to_logical_ast_helper(
            "*",
            "*",
            false,
        );
        test_parse_query_to_logical_ast_helper("*", "*", false);
    }

    #[test]
@@ -747,6 +770,13 @@ mod test {
        );
    }

    #[test]
    pub fn test_query_parser_not_empty_but_no_tokens() {
        let query_parser = make_query_parser();
        assert!(query_parser.parse_query(" !, ").is_ok());
        assert!(query_parser.parse_query("with_stop_words:the").is_ok());
    }

    #[test]
    pub fn test_parse_query_to_ast_conjunction() {
        test_parse_query_to_logical_ast_helper(
@@ -1,4 +1,39 @@
use std::fmt;
use std::fmt::{Debug, Formatter};

use query::Occur;

pub enum UserInputLeaf {
    Literal(UserInputLiteral),
    All,
    Range {
        field: Option<String>,
        lower: UserInputBound,
        upper: UserInputBound,
    },
}

impl Debug for UserInputLeaf {
    fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
        match self {
            UserInputLeaf::Literal(literal) => literal.fmt(formatter),
            UserInputLeaf::Range {
                ref field,
                ref lower,
                ref upper,
            } => {
                if let Some(ref field) = field {
                    write!(formatter, "{}:", field)?;
                }
                lower.display_lower(formatter)?;
                write!(formatter, " TO ")?;
                upper.display_upper(formatter)?;
                Ok(())
            }
            UserInputLeaf::All => write!(formatter, "*"),
        }
    }
}

pub struct UserInputLiteral {
    pub field_name: Option<String>,
@@ -43,28 +78,93 @@ impl UserInputBound {
}

pub enum UserInputAST {
    Clause(Vec<Box<UserInputAST>>),
    Not(Box<UserInputAST>),
    Must(Box<UserInputAST>),
    Range {
        field: Option<String>,
        lower: UserInputBound,
        upper: UserInputBound,
    },
    All,
    Leaf(Box<UserInputLiteral>),
    Clause(Vec<UserInputAST>),
    Unary(Occur, Box<UserInputAST>),
    // Not(Box<UserInputAST>),
    // Should(Box<UserInputAST>),
    // Must(Box<UserInputAST>),
    Leaf(Box<UserInputLeaf>),
}

impl From<UserInputLiteral> for UserInputAST {
    fn from(literal: UserInputLiteral) -> UserInputAST {
        UserInputAST::Leaf(Box::new(literal))
impl UserInputAST {
    pub fn unary(self, occur: Occur) -> UserInputAST {
        UserInputAST::Unary(occur, Box::new(self))
    }

    fn compose(occur: Occur, asts: Vec<UserInputAST>) -> UserInputAST {
        assert!(occur != Occur::MustNot);
        assert!(!asts.is_empty());
        if asts.len() == 1 {
            asts.into_iter().next().unwrap() //< safe
        } else {
            UserInputAST::Clause(
                asts.into_iter()
                    .map(|ast: UserInputAST| ast.unary(occur))
                    .collect::<Vec<_>>(),
            )
        }
    }

    pub fn and(asts: Vec<UserInputAST>) -> UserInputAST {
        UserInputAST::compose(Occur::Must, asts)
    }

    pub fn or(asts: Vec<UserInputAST>) -> UserInputAST {
        UserInputAST::compose(Occur::Should, asts)
    }
}
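The single-element fast path in `compose` above is what keeps a one-conjunct query from acquiring a redundant wrapping clause. A standalone mini-version of the helper (illustrative types, not the crate's):

```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum Occur {
    Must,
    Should,
}

#[derive(Debug, PartialEq)]
enum Ast {
    Leaf(&'static str),
    Unary(Occur, Box<Ast>),
    Clause(Vec<Ast>),
}

fn compose(occur: Occur, mut asts: Vec<Ast>) -> Ast {
    assert!(!asts.is_empty());
    if asts.len() == 1 {
        // A single subquery needs no wrapping clause.
        asts.pop().unwrap()
    } else {
        Ast::Clause(
            asts.into_iter()
                .map(|a| Ast::Unary(occur, Box::new(a)))
                .collect(),
        )
    }
}

fn main() {
    // Two leaves under AND become a clause of Must-wrapped children.
    let ast = compose(Occur::Must, vec![Ast::Leaf("a"), Ast::Leaf("b")]);
    assert_eq!(
        ast,
        Ast::Clause(vec![
            Ast::Unary(Occur::Must, Box::new(Ast::Leaf("a"))),
            Ast::Unary(Occur::Must, Box::new(Ast::Leaf("b"))),
        ])
    );
    // A singleton collapses to the subquery itself.
    assert_eq!(compose(Occur::Should, vec![Ast::Leaf("a")]), Ast::Leaf("a"));
}
```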
/*
impl UserInputAST {

    fn compose_occur(self, occur: Occur) -> UserInputAST {
        match self {
            UserInputAST::Not(other) => {
                let new_occur = compose_occur(Occur::MustNot, occur);
                other.simplify()
            }
            _ => {
                self
            }
        }
    }

    pub fn simplify(self) -> UserInputAST {
        match self {
            UserInputAST::Clause(els) => {
                if els.len() == 1 {
                    return els.into_iter().next().unwrap();
                } else {
                    return self;
                }
            }
            UserInputAST::Not(els) => {
                if els.len() == 1 {
                    return els.into_iter().next().unwrap();
                } else {
                    return self;
                }
            }
        }
    }
}
*/

impl From<UserInputLiteral> for UserInputLeaf {
    fn from(literal: UserInputLiteral) -> UserInputLeaf {
        UserInputLeaf::Literal(literal)
    }
}

impl From<UserInputLeaf> for UserInputAST {
    fn from(leaf: UserInputLeaf) -> UserInputAST {
        UserInputAST::Leaf(Box::new(leaf))
    }
}

impl fmt::Debug for UserInputAST {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match *self {
            UserInputAST::Must(ref subquery) => write!(formatter, "+({:?})", subquery),
            UserInputAST::Clause(ref subqueries) => {
                if subqueries.is_empty() {
                    write!(formatter, "<emptyclause>")?;
@@ -78,21 +178,9 @@ impl fmt::Debug for UserInputAST {
                }
                Ok(())
            }
            UserInputAST::Not(ref subquery) => write!(formatter, "-({:?})", subquery),
            UserInputAST::Range {
                ref field,
                ref lower,
                ref upper,
            } => {
                if let &Some(ref field) = field {
                    write!(formatter, "{}:", field)?;
                }
                lower.display_lower(formatter)?;
                write!(formatter, " TO ")?;
                upper.display_upper(formatter)?;
                Ok(())
            UserInputAST::Unary(ref occur, ref subquery) => {
                write!(formatter, "{}({:?})", occur.to_char(), subquery)
            }
            UserInputAST::All => write!(formatter, "*"),
            UserInputAST::Leaf(ref subquery) => write!(formatter, "{:?}", subquery),
        }
    }

@@ -68,7 +68,7 @@ fn map_bound<TFrom, TTo, Transform: Fn(&TFrom) -> TTo>(
/// let docs_in_the_sixties = RangeQuery::new_u64(year_field, 1960..1970);
///
/// let mut count_collector = CountCollector::default();
/// docs_in_the_sixties.search(&*searcher, &mut count_collector)?;
/// docs_in_the_sixties.search(&searcher, &mut count_collector)?;
///
/// let num_60s_books = count_collector.count();
///
@@ -96,8 +96,8 @@ impl RangeQuery {
    pub fn new_term_bounds(
        field: Field,
        value_type: Type,
        left_bound: Bound<Term>,
        right_bound: Bound<Term>,
        left_bound: &Bound<Term>,
        right_bound: &Bound<Term>,
    ) -> RangeQuery {
        let verify_and_unwrap_term = |val: &Term| {
            assert_eq!(field, val.field());
@@ -184,11 +184,7 @@ impl RangeQuery {
    ///
    /// If the field is not of the type `Str`, tantivy
    /// will panic when the `Weight` object is created.
    pub fn new_str_bounds<'b>(
        field: Field,
        left: Bound<&'b str>,
        right: Bound<&'b str>,
    ) -> RangeQuery {
    pub fn new_str_bounds(field: Field, left: Bound<&str>, right: Bound<&str>) -> RangeQuery {
        let make_term_val = |val: &&str| val.as_bytes().to_vec();
        RangeQuery {
            field,
@@ -202,7 +198,7 @@ impl RangeQuery {
    ///
    /// If the field is not of the type `Str`, tantivy
    /// will panic when the `Weight` object is created.
    pub fn new_str<'b>(field: Field, range: Range<&'b str>) -> RangeQuery {
    pub fn new_str(field: Field, range: Range<&str>) -> RangeQuery {
        RangeQuery::new_str_bounds(
            field,
            Bound::Included(range.start),
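The explicit lifetime parameter on `new_str_bounds`/`new_str` is dropped above in favor of elided `&str` bounds. A usage sketch under the new signatures (the field argument and the `Excluded` upper bound are assumptions here: the field must be a `Str` field taken from the schema, and the half-open `Range` semantics suggest an excluded end):

```rust
use std::ops::Bound;
use tantivy::query::RangeQuery;
use tantivy::schema::Field;

// `title_field` is assumed to be a Str field obtained from the schema,
// e.g. schema.get_field("title").unwrap().
fn str_range_queries(title_field: Field) -> (RangeQuery, RangeQuery) {
    // Half-open range over raw term bytes: ["apple", "banana").
    let q1 = RangeQuery::new_str(title_field, "apple".."banana");
    // The same range spelled out through the explicit-bounds constructor.
    let q2 = RangeQuery::new_str_bounds(
        title_field,
        Bound::Included("apple"),
        Bound::Excluded("banana"),
    );
    (q1, q2)
}
```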
@@ -332,7 +328,7 @@ mod tests {

        // ... or `1960..=1969` if inclusive range is enabled.
        let mut count_collector = CountCollector::default();
        docs_in_the_sixties.search(&*searcher, &mut count_collector)?;
        docs_in_the_sixties.search(&searcher, &mut count_collector)?;
        assert_eq!(count_collector.count(), 2285);
        Ok(())
    }
@@ -369,9 +365,7 @@ mod tests {
        let searcher = index.searcher();
        let count_multiples = |range_query: RangeQuery| {
            let mut count_collector = CountCollector::default();
            range_query
                .search(&*searcher, &mut count_collector)
                .unwrap();
            range_query.search(&searcher, &mut count_collector).unwrap();
            count_collector.count()
        };

@@ -82,7 +82,7 @@ impl RegexQuery {
        let automaton = Regex::new(&self.regex_pattern)
            .map_err(|_| TantivyError::InvalidArgument(self.regex_pattern.clone()))?;

        Ok(AutomatonWeight::new(self.field.clone(), automaton))
        Ok(AutomatonWeight::new(self.field, automaton))
    }
}

@@ -123,7 +123,7 @@ mod test {
        let mut collector = TopCollector::with_limit(2);
        let regex_query = RegexQuery::new("jap[ao]n".to_string(), country_field);
        searcher.search(&regex_query, &mut collector).unwrap();
        let scored_docs = collector.score_docs();
        let scored_docs = collector.top_docs();
        assert_eq!(scored_docs.len(), 1, "Expected only 1 document");
        let (score, _) = scored_docs[0];
        assert_nearly_equals(1f32, score);
@@ -132,7 +132,7 @@ mod test {
        let mut collector = TopCollector::with_limit(2);
        let regex_query = RegexQuery::new("jap[A-Z]n".to_string(), country_field);
        searcher.search(&regex_query, &mut collector).unwrap();
        let scored_docs = collector.score_docs();
        let scored_docs = collector.top_docs();
        assert_eq!(scored_docs.len(), 0, "Expected ZERO document");
    }
}
Some files were not shown because too many files have changed in this diff